First, take a look at this figure, which gives an overview of the CNN pipeline for natural language processing:
1. Each word corresponds to one row of the matrix. Here d=5, meaning the embedding has 5 dimensions; in practice 128 or 300 dimensions are typical, but d=5 keeps the figure simple. With 7 words, the input matrix is 7×5.
2. The first step is convolution. The figure uses two filters of height 4, two of height 3, and two of height 2, each convolved over the 7×5 matrix. A 4×5 filter fits in 4 positions as it slides down a 7×5 input, so it produces a 4×1 feature map; a 3×5 filter fits in 5 positions, giving a 5×1 feature map; likewise, a 2×5 filter gives a 6×1 feature map.
3. The second step is pooling. The figure uses max pooling, i.e. taking the maximum element of each feature map: the two 4×1 feature maps each contribute their maximum, forming a 2×1 matrix, and the same is done for the 5×1 and 6×1 feature maps. The six feature maps thus yield three pooled 2×1 matrices.
4. The three pooled matrices are then concatenated into a 6×1 feature vector.
5. Finally there is a fully connected layer: for a two-class problem, the 6 neurons are mapped to 2 output neurons. (A small sketch of these shapes follows below.)
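For concreteness, here is a minimal NumPy sketch of this shape flow. The data and filter values are random placeholders; only the shapes matter:

import numpy as np

sent = np.random.randn(7, 5)                     # 7 words, d=5: the 7x5 input matrix
feature_maps = []
for height in (4, 3, 2):                         # filter heights from the figure
    for _ in range(2):                           # two filters per height
        filt = np.random.randn(height, 5)
        # "valid" convolution: slide the filter down the sentence one row at a time
        fm = np.array([np.sum(sent[i:i + height] * filt)
                       for i in range(7 - height + 1)])
        feature_maps.append(fm)                  # shapes: (4,), (4,), (5,), (5,), (6,), (6,)
pooled = np.array([fm.max() for fm in feature_maps])  # 1-max pooling: 6 feature maps -> 6 values
W = np.random.randn(6, 2)                        # fully connected layer for 2 classes
logits = pooled @ W                              # the 2 output neurons
print(pooled.shape, logits.shape)                # (6,) (2,)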
I remember searching all over the web for code when I first learned this, so I'm sharing mine here.
Below is the code implementation, with plenty of comments added.
train.py
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
# Parameters
# ==================================================

# Data loading params
# Corpus file paths; 10% of the training data is held out as the validation (dev) set
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

# Model Hyperparameters
# Embedding dimension 128; three filter sizes (3, 4, 5) with 128 filters each; dropout keep probability 0.5
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")

# Training parameters
# Batch size 64; 50 epochs; evaluate on the dev set and save a checkpoint every 100 steps
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 50, "Number of training epochs (default: 50)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")

# Misc Parameters
# allow_soft_placement=True lets TF fall back to a supported device if the requested one does not exist.
# Set log_device_placement to True to see which device each operation is assigned to.
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

# Parse and print the flags; FLAGS is an object holding the parsed command-line arguments
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()  # note: works on older TF 1.x; newer versions use absl flags (e.g. FLAGS.flag_values_dict())
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
    print("{}={}".format(attr.upper(), value))
print("")
def preprocess():
    # Data Preparation
    # ==================================================

    # Load data
    print("Loading data...")
    # Pass in the positive and negative sample files
    """
    print("start_time" + "\t\t" + str(datetime.datetime.now().isoformat()))
    x_text, y = data_helpers.load_data_and_labels(FLAGS.data_file)
    print("end_time" + "\t\t" + str(datetime.datetime.now().isoformat()))
    """
    x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
    # x_text is the combined list of positive and negative sentences;
    # y is a matrix of one-hot labels like [[0,1], [0,1], ..., [1,0]]
    # NOTE: this loads the whole dataset into memory at once; for large corpora
    # a streaming/chunked loading strategy would be needed instead.

    # Build vocabulary
    # The maximum number of words in any sentence: every sentence is padded to this
    # length so that all inputs have the same size.
    max_document_length = max([len(x.split(" ")) for x in x_text])
    # TensorFlow utility that maps each word to its index in the vocabulary and
    # pads every sentence to max_document_length (0 is the padding index).
    vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
    # The full constructor is VocabularyProcessor(max_document_length, min_frequency=0,
    # vocabulary=None, tokenizer_fn=None):
    #   max_document_length - documents longer than this are truncated, shorter ones are 0-padded
    #   min_frequency       - only words appearing more than this many times enter the vocabulary
    #   vocabulary          - an optional CategoricalVocabulary object
    #   tokenizer_fn        - tokenizer function that splits sentences/text into tokens
    # Convert each sentence into a row of word indices:
    x = np.array(list(vocab_processor.fit_transform(x_text)))
    # e.g. (sentences shorter than max_document_length are padded with 0):
    # x = [[   1   2   3 ...   0   0   0]
    #      [   1  31  32 ...   0   0   0]
    #      ...
    #      [ 343 956 957 ...   0   0   0]]
    #print(x)

    # Randomly shuffle data
    np.random.seed(10)
    # np.random.permutation(arr) returns a shuffled copy of arr.
    # np.arange takes one to three arguments:
    #   1) one argument:    the stop value; start defaults to 0 and step to 1
    #   2) two arguments:   start and stop; step defaults to 1
    #   3) three arguments: start, stop and step; the step may be fractional
    shuffle_indices = np.random.permutation(np.arange(len(y)))  # a random permutation of the sample indices
    x_shuffled = x[shuffle_indices]  # samples in the shuffled order
    y_shuffled = y[shuffle_indices]  # labels in the same shuffled order

    # Split train/test set
    # TODO: This is very crude, should use cross-validation
    # Split the data into 90% training and 10% validation (dev); everything here works on indices
    dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))  # negative index, counted from the end
    x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]  # slicing
    y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
    print(x_train)
    print(y_train)
    del x, y, x_shuffled, y_shuffled
    print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
    print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
    print("Max sentence length:", x_train.shape[1])
    print("Label dimension:", y_train.shape[1])
    print(list(map(int, FLAGS.filter_sizes.split(","))))
    return x_train, y_train, vocab_processor, x_dev, y_dev
def train(x_train, y_train, vocab_processor, x_dev, y_dev):
    # Training
    # ==================================================
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)  # session config: GPU/CPU soft placement as set above
        sess = tf.Session(config=session_conf)  # create a session with the config above
        with sess.as_default():
            # Build the convolution + pooling network
            cnn = TextCNN(
                sequence_length=x_train.shape[1],  # length of one (padded) sentence; shape[0] is the sample count
                num_classes=y_train.shape[1],      # number of classes
                vocab_size=len(vocab_processor.vocabulary_),  # vocabulary size
                embedding_size=FLAGS.embedding_dim,           # word-vector length, 128
                filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),  # "3,4,5" split on "," -> [3, 4, 5]
                num_filters=FLAGS.num_filters,                # 128 filters per size
                l2_reg_lambda=FLAGS.l2_reg_lambda)            # L2 regularization coefficient
            # TextCNN is a class; constructing it builds the whole CNN graph.

            # Define Training procedure
            # global_step simply counts the number of training steps taken so far
            global_step = tf.Variable(0, name="global_step", trainable=False)
            # Optimizer: Adam with learning rate 1e-3
            optimizer = tf.train.AdamOptimizer(1e-3)
            # Compute the gradients, the first half of what minimize() does;
            # returns (gradient, variable) pairs for the loss
            grads_and_vars = optimizer.compute_gradients(cnn.loss)
            # Both minimize() and compute_gradients() accept a gate_gradients argument that
            # controls how much parallelism is used when applying the gradients (not set here).
            # Apply the computed gradients to the variables, the second half of minimize();
            # returns an op that applies the gradients and increments global_step
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
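            # In other words, the three lines above are equivalent to this one-liner
            # (shown for clarity, not used here):
            #   train_op = optimizer.minimize(cnn.loss, global_step=global_step)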
            # Keep track of gradient values and sparsity (optional)
            grad_summaries = []
            for g, v in grads_and_vars:  # g is the gradient, v the variable
                if g is not None:
                    grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
                    sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                    grad_summaries.append(grad_hist_summary)
                    grad_summaries.append(sparsity_summary)
            grad_summaries_merged = tf.summary.merge(grad_summaries)

            # Output directory for models and summaries
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
            print("Writing to {}\n".format(out_dir))

            # Summaries for loss and accuracy
            loss_summary = tf.summary.scalar("loss", cnn.loss)
            acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)

            # Train Summaries: merge them and write to the train summary directory
            train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
            train_summary_dir = os.path.join(out_dir, "summaries", "train")
            train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

            # Dev summaries
            dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
            dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

            # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
            checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
            checkpoint_prefix = os.path.join(checkpoint_dir, "model")
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            # Save the model, keeping at most num_checkpoints (5) checkpoints
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)

            # Write vocabulary
            vocab_processor.save(os.path.join(out_dir, "vocab"))

            # Initialize all variables
            sess.run(tf.global_variables_initializer())

            def train_step(x_batch, y_batch):
                """
                A single training step: one batch in, one gradient update out
                """
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: FLAGS.dropout_keep_prob  # defined in the flags above
                }
                # Apply the gradient update, advance the step counter, record summaries,
                # and compute this batch's loss and accuracy
                _, step, summaries, loss, accuracy = sess.run(
                    [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
                    feed_dict)
                time_str = datetime.datetime.now().isoformat()  # current time
                print("train*********{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
                train_summary_writer.add_summary(summaries, step)

            def dev_step(x_batch, y_batch, writer=None):
                """
                Evaluates the model on a dev set (no gradient update)
                """
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: 1.0  # keep all neurons at evaluation time
                }
                step, summaries, loss, accuracy = sess.run(
                    [global_step, dev_summary_op, cnn.loss, cnn.accuracy],
                    feed_dict)
                time_str = datetime.datetime.now().isoformat()
                print("dev************{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
                if writer:
                    writer.add_summary(summaries, step)
                return accuracy

            # Generate batches: batch_iter is a generator that yields one batch at a time
            # rather than materializing a list [batch1, batch2, batch3, ...] in memory
            batches = data_helpers.batch_iter(
                list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
            # zip pairs each sample with its label

            # Training loop. For each batch...
            for batch in batches:
                x_batch, y_batch = zip(*batch)  # unzip the batch back into samples and labels
                train_step(x_batch, y_batch)    # train on this batch and update the model
                current_step = tf.train.global_step(sess, global_step)  # read the current global_step value
                # Evaluate on the dev set every evaluate_every (100) steps
                if current_step % FLAGS.evaluate_every == 0:
                    print("\nEvaluation:")
                    # The dev set is fed in whole, so this evaluation batch is larger than batch_size
                    accuracy = dev_step(x_dev, y_dev, writer=dev_summary_writer)
                    # Average the dev accuracy over all evaluations from step 1500 onwards
                    # (current_step/100 - 14 is the number of evaluations since then)
                    if 1590 > current_step > 1490:
                        accuracy_avr = accuracy
                        print(accuracy_avr)
                    if current_step > 1590:
                        accuracy_avr += accuracy
                        avr_0 = accuracy_avr / (current_step / 100 - 14)
                        print("Average accuracy:", avr_0)
                # Save a model checkpoint every checkpoint_every (100) steps
                if current_step % FLAGS.checkpoint_every == 0:
                    path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                    print("Saved model checkpoint to {}\n".format(path))

def main(argv=None):
    x_train, y_train, vocab_processor, x_dev, y_dev = preprocess()
    train(x_train, y_train, vocab_processor, x_dev, y_dev)

if __name__ == '__main__':
    tf.app.run()
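Before moving on, here is a minimal sketch of what VocabularyProcessor does inside preprocess(). The sentences are made up and the exact ids depend on the order words are first seen; the point is the 0-padding to a common length:

import numpy as np
from tensorflow.contrib import learn

texts = ["i like english", "how are you today"]
max_len = max(len(t.split(" ")) for t in texts)   # 4
vp = learn.preprocessing.VocabularyProcessor(max_len)
x = np.array(list(vp.fit_transform(texts)))
print(x)
# [[1 2 3 0]      <- "i like english", padded with 0 to length 4
#  [4 5 6 7]]     <- "how are you today"
print(len(vp.vocabulary_))                        # vocabulary size (index 0 is reserved for <UNK>/padding)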
text_cnn.py
import tensorflow as tf
import numpy as np

# This file defines the TextCNN graph as a class. The structure is simple:
# an embedding layer + a convolution layer (ReLU) + a max-pooling layer + softmax.
class TextCNN(object):  # the class wraps one large graph
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    # sequence_length: maximum sentence length (in words)
    # num_classes:     number of classes
    # vocab_size:      vocabulary size
    # embedding_size:  word-vector length
    # filter_sizes:    filter heights, e.g. 3, 4, 5
    # num_filters:     number of filters per size
    # l2_reg_lambda:   L2 regularization coefficient
    def __init__(
        self, sequence_length, num_classes, vocab_size,
        embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # The parameters come from the TextCNN(...) call in train.py; the inputs
        # are the word-index representations of the sentences.

        # Placeholders for input, output and dropout
        # input_x: the input sentences, each a row of sequence_length word indices.
        # None means the number of samples is not fixed: it is batch_size during
        # training but can be the whole dev set during evaluation.
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        # input_y: the labels, a float tensor of shape (samples, num_classes),
        # here "positive || negative"
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        # Dropout keep probability, used during training to prevent overfitting
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        # A placeholder is a graph node that data is fed into; naming it helps visualization.

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)  # initialize the L2 accumulator, a bit like sum = 0
        # (the softmax/output layer below does add to it)

        # Embedding layer
        # Pinned to the CPU because embedding lookup does not run on the GPU in this
        # TF version; tf.name_scope wraps the three ops in a module named "embedding"
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            # W is a (vocab_size x embedding_size) tensor: the word-vector table itself
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            # Look up each word index: input_x has shape [None, sequence_length],
            # so the output has shape [None, sequence_length, embedding_size]
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            # Add a channel dimension: [batch_size, sequence_length, embedding_size, 1],
            # the 4-D layout that convolutions expect, much like an image
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
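            # Concretely (example values): batch 64, sequence_length 56, embedding_size 128
            # -> embedded_chars: [64, 56, 128]; embedded_chars_expanded: [64, 56, 128, 1]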
        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        # Three different filter sizes, hence the loop; enumerate yields pairs
        # like (0, 3), (1, 4), (2, 5)
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):  # e.g. a module named "conv-maxpool-3"
                # Convolution Layer
                # Filter shape: height (filter_size) x width (embedding_size) x channels (1) x number of filters
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                # Filter weights, initialized from a truncated normal distribution
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                # Bias, one per filter, initialized to a constant
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                # The convolution itself
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,  # the matrix to convolve over
                    W,                             # the filter weights
                    strides=[1, 1, 1, 1],          # stride per dimension: sample, height, width, channel
                    padding="VALID",               # no padding needed here
                    name="conv")
                # Apply nonlinearity (activation function)
                # Intuition: certain marker words signal a positive or negative review; ReLU
                # strengthens confident activations and zeroes out weak ones, which adds
                # sparsity, speeds up learning, and suppresses noise.
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
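                # Concretely (example values): sequence_length 56, filter_size 3, num_filters 128
                # -> conv and h have shape [batch, 56 - 3 + 1, 1, 128] = [batch, 54, 1, 128]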
                # Maxpooling over the outputs
                # The conv output height is (h - filter_size + 2*padding)/stride + 1
                # = sequence_length - filter_size + 1, and we pool over all of it at once
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',  # no padding needed here
                    name="pool")
                """
                # Average-pooling variant:
                pooled = tf.nn.avg_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    data_format='NHWC',
                    name=None
                )
                """
                # After pooling, each filter contributes one value per sample, so pooled has
                # shape [batch_size, 1, 1, num_filters]; with three filter sizes we append three times
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        # Total features = filters per size * number of filter sizes
        num_filters_total = num_filters * len(filter_sizes)
        # Concatenate the pooled outputs along the 4th dimension, e.g. three
        # [batch, 1, 1, num_filters] tensors become one [batch, 1, 1, 3 * num_filters] tensor
        self.h_pool = tf.concat(pooled_outputs, 3)
        # Flatten to [batch, num_filters_total] so it can feed the fully connected layer
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        # Dropout guards against overfitting. Overfitting is essentially sampling distortion:
        # noise weights skew the decision. With enough representative data the influence of
        # noise shrinks toward the truth, so the larger the dataset, the less dropout and
        # regularization are needed.
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
            # one op in a "dropout" module; output is the dropped-out [batch, num_filters_total] tensor

        # Final (unnormalized) scores and predictions
        # Fully connected output layer
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],  # e.g. [384, 2]: num_filters_total features into 2 classes
                initializer=tf.contrib.layers.xavier_initializer())  # Xavier initialization; note tf.get_variable here
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            # Add the output layer's weights and bias to the L2 regularization term
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            # scores = xW + b, the raw logits, e.g. [0.2, 0.7].
            # (The original post wrapped this in tf.nn.softmax, but
            # softmax_cross_entropy_with_logits below expects raw logits and applies softmax
            # itself, so the extra softmax is dropped here; the commented-out focal-loss
            # variant below would need softmax probabilities instead.)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            # The predicted class is the index of the larger score, 0 or 1
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            # Cross-entropy loss; for a batch this is a tensor of length batch_size
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            # Mean cross-entropy over the batch, plus the L2 penalty on the output layer
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
        """
        # Focal-loss variant (expects self.scores to be softmax probabilities):
        with tf.name_scope("loss"):
            epsilon = 1.e-7
            gamma = 2.
            alpha = tf.constant(0.5, dtype=tf.float32)
            self.input_y = tf.cast(self.input_y, tf.float32)
            self.scores = tf.clip_by_value(self.scores, epsilon, 1. - epsilon)
            alpha_t = self.input_y*alpha + (tf.ones_like(self.input_y)-self.input_y)*(1-alpha)
            y_t = tf.multiply(self.input_y, self.scores) + tf.multiply(1-self.input_y, 1-self.scores)
            ce = -tf.log(y_t)
            weight = tf.pow(tf.subtract(1., y_t), gamma)
            fl = tf.multiply(tf.multiply(weight, ce), alpha_t)
            losses = tf.reduce_mean(fl)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
        """

        # Accuracy
        with tf.name_scope("accuracy"):
            # A batch-sized tensor of booleans: does each prediction match its label?
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            # The mean of those values (cast to float) is the accuracy
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
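To sanity-check the tensor shapes end to end, here is a minimal sketch, assuming text_cnn.py above is importable; the batch size is left unspecified, and vocab_size=1000 / sequence_length=56 are made-up values:

import tensorflow as tf
from text_cnn import TextCNN

with tf.Graph().as_default():
    cnn = TextCNN(sequence_length=56, num_classes=2, vocab_size=1000,
                  embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128)
    print(cnn.embedded_chars_expanded.get_shape())  # (?, 56, 128, 1)
    print(cnn.h_pool.get_shape())                   # (?, 1, 1, 384)
    print(cnn.h_pool_flat.get_shape())              # (?, 384): 384 = 128 filters x 3 sizes
    print(cnn.scores.get_shape())                   # (?, 2)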
data_helpers.py
import numpy as np
import re

def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    # Clean the data: replace meaningless symbols with spaces and normalize punctuation
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
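# For example (hypothetical input):
#   clean_str("I can't wait!")  ->  "i ca n't wait !"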
def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    positive_examples = list(open(positive_data_file, "r", encoding='utf-8').readlines())
    # Strip leading/trailing whitespace, giving a list like ['I like english', 'how are you', ...]
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open(negative_data_file, "r", encoding='utf-8').readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    # Clean every sentence; see clean_str() above
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels: two classes only, positive reviews and negative reviews
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    # np.concatenate joins arrays along an axis, e.g.:
    #   a = np.array([[1, 2], [3, 4]]); b = np.array([[5, 6]])  # b is a 2-D array
    #   np.concatenate((a, b), axis=0)   -> [[1, 2], [3, 4], [5, 6]]
    #   np.concatenate((a, b.T), axis=1) -> [[1, 2, 5], [3, 4, 6]]
    y = np.concatenate([positive_labels, negative_labels], 0)  # stack the two label lists
    # x_text is the combined positive and negative sentences;
    # y is the matrix of one-hot labels [[0,1], [0,1], ..., [1,0]]
    return [x_text, y]

def batch_iter(data, batch_size, num_epochs, shuffle=True):  # shuffle=True reshuffles each epoch
    """
    Generates a batch iterator for a dataset: a generator that yields one batch of
    (feature, label) pairs at a time, i.e. shuffled_data[start_index:end_index].
    """
    data = np.array(data)  # convert all the data to an array
    data_size = len(data)
    num_batches_per_epoch = int((len(data)-1)/batch_size) + 1  # batches per epoch
    print("Number of training samples:", data_size)
    print("batch_size:", batch_size)
    print("Batches per epoch:", num_batches_per_epoch)
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        if shuffle:
            # np.random.permutation randomly reorders a sequence or array
            shuffle_indices = np.random.permutation(np.arange(data_size))
            shuffled_data = data[shuffle_indices]  # samples in the new shuffled order
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):  # generate the batches
            start_index = batch_num * batch_size  # where the current batch starts
            # min() handles the last batch, which may contain fewer than batch_size samples
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
            # Because this is a generator, each iteration of the caller's for-loop
            # receives one batch, and memory usage stays constant.
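And a quick sketch of how batch_iter behaves, using toy data and shuffle=False so the output order is deterministic:

from data_helpers import batch_iter

data = list(zip(range(5), range(5)))  # 5 toy (feature, label) pairs
for batch in batch_iter(data, batch_size=2, num_epochs=1, shuffle=False):
    print(batch.tolist())
# [[0, 0], [1, 1]]
# [[2, 2], [3, 3]]
# [[4, 4]]  <- the last batch may be smaller than batch_size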
I have uploaded the project to a network drive; feel free to download it. It runs in a TensorFlow environment. I documented how to set up TensorFlow in an earlier blog post: when I first started, installing it took me several days, whereas now it takes a few minutes, so I wrote it down to save others the detour.
Link: https://pan.baidu.com/s/1ReGvytsuZxdPm2Qo4vfgXg
Extraction code: v2hy