Preface
The previous articles covered vulnerability-intelligence collection, data processing, training-data generation and related preparation. The goal is to train a named entity recognition model for vulnerability intelligence: given a piece of mixed Chinese/English vulnerability text, the model outputs the vendors (company), products (product), CVE identifiers (CVE number) and versions (version) it contains. The model is a plain bidirectional LSTM + CRF (conditional random field), one of the standard baselines for deep-learning-based named entity recognition. This post focuses on the model code and the training/test samples as a summary. The model is built with the TensorFlow framework, version 1.4.0; the code is as follows:
import numpy as np
import time
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib.crf import crf_log_likelihood
from tensorflow.contrib.crf import viterbi_decode
from data_helper import batch_yield, pad_sequences
from utils import get_logger
class BiLSTM_CRF(object):
def __init__(self, args, embeddings, tag2label, vocab, paths, config):
# model initialization
self.batch_size = args.batch_size
self.epoch_num = args.epoch
self.hidden_dim = args.hidden_dim
self.embeddings = embeddings
self.CRF = args.CRF
self.update_embedding = args.update_embedding
self.dropout_keep_prob = args.dropout
self.optimizer = args.optimizer
self.lr = args.lr
self.clip_grad = args.clip
self.tag2label = tag2label
self.num_tags = len(tag2label)
self.vocab = vocab
self.shuffle = args.shuffle
self.model_path = paths['model_path']
self.summary_path = paths['summary_path']
self.logger = get_logger(paths['log_path'])
self.result_path = paths['result_path']
self.config = config
def build_graph(self):
# graph construction
self.add_placeholders() # create the placeholders
self.lookup_layer_op() # lookup layer: word_id -> embedding
self.biLSTM_layer_op() # BiLSTM layer: the sentence encoder
self.softmax_pred_op() # softmax prediction, used only when the CRF layer is disabled
self.loss_op() # loss
self.trainstep_op() # training op
self.init_op() # variable initializer
def add_placeholders(self):
self.word_ids = tf.placeholder(tf.int32, shape=[None, None], name="word_ids")
self.labels = tf.placeholder(tf.int32, shape=[None, None], name="labels")
self.sequence_lengths = tf.placeholder(tf.int32, shape=[None], name="sequence_lengths")
self.dropout_pl = tf.placeholder(dtype=tf.float32, shape=[], name="dropout")
self.lr_pl = tf.placeholder(dtype=tf.float32, shape=[], name="lr")
def lookup_layer_op(self):
with tf.variable_scope("words"):
_word_embeddings = tf.Variable(self.embeddings,
dtype=tf.float32,
trainable=self.update_embedding,
name="_word_embeddings")
word_embeddings = tf.nn.embedding_lookup(params=_word_embeddings,
ids=self.word_ids,
name="word_embeddings")
self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout_pl)
def biLSTM_layer_op(self):
with tf.variable_scope("bi-lstm"):
# use bidirectional LSTM cells as the network units
cell_fw = LSTMCell(self.hidden_dim)
cell_bw = LSTMCell(self.hidden_dim)
# forward outputs, backward outputs; final states are discarded
(output_fw_seq, output_bw_seq), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
inputs=self.word_embeddings,
sequence_length=self.sequence_lengths,
dtype=tf.float32)
# concatenate forward and backward outputs
output = tf.concat([output_fw_seq, output_bw_seq], axis=-1)
# apply dropout to reduce overfitting
output = tf.nn.dropout(output, self.dropout_pl)
# fully connected projection layer below
with tf.variable_scope("proj"):
W = tf.get_variable(name="W",
shape=[2 * self.hidden_dim, self.num_tags],
initializer=tf.contrib.layers.xavier_initializer(),
dtype=tf.float32)
b = tf.get_variable(name="b",
shape=[self.num_tags],
initializer=tf.zeros_initializer(),
dtype=tf.float32)
s = tf.shape(output)
output = tf.reshape(output, [-1, 2*self.hidden_dim])
pred = tf.matmul(output, W) + b
self.logits = tf.reshape(pred, [-1, s[1], self.num_tags])
def loss_op(self):
# CRF branch: use the CRF log-likelihood as the loss
if self.CRF:
log_likelihood, self.transition_params = crf_log_likelihood(inputs=self.logits,
tag_indices=self.labels,
sequence_lengths=self.sequence_lengths)
self.loss = -tf.reduce_mean(log_likelihood)
else:
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits,
labels=self.labels)
mask = tf.sequence_mask(self.sequence_lengths)
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
tf.summary.scalar("loss", self.loss)
def softmax_pred_op(self):
if not self.CRF:
self.labels_softmax_ = tf.argmax(self.logits, axis=-1)
self.labels_softmax_ = tf.cast(self.labels_softmax_, tf.int32)
def trainstep_op(self):
# training configuration
with tf.variable_scope("train_step"):
self.global_step = tf.Variable(0, name="global_step", trainable=False)
if self.optimizer == 'Adam':
optim = tf.train.AdamOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'Adadelta':
optim = tf.train.AdadeltaOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'Adagrad':
optim = tf.train.AdagradOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'RMSProp':
optim = tf.train.RMSPropOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'Momentum':
optim = tf.train.MomentumOptimizer(learning_rate=self.lr_pl, momentum=0.9)
elif self.optimizer == 'SGD':
optim = tf.train.GradientDescentOptimizer(learning_rate=self.lr_pl)
else:
optim = tf.train.GradientDescentOptimizer(learning_rate=self.lr_pl)
grads_and_vars = optim.compute_gradients(self.loss)
grads_and_vars_clip = [[tf.clip_by_value(g, -self.clip_grad, self.clip_grad), v] for g, v in grads_and_vars]
self.train_op = optim.apply_gradients(grads_and_vars_clip, global_step=self.global_step)
def init_op(self):
self.init_op = tf.global_variables_initializer()
def add_summary(self, sess):
"""
:param sess:
:return:
"""
self.merged = tf.summary.merge_all()
self.file_writer = tf.summary.FileWriter(self.summary_path, sess.graph)
def train(self, train_data, dev_data, train_label, dev_label):
saver = tf.train.Saver(tf.global_variables())
with tf.Session(config=self.config) as sess:
sess.run(self.init_op)
self.add_summary(sess)
for epoch in range(self.epoch_num):
self.run_one_epoch(sess, [train_data, train_label], [dev_data, dev_label], epoch, saver)
def test(self, test):
saver = tf.train.Saver()
with tf.Session(config=self.config) as sess:
self.logger.info('=========== testing ===========')
saver.restore(sess, self.model_path)
label_list, seq_len_list = self.dev_one_epoch(sess, test)
self.evaluate(label_list, seq_len_list, test)
def demo_one(self, sess, sent):
"""
:param sess:
:param sent:
:return:
"""
label_list = []
for seqs, labels in batch_yield(sent, 1, self.vocab, self.tag2label, shuffle=False):
label_list_, _ = self.predict_one_batch(sess, seqs)
label_list.extend(label_list_)
label2tag = {}
for tag, label in self.tag2label.items():
label2tag[label] = tag if label != 0 else label
tag = [label2tag[label] for label in label_list[0]]
return tag
def run_one_epoch(self, sess, train, dev, epoch, saver):
train_length = np.array(train).shape[1]
num_batches = (train_length + self.batch_size - 1) // self.batch_size
print('num_batches :{}'.format(num_batches))
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
batches = batch_yield(train, self.batch_size, self.vocab, self.tag2label, shuffle=self.shuffle)
for step, (seqs, labels) in enumerate(batches):
# print(' processing: {} batch / {} batches.'.format(step + 1, num_batches) + '\r')
step_num = epoch * num_batches + step + 1
feed_dict, _ = self.get_feed_dict(seqs, labels, self.lr, self.dropout_keep_prob)
_, loss_train, summary, step_num_ = sess.run([self.train_op, self.loss, self.merged, self.global_step],feed_dict=feed_dict)
if step + 1 == 1 or (step + 1) % 300 == 1 or step + 1 == num_batches:
print('{} epoch {}, step {}, loss: {:.4}, global_step: {}'.format(start_time, epoch + 1, step + 1,loss_train, step_num))
self.file_writer.add_summary(summary, step_num)
saver.save(sess, self.model_path, global_step=step_num)
print('===========validation / test===========')
label_list_dev, seq_len_list_dev = self.dev_one_epoch(sess, dev)
self.evaluate(label_list_dev, seq_len_list_dev, dev, epoch)
def get_feed_dict(self, seqs, labels=None, lr=None, dropout=None):
"""
:param seqs:
:param labels:
:param lr:
:param dropout:
:return: feed_dict
"""
word_ids, seq_len_list = pad_sequences(seqs, pad_mark=0)
feed_dict = {self.word_ids: word_ids,
self.sequence_lengths: seq_len_list}
if labels is not None:
labels_, _ = pad_sequences(labels, pad_mark=0)
feed_dict[self.labels] = labels_
if lr is not None:
feed_dict[self.lr_pl] = lr
if dropout is not None:
feed_dict[self.dropout_pl] = dropout
return feed_dict, seq_len_list
def dev_one_epoch(self, sess, dev):
"""
:param sess:
:param dev:
:return:
"""
label_list, seq_len_list = [], []
for seqs, labels in batch_yield(dev, self.batch_size, self.vocab, self.tag2label, shuffle=False):
label_list_, seq_len_list_ = self.predict_one_batch(sess, seqs)
label_list.extend(label_list_)
seq_len_list.extend(seq_len_list_)
return label_list, seq_len_list
def predict_one_batch(self, sess, seqs):
"""
:param sess:
:param seqs:
:return: label_list
seq_len_list
"""
feed_dict, seq_len_list = self.get_feed_dict(seqs, dropout=1.0)
if self.CRF:
logits, transition_params = sess.run([self.logits, self.transition_params],
feed_dict=feed_dict)
label_list = []
for logit, seq_len in zip(logits, seq_len_list):
viterbi_seq, _ = viterbi_decode(logit[:seq_len], transition_params)
label_list.append(viterbi_seq)
return label_list, seq_len_list
else:
label_list = sess.run(self.labels_softmax_, feed_dict=feed_dict)
return label_list, seq_len_list
def evaluate(self, label_list, seq_len_list, data, epoch=None):
label2tag = {}
for tag, label in self.tag2label.items():
label2tag[label] = tag
label = data[1]
total = 0
true = 0
for index ,item in enumerate(label_list):
predict_result = [label2tag[label_] for label_ in item]
ground_truth = label[index]
assert len(predict_result) == len(ground_truth)
total += len(predict_result)
for index,item in enumerate(ground_truth):
if ground_truth[index] == predict_result[index]:
true += 1
print('Evaluate accuracy is :{}'.format(true/total))
The code above is the complete model; the sections below walk through it module by module.
placeholder
def add_placeholders(self):
self.word_ids = tf.placeholder(tf.int32, shape=[None, None], name="word_ids")
self.labels = tf.placeholder(tf.int32, shape=[None, None], name="labels")
self.sequence_lengths = tf.placeholder(tf.int32, shape=[None], name="sequence_lengths")
self.dropout_pl = tf.placeholder(dtype=tf.float32, shape=[], name="dropout")
self.lr_pl = tf.placeholder(dtype=tf.float32, shape=[], name="lr")
tf.placeholder is one of TensorFlow's basic building blocks. When the computation graph is built, a placeholder only reserves a slot in the graph; once a session is running, the actual input data is fed into that slot through feed_dict, so the model's inputs can be streamed into the graph batch by batch. In this model, word_ids holds the token indices, labels the label indices, sequence_lengths the length of each sentence in a batch, dropout_pl the dropout keep probability, and lr_pl the learning rate used during back-propagation. tf.int32 / tf.float32 specify the placeholder's dtype, and shape its dimensions; a dimension of None means that axis exists but its size is not fixed until run time.
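As a minimal sketch of how a placeholder is fed at run time (the names and numbers below are illustrative only, not part of the model):
import tensorflow as tf  # TF 1.x, matching the version used in this post

x = tf.placeholder(tf.int32, shape=[None, None], name="x")  # [batch, seq_len], both sizes dynamic
doubled = x * 2
with tf.Session() as sess:
    # the second sentence is shorter, so it is padded with 0, just as pad_sequences pads word_ids
    print(sess.run(doubled, feed_dict={x: [[4, 7, 9], [5, 2, 0]]}))  # [[ 8 14 18] [10  4  0]]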
lookup
def lookup_layer_op(self):
with tf.variable_scope("words"):
_word_embeddings = tf.Variable(self.embeddings,
dtype=tf.float32,
trainable=self.update_embedding,
name="_word_embeddings")
word_embeddings = tf.nn.embedding_lookup(params=_word_embeddings,
ids=self.word_ids,
name="word_embeddings")
self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout_pl)
This module converts token indices into character embeddings. The embedding matrix, passed in as a numpy array, is first wrapped into a TensorFlow variable with tf.Variable() (dtype tf.float32); trainable controls whether the embedding matrix is updated during training. tf.nn.embedding_lookup then replaces every token index in the input with its embedding vector: for a sentence of n tokens, each with a unique index, looking each index up in the embedding matrix yields an n × m matrix, where m is the embedding dimension.
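A small self-contained sketch of the lookup; the vocabulary size, dimensions and ids below are made up for illustration:
import numpy as np
import tensorflow as tf  # TF 1.x

# toy setting: 5-word vocabulary, 3-dimensional embeddings
embedding_matrix = np.random.randn(5, 3).astype("float32")
emb_var = tf.Variable(embedding_matrix, trainable=True, name="toy_embeddings")
word_ids = tf.placeholder(tf.int32, shape=[None, None])        # [batch, seq_len]
word_vectors = tf.nn.embedding_lookup(emb_var, word_ids)       # [batch, seq_len, 3]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(word_vectors, feed_dict={word_ids: [[0, 3, 1]]}).shape)  # (1, 3, 3)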
BiLSTM_layer
def biLSTM_layer_op(self):
with tf.variable_scope("bi-lstm"):
# use bidirectional LSTM cells as the network units
cell_fw = LSTMCell(self.hidden_dim)
cell_bw = LSTMCell(self.hidden_dim)
# forward outputs, backward outputs; final states are discarded
(output_fw_seq, output_bw_seq), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
inputs=self.word_embeddings,
sequence_length=self.sequence_lengths,
dtype=tf.float32)
# concatenate forward and backward outputs
output = tf.concat([output_fw_seq, output_bw_seq], axis=-1)
# apply dropout to reduce overfitting
output = tf.nn.dropout(output, self.dropout_pl)
The bidirectional LSTM is the core of the model: cell_fw and cell_bw are each built with the LSTMCell() constructor.
LSTM (long short-term memory) cells come from tensorflow.contrib.rnn; cell_fw and cell_bw are the forward and backward networks. tf.nn.bidirectional_dynamic_rnn computes the forward and backward LSTM outputs, tf.concat joins them, and a fully connected layer then projects the concatenated hidden states to per-tag scores, giving the sentence's hidden representation (a shape walkthrough follows the snippet). The fully connected layer code is:
# fully connected projection layer below
with tf.variable_scope("proj"):
W = tf.get_variable(name="W",
shape=[2 * self.hidden_dim, self.num_tags],
initializer=tf.contrib.layers.xavier_initializer(),
dtype=tf.float32)
b = tf.get_variable(name="b",
shape=[self.num_tags],
initializer=tf.zeros_initializer(),
dtype=tf.float32)
s = tf.shape(output)
output = tf.reshape(output, [-1, 2*self.hidden_dim])
pred = tf.matmul(output, W) + b
self.logits = tf.reshape(pred, [-1, s[1], self.num_tags])
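To make the reshape in the projection concrete, here is a numpy-only shape walkthrough with made-up sizes (batch=2, seq_len=4, hidden_dim=3, num_tags=5):
import numpy as np

batch, seq_len, hidden_dim, num_tags = 2, 4, 3, 5
output = np.zeros((batch, seq_len, 2 * hidden_dim))   # concatenated fw/bw LSTM outputs
W = np.zeros((2 * hidden_dim, num_tags))
b = np.zeros(num_tags)
flat = output.reshape(-1, 2 * hidden_dim)              # [batch*seq_len, 2*hidden_dim]
logits = (flat @ W + b).reshape(batch, seq_len, num_tags)
print(logits.shape)                                    # (2, 4, 5): one tag score vector per token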
Loss function
def loss_op(self):
# CRF branch: use the CRF log-likelihood as the loss
if self.CRF:
log_likelihood, self.transition_params = crf_log_likelihood(inputs=self.logits,
tag_indices=self.labels,
sequence_lengths=self.sequence_lengths)
self.loss = -tf.reduce_mean(log_likelihood)
else:
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits,
labels=self.labels)
mask = tf.sequence_mask(self.sequence_lengths)
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
tf.summary.scalar("loss", self.loss)
The loss can be switched by a flag between the CRF log-likelihood and the softmax cross-entropy. For sequence labeling, a conditional random field fits better than plain cross-entropy: with this tagging scheme a B tag is followed by I or E tags, while an ordering such as E I B never occurs, so the target label sequence carries its own contextual constraints (for example, an E tag cannot be directly followed by an I tag). Softmax cross-entropy treats each position independently and ignores these output-level dependencies, whereas the CRF models them through its transition parameters, which generally yields a better model.
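As a small illustration of how the learned transition matrix constrains decoding at prediction time (this mirrors what predict_one_batch does; all scores below are made up):
import numpy as np
from tensorflow.contrib.crf import viterbi_decode  # same helper used in predict_one_batch

logits = np.array([[3.0, 1.0, 0.0],      # per-token emission scores over 3 tags
                   [1.0, 3.0, 1.0],
                   [0.5, 0.5, 2.0]])
transition_params = np.array([[ 1.0,  2.0, -5.0],   # transition_params[i][j]: score of tag i -> tag j
                              [-1.0,  1.0,  2.0],
                              [ 0.0, -2.0,  1.0]])
best_path, best_score = viterbi_decode(logits, transition_params)
print(best_path)  # the tag sequence maximizing emission + transition scores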
trainstep_op
def trainstep_op(self):
# training configuration
with tf.variable_scope("train_step"):
self.global_step = tf.Variable(0, name="global_step", trainable=False)
if self.optimizer == 'Adam':
optim = tf.train.AdamOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'Adadelta':
optim = tf.train.AdadeltaOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'Adagrad':
optim = tf.train.AdagradOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'RMSProp':
optim = tf.train.RMSPropOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'Momentum':
optim = tf.train.MomentumOptimizer(learning_rate=self.lr_pl, momentum=0.9)
elif self.optimizer == 'SGD':
optim = tf.train.GradientDescentOptimizer(learning_rate=self.lr_pl)
else:
optim = tf.train.GradientDescentOptimizer(learning_rate=self.lr_pl)
grads_and_vars = optim.compute_gradients(self.loss)
grads_and_vars_clip = [[tf.clip_by_value(g, -self.clip_grad, self.clip_grad), v] for g, v in grads_and_vars]
self.train_op = optim.apply_gradients(grads_and_vars_clip, global_step=self.global_step)
This module selects the optimizer type; Adam is the usual choice here. Gradient clipping (tf.clip_by_value with bound clip_grad) is also applied so that exploding gradients do not destabilize back-propagation.
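A tiny sketch of what tf.clip_by_value does to a gradient tensor (the values are made up):
import tensorflow as tf  # TF 1.x

grad = tf.constant([-12.0, -0.3, 0.7, 40.0])
clipped = tf.clip_by_value(grad, -5.0, 5.0)   # the same call applied to every gradient above
with tf.Session() as sess:
    print(sess.run(clipped))  # [-5.  -0.3  0.7  5. ]: each component limited to [-5, 5]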
Other modules
The remaining code defines graph initialization, TensorBoard summary collection for training and evaluation, and the functions for training, testing, validation and prediction; they are not described one by one here. The training script is as follows:
import tensorflow as tf
import numpy as np
import os, argparse, time, random
from model import BiLSTM_CRF
from utils import str2bool, get_logger
from data_helper import read_dictionary, random_embedding, read_files, read_tag_id
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
parser = argparse.ArgumentParser(description='BiLSTM-CRF for Chinese NER task') # argument parser
parser.add_argument('--data_path', type=str,
default=r'{projectPath}\NerData\train.txt',
help='train data source')
parser.add_argument('--word2id', type=str, default=r'{projectPath}\NerData\word_dic.pkl',
help='word2id source')
parser.add_argument('--tag2id', type=str, default=r'{projectPath}\NerData\label_dic.pkl',
help='word2id source')
parser.add_argument('--save_path', type=str,
default=r'{projectPath}\data_path_save',
help='test data source')
parser.add_argument('--batch_size', type=int, default=32, help='#sample of each minibatch')
parser.add_argument('--epoch', type=int, default=30, help='#epoch of training')
parser.add_argument('--hidden_dim', type=int, default=300, help='#dim of hidden state')
parser.add_argument('--optimizer', type=str, default='Adam',
help='Adam/Adadelta/Adagrad/RMSProp/Momentum/SGD')
parser.add_argument('--CRF', type=str2bool, default=True,
help='use CRF at the top layer. if False, use Softmax')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--clip', type=float, default=5.0, help='gradient clipping')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout keep_prob')
parser.add_argument('--update_embedding', type=str2bool, default=True,
help='update embedding during training')
parser.add_argument('--pretrain_embedding', type=str, default='random',
help='use pretrained char embedding or init it randomly')
parser.add_argument('--embedding_dim', type=int, default=300,
help='random init char embedding_dim')
parser.add_argument('--shuffle', type=str2bool, default=True,
help='shuffle training data before each epoch')
parser.add_argument('--mode', type=str, default='test', help='train/test/demo')
parser.add_argument('--demo_model', type=str, default='1662621164',
help='model for test and demo')
args = parser.parse_args()
word2id = read_dictionary(args.word2id)
tag2label = read_tag_id(args.tag2id)
if args.pretrain_embedding == 'random':
embeddings = random_embedding(word2id, args.embedding_dim)
else:
embedding_path = 'pretrain_embedding.npy'
embeddings = np.array(np.load(embedding_path), dtype='float32')
# -----------------read data--------------------
lines, label, seq_length = read_files(args.data_path)
assert len(lines) == len(label)
index = int(len(lines) * 0.9)
train_data, dev_data = lines[:index], lines[index:]
train_label, dev_label = label[:index], label[index:]
paths = {}
timestamp = str(int(time.time())) if args.mode == 'train' else args.demo_model
output_path = os.path.join(args.save_path, timestamp)
if not os.path.exists(output_path): os.makedirs(output_path)
summary_path = os.path.join(output_path, "summaries")
paths['summary_path'] = summary_path
if not os.path.exists(summary_path): os.makedirs(summary_path)
model_path = os.path.join(output_path, "checkpoints/")
if not os.path.exists(model_path): os.makedirs(model_path)
ckpt_prefix = os.path.join(model_path, "model")
paths['model_path'] = ckpt_prefix
result_path = os.path.join(output_path, "results")
paths['result_path'] = result_path
if not os.path.exists(result_path): os.makedirs(result_path)
log_path = os.path.join(result_path, "log.txt")
paths['log_path'] = log_path
get_logger(log_path).info(str(args))
if args.mode == 'train':
model = BiLSTM_CRF(args, embeddings, tag2label, word2id, paths, config=config)
model.build_graph()
print("train data: {}".format(len(train_data)))
model.train(train_data, dev_data, train_label, dev_label)
## testing model
elif args.mode == 'test':
test_data = read_files(r"{projectPath}\NerData\test.txt")
ckpt_file = tf.train.latest_checkpoint(model_path)
print(ckpt_file)
paths['model_path'] = ckpt_file
model = BiLSTM_CRF(args, embeddings, tag2label, word2id, paths, config=config)
model.build_graph()
print("test data: {}".format(test_data))
model.test(test_data)
The next post will cover the training function in detail and walk through the actual training process. Thanks♪(・ω・)ノ