The two sentences interact with each other from the very beginning, which captures their relationship more accurately. The similarity between each pair of words can be viewed as a pixel, so for two sentences with M and N words respectively, the similarity matrix is an M×N "image", and we can then let convolutions do the work.
1. The similarity between two words can be computed with the cosine distance, or simply with a dot product: $s(u_i, v_j) = u_i^\top v_j$, where $u_i$ and $v_j$ are the embedding vectors of the $i$-th word of sentence 1 and the $j$-th word of sentence 2; the cosine version just normalizes this to $\frac{u_i^\top v_j}{\|u_i\|\,\|v_j\|}$ (see the NumPy sketch after this list).
2. Two CNN layers then extract features from this matrix. Because the similarity matrices differ in shape (sentence lengths vary), dynamic pooling has to be used for the max-pool after the first CNN layer (see the dynamic-pooling sketch after this list).
3. Finally, a fully connected layer transforms the CNN output, and a softmax produces the final classification probabilities.
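To make step 1 concrete, here is a minimal NumPy sketch (toy shapes and random values; `sent1` and `sent2` stand in for the embedded sentences, and are not the model's actual data) that builds the M×N similarity picture with both the dot-product and the cosine variant:

import numpy as np

# Toy embeddings: sentence 1 has M=3 tokens, sentence 2 has N=4 tokens,
# each embedded in a 5-dimensional space (values are made up).
sent1 = np.random.rand(3, 5)   # shape [M, dim]
sent2 = np.random.rand(4, 5)   # shape [N, dim]

# Dot-product similarity: one matmul yields the full M x N "picture".
dot_sim = sent1 @ sent2.T      # shape [M, N]

# Cosine similarity: normalize each row to unit length first.
norm1 = sent1 / np.linalg.norm(sent1, axis=1, keepdims=True)
norm2 = sent2 / np.linalg.norm(sent2, axis=1, keepdims=True)
cos_sim = norm1 @ norm2.T      # shape [M, N], values in [-1, 1]

print(dot_sim.shape, cos_sim.shape)  # (3, 4) (3, 4)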
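Step 2's dynamic pooling is only approximated in the TensorFlow code below, which uses a fixed 4×4 window. The idea itself can be sketched in NumPy as follows: derive the pooling bins from the input shape, so that any M×N matrix pools down to the same fixed grid. This is a simplified illustration, assuming M and N are at least the output grid size:

import numpy as np

def dynamic_max_pool(picture, out_rows=4, out_cols=4):
    """Max-pool a variable-size similarity matrix down to a fixed
    out_rows x out_cols grid by splitting each axis into roughly
    equal-sized bins and taking the max inside each bin.
    Assumes picture.shape >= (out_rows, out_cols)."""
    m, n = picture.shape
    row_edges = np.linspace(0, m, out_rows + 1, dtype=int)
    col_edges = np.linspace(0, n, out_cols + 1, dtype=int)
    pooled = np.zeros((out_rows, out_cols), dtype=picture.dtype)
    for i in range(out_rows):
        for j in range(out_cols):
            block = picture[row_edges[i]:row_edges[i + 1],
                            col_edges[j]:col_edges[j + 1]]
            pooled[i, j] = block.max()
    return pooled

# A 7x9 matrix and a 12x5 matrix both pool down to 4x4.
print(dynamic_max_pool(np.random.rand(7, 9)).shape)   # (4, 4)
print(dynamic_max_pool(np.random.rand(12, 5)).shape)  # (4, 4)

With the output grid fixed, sentence pairs of any lengths yield feature vectors of identical size, which is exactly what the fully connected layer downstream requires.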
with tf.name_scope('embeddings'):
    # Token embedding table, shared by both sentences.
    self._m_token_embeddings = tf.Variable(
        tf.truncated_normal(
            [self._m_config["vocab_size"], self._m_config["embedding_dim"]],
            stddev=0.1
        ),
        name="token_embeddings"
    )
    embedded_sent1 = tf.nn.embedding_lookup(self._m_token_embeddings, self._m_ph_sent1)
    embedded_sent2 = tf.nn.embedding_lookup(self._m_token_embeddings, self._m_ph_sent2)
    dropout_embedded_sent1 = tf.nn.dropout(embedded_sent1, keep_prob=self._m_ph_keep_prob)
    dropout_embedded_sent2 = tf.nn.dropout(embedded_sent2, keep_prob=self._m_ph_keep_prob)

# Build the similarity matrix ("picture") and classify it with a CNN.
# sent.shape = [batch_size, sequence_length, dim]
picture = tf.matmul(dropout_embedded_sent1, dropout_embedded_sent2, transpose_b=True)
# Add a channel axis so the matrix can be fed to conv2d: [batch, M, N, 1].
self._m_picture = tf.expand_dims(picture, axis=-1)

pooled_outputs = []
for i, filter_size in enumerate(self._m_config["filter_sizes"]):
    with tf.name_scope("conv-max-pool-%s" % filter_size):
        # One square convolution kernel per configured filter size.
        filter_shape = [filter_size, filter_size, 1, 1]
        conv_weight = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
        conv_features = tf.nn.conv2d(
            input=self._m_picture,
            filter=conv_weight,
            strides=[1, 1, 1, 1],
            padding="SAME")
        # NOTE: a fixed 4x4 window is used here; true dynamic pooling
        # would derive the window size from the input shape.
        maxpool_features = tf.nn.max_pool(
            value=conv_features,
            ksize=[1, 4, 4, 1],
            strides=[1, 4, 4, 1],
            padding='VALID',
            name="pool")
        pooled_outputs.append(tf.layers.flatten(tf.squeeze(maxpool_features, axis=3)))

self._m_cnn_features = tf.concat(pooled_outputs, 1)
self._m_cnn_features_dropout = tf.nn.dropout(self._m_cnn_features, self._m_ph_keep_prob)

with tf.name_scope("full_connected_layer"):
    # Project the pooled CNN features onto the two output classes.
    feature_size = self._m_cnn_features_dropout.shape.as_list()[1]
    W = tf.get_variable(
        name="W",
        shape=[feature_size, 2],
        initializer=tf.contrib.layers.xavier_initializer())
    b = tf.Variable(tf.constant(0.1, shape=[2]), name="b")
    self._m_logits = tf.nn.xw_plus_b(self._m_cnn_features_dropout, W, b)

with tf.name_scope("loss"):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=self._m_ph_label, logits=self._m_logits)
    self._m_loss = tf.reduce_mean(cross_entropy)

with tf.name_scope("accuracy"):
    self._m_prediction = tf.argmax(self._m_logits, axis=1)
    correct = tf.equal(self._m_prediction, tf.argmax(self._m_ph_label, axis=1))
    self._m_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.name_scope("optimizer"):
    self._m_global_step = tf.Variable(0, name="global_step", trainable=False)
    self._m_optimizer = tf.train.AdamOptimizer(self._m_config["learning_rate"])
    self._m_train_op = self._m_optimizer.minimize(
        self._m_loss, global_step=self._m_global_step)
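For completeness, here is a minimal sketch of how this graph might be driven during training. The `model` instance, the `batches` iterator, and the feed shapes are assumptions for illustration; only the placeholder and op names come from the code above:

# Minimal training-loop sketch. `batches` is a hypothetical iterator
# yielding padded token-id matrices and one-hot labels.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for sent1_ids, sent2_ids, labels in batches:
        _, step, loss, acc = sess.run(
            [model._m_train_op, model._m_global_step,
             model._m_loss, model._m_accuracy],
            feed_dict={
                model._m_ph_sent1: sent1_ids,   # [batch, len1] token ids
                model._m_ph_sent2: sent2_ids,   # [batch, len2] token ids
                model._m_ph_label: labels,      # [batch, 2] one-hot labels
                model._m_ph_keep_prob: 0.5,     # dropout keep probability
            })
        if step % 100 == 0:
            print("step %d: loss=%.4f acc=%.4f" % (step, loss, acc))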
Could we instead do this with the currently popular attention mechanism? We'll pick that up in the next chapter.