In [1]:
import tensorflow as tf
import numpy as np
import gensim
import sys
import datetime,time
from six.moves import cPickle as pickle

In [2]:
def log_time_model(s):
    now = datetime.datetime.now()
    print "[%s][%s.%s][%s]" %(time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time())), now.second, now.    microsecond, s)

In [4]:
def reformat(dataset, labels):
    # reshape a batch of sentence pairs to [-1, WordVec_Length, Word_Length_Default * 2, num_channels];
    # labels are accepted for a uniform call signature but are not used here
    dataset = dataset.reshape(
        (-1, WordVec_Length, Word_Length_Default * 2, num_channels)).astype(np.float32)
    return dataset

In [5]:
def reAdd(dataset, Filter_Width):
    # insert a Filter_Length x Filter_Width block of zeros between the two sentences of each pair,
    # so the real input x becomes [None, WordVec_Length, Word_Length_Default * 2 + Filter_Width, num_channels]
    return np.insert(dataset, [Word_Length_Default] * Filter_Width, 0, axis=2)
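
In [ ]:
# A minimal toy illustration (hypothetical shapes, not the real dataset):
# inserting Filter_Width = 3 zero columns at word position 2 of a (1, 2, 4, 1)
# array widens it to 4 + 3 = 7, mirroring how reAdd separates the two
# sentences of a pair along axis 2.
toy = np.ones([1, 2, 4, 1], dtype=np.float32)
print np.insert(toy, [2, 2, 2], 0, axis=2).shape   # (1, 2, 7, 1)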

In [6]:
def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])
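
In [ ]:
# A quick illustrative check of accuracy() with hypothetical one-hot labels:
# two of the three predictions pick the correct class, so it returns ~66.7.
demo_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
demo_lab = np.array([[1, 0], [0, 1], [0, 1]])
print accuracy(demo_pred, demo_lab)   # 66.666...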

In [7]:
# define wordvec parameter 
WordVec_Length = 50
WordVec_Depth = 1

# define batch size 
BatchSize = 2

# define filter parameter
Filter_Length = WordVec_Length
Filter_Width = 3
num_channels = WordVec_Depth
### define filter depth, which equals the input depth of the next layer's filters
Filter_Depth = 9

# one question's max word length, equal to the sentence length (sentences of more than 50 words)
Word_Length_Default = 50
# dim(y)
num_labels = 2

x = tf.placeholder(tf.float32, [BatchSize, WordVec_Length, Word_Length_Default * 2 + Filter_Width, num_channels],
                   name='x-input')
# dim[0] is the batch size, dim[1] and dim[2] are the size of a sentence pair, dim[3] is the depth of the pair
y = tf.placeholder(tf.float32, shape=(BatchSize, num_labels),name='y-input')


valid_x = tf.constant(valid_dataset)
valid_label = tf.constant(valid_labels)
test_x = tf.constant(test_dataset)

log_time_model("x.shape()" + str(x.get_shape()))


[2017-04-23 17-32-26][26.630982][x.shape()(2, 50, 103, 1)]
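
In [ ]:
# A minimal sanity check (illustrative random data, not the pickled dataset):
# run a fake batch through reformat() and reAdd() and confirm the result has
# the same shape as the x placeholder, (BatchSize, 50, 103, 1).
fake_raw = np.random.randn(BatchSize, WordVec_Length, Word_Length_Default * 2).astype(np.float32)
fake_batch = reAdd(reformat(fake_raw, None), Filter_Width)
print fake_batch.shape   # (2, 50, 103, 1), matching x.get_shape() above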

In [8]:
def inference(input_tensor):
    with tf.variable_scope('layer1-conv1'):

        conv1_filter_weights = tf.get_variable('weight',
                                              [Filter_Length,Filter_Width,num_channels,Filter_Depth],
                                              initializer=tf.truncated_normal_initializer(stddev=0.1))

        log_time_model("conv1_filter_weights = "+str(conv1_filter_weights.get_shape()))

        biases1 = tf.get_variable('biases',[Filter_Depth],initializer=tf.constant_initializer(0.1))

        log_time_model("biases1 = "+str(biases1.get_shape()))

        conv1 = tf.nn.conv2d(input_tensor, conv1_filter_weights, strides = [1,1,1,1],padding='VALID')

        log_time_model("conv1 = "+str(conv1.get_shape()))

        bias1 = tf.nn.bias_add(conv1, biases1)

        log_time_model("bias1 = "+str(bias1.get_shape()))

        actived_conv1 = tf.nn.relu(bias1)

        log_time_model("actived_conv1 = "+str(actived_conv1.get_shape()))

        s = (Word_Length_Default * 2 + Filter_Width) - Filter_Width +1
        # width of the conv output along dim 2 after the convolution
        log_time_model(s)

        # pool1: max-pool over the first sentence
        pool1 = tf.nn.max_pool(actived_conv1[:,:,0:Word_Length_Default - Filter_Width +1,:],
                               ksize=[1,1,Word_Length_Default - Filter_Width +1,1],strides=[1,1,1,1],padding='VALID')

        log_time_model("begin: 0:"+str(Word_Length_Default - Filter_Width +1))

        ## pool2: max-pool over the second sentence
        pool2 = tf.nn.max_pool(actived_conv1[:,:,Word_Length_Default +Filter_Width:s,:],
                               ksize=[1,1,s-Word_Length_Default - Filter_Width,1],strides=[1,1,1,1],padding='VALID')

        log_time_model("begin:"+str(Word_Length_Default +Filter_Width+1)+":"+str(s))

        log_time_model("pool1.get_shape() = "+ str(pool1.get_shape()))
        log_time_model("pool2.get_shape() = "+ str(pool2.get_shape()))

        simM = tf.get_variable('M',[Filter_Depth,Filter_Depth],initializer=tf.truncated_normal_initializer(stddev=0.1))
        # define the similarity matrix simM
        log_time_model("simM = "+str(simM.get_shape()))


        pool1 = tf.reshape(pool1,[BatchSize,Filter_Depth])
        log_time_model("pool1.get_shape() = "+ str(pool1.get_shape()))

        simmat = tf.matmul(pool1,simM)
        log_time_model("simmat1 shape = "+str(simmat.get_shape()))

        simmat = tf.reshape(simmat,[BatchSize,1,1,Filter_Depth])
        log_time_model("simmat reshape = "+str(simmat.get_shape()))
        log_time_model("pool2.get_shape() = "+ str(pool2.get_shape()))

        simmat = tf.matmul(simmat,pool2,transpose_b=True)
        pool2 = tf.reshape(pool2,[BatchSize,Filter_Depth])
        simmat = tf.reshape(simmat,[BatchSize,1])
        log_time_model("pool1.get_shape() = "+ str(pool1.get_shape()))
        log_time_model(" pool2 shape = "+str(pool2.get_shape()))
        log_time_model(" simmat shape = "+str(simmat.get_shape()))

        out = tf.concat([pool1,simmat,pool2],1)
        log_time_model(" out shape = "+str(out.get_shape()))

        # NEXT STEP: max-pool into two pooled vectors that do not need training,
        # define one trainable matrix M,
        # matrix-multiply the two pooled vectors through M,
        # flatten and concatenate,
        # fully connected layer,
        # softmax

    fulljoin_num = 200

    with tf.variable_scope('layer2-fulllink'):
        weights = tf.get_variable('weight',[out.get_shape().as_list()[1],fulljoin_num],
                                         initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable('biases',[fulljoin_num],initializer=tf.constant_initializer(0.1))

        hidden = tf.nn.relu(tf.matmul(out, weights) + biases)

        weights = tf.Variable(tf.truncated_normal([fulljoin_num, num_labels], stddev=0.1))

        biases = tf.Variable(tf.constant(0.1, shape=[num_labels]))

        out = tf.matmul(hidden, weights) + biases
        log_time_model(" out shape = "+str(out.get_shape()))
        
        return out
    
out = inference(x)
    
# Training computation.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=y))

# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(out)
# valid_prediction = tf.nn.softmax(inference(valid_x))
# test_prediction = tf.nn.softmax(inference(test_x))


[2017-04-23 17-32-26][26.873075][conv1_filter_weights = (50, 3, 1, 9)]
[2017-04-23 17-32-26][26.877523][biases1 = (9,)]
[2017-04-23 17-32-26][26.879547][conv1 = (2, 1, 101, 9)]
[2017-04-23 17-32-26][26.880758][bias1 = (2, 1, 101, 9)]
[2017-04-23 17-32-26][26.881589][actived_conv1 = (2, 1, 101, 9)]
[2017-04-23 17-32-26][26.881620][101]
[2017-04-23 17-32-26][26.890663][begin: 0:48]
[2017-04-23 17-32-26][26.898382][begin:54:101]
[2017-04-23 17-32-26][26.898439][pool1.get_shape() = (2, 1, 1, 9)]
[2017-04-23 17-32-26][26.898469][pool2.get_shape() = (2, 1, 1, 9)]
[2017-04-23 17-32-26][26.908185][simM = (9, 9)]
[2017-04-23 17-32-26][26.911109][pool1.get_shape() = (2, 9)]
[2017-04-23 17-32-26][26.912573][simmat1 shape = (2, 9)]
[2017-04-23 17-32-26][26.915291][simmat reshape = (2, 1, 1, 9)]
[2017-04-23 17-32-26][26.915336][pool2.get_shape() = (2, 1, 1, 9)]
[2017-04-23 17-32-26][26.922973][pool1.get_shape() = (2, 9)]
[2017-04-23 17-32-26][26.923027][ pool2 shape = (2, 9)]
[2017-04-23 17-32-26][26.923054][ simmat shape = (2, 1)]
[2017-04-23 17-32-26][26.925887][ out shape = (2, 19)]
[2017-04-23 17-32-26][26.960022][ out shape = (2, 2)]
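
In [ ]:
# A minimal NumPy sketch (hypothetical values, outside the TensorFlow graph) of
# the bilinear similarity computed in layer1-conv1: sim = v1' * M * v2, where v1
# and v2 are the max-pooled sentence vectors. The concatenated feature is
# [v1, sim, v2], i.e. 9 + 1 + 9 = 19 values per sample, matching the logged out shape.
v1 = np.random.randn(Filter_Depth).astype(np.float32)
v2 = np.random.randn(Filter_Depth).astype(np.float32)
M = np.random.randn(Filter_Depth, Filter_Depth).astype(np.float32)
sim = v1.dot(M).dot(v2)                     # scalar similarity score
feature = np.concatenate([v1, [sim], v2])   # shape (19,)
print feature.shape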

In [ ]:


In [9]:
# with tf.Session() as sess:
#     print sess.run(a.initializer)
#     print dir(a.get_shape()[1].value)
#     print a.get_shape()[1].value

In [10]:
# read dataset
def openFile():
    pickle_file = 'dataset.pickle'
    with open(pickle_file, 'rb') as f:
        data = pickle.load(f)
        ## sample matrix for the first sentence: np.ndarray((samplesize, 300, 50), dtype=np.float32)
        train_dataset1 = data['train_dataset1'] 
        ## sample matrix for the second sentence: np.ndarray((samplesize, 300, 50), dtype=np.float32)
        train_dataset2 = data['train_dataset2']
        ## class labels: np.ndarray((samplesize,), dtype=np.int32)
        train_labels = data['train_labels']
        ## validation set
        valid_dataset1 = data['valid_dataset1'] 
        valid_dataset2 = data['valid_dataset2']
        valid_labels = data['valid_labels']
        ## test set
        test_dataset1 = data['test_dataset1'] 
        test_dataset2 = data['test_dataset2']