In [ ]:
from __future__ import print_function  # must be the first statement in the cell

import tensorflow as tf
import numpy as np
from utils import *
from VDSH import *

filename = 'dataset/ng20.tfidf.mat'
data = Load_Dataset(filename)  # load the 20 Newsgroups TF-IDF dataset
latent_dim = 32                  # number of latent (hash code) dimensions
sess = get_session("0", 0.10)    # choose the GPU id and the fraction of GPU memory to allocate
model = VDSH(sess, latent_dim, data.n_feas)
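A quick look at what was loaded can catch path or preprocessing problems early. The optional cell below is only a sketch: it assumes nothing beyond the dataset attributes already used elsewhere in this notebook (train, test, n_feas, gnd_train, gnd_test).
In [ ]:
# Optional sanity check (not part of the original notebook): inspect the loaded data.
print('vocabulary size (n_feas):', data.n_feas)
print('train / test documents:', len(data.train), len(data.test))
print('labels available:', len(data.gnd_train), len(data.gnd_test))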
In [ ]:
# create an Adam optimizer with an exponentially decaying learning rate
learning_rate = 0.001
decay_rate = 0.96
decay_step = 10000
step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(learning_rate,
                                step,
                                decay_step,
                                decay_rate,
                                staircase=True, name="lr")
my_optimizer = tf.train.AdamOptimizer(learning_rate=lr) \
                       .minimize(model.cost, global_step=step)

init = tf.global_variables_initializer()
model.sess.run(init)
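With staircase=True, tf.train.exponential_decay multiplies the base rate by decay_rate once every decay_step global steps, i.e. learning_rate * decay_rate ** (step // decay_step). The optional cell below just prints that schedule at a few arbitrary example step counts so you can see what the optimizer will actually use.
In [ ]:
# Optional: reproduce the staircase learning-rate schedule defined above.
for s in (0, 9999, 10000, 20000, 50000):   # arbitrary example step counts
    print('step {:>6d} -> lr {:.6f}'.format(s, 0.001 * 0.96 ** (s // 10000)))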
In [ ]:
total_epoch = 25
kl_weight = 0.
kl_inc = 1 / 5000.  # annealing rate: the KL weight ramps linearly to 1 over 5000 updates

for epoch in range(total_epoch):
    epoch_loss = []
    for i in range(len(data.train)):
        # get one document and the indices of the words it contains
        doc = data.train[i]
        word_indices = np.where(doc > 0)[0]
        opt, loss = model.sess.run((my_optimizer, model.cost),
                                   feed_dict={model.input_bow: doc.reshape((-1, data.n_feas)),
                                              model.input_bow_idx: word_indices,
                                              model.kl_weight: kl_weight,
                                              model.keep_prob: 0.9})
        kl_weight = min(kl_weight + kl_inc, 1.0)
        epoch_loss.append(loss)
        if i % 50 == 0:
            print("\rEpoch:{}/{} {}/{}: Loss:{:.3f} AvgLoss:{:.3f}"
                  .format(epoch+1, total_epoch, i, len(data.train), loss, np.mean(epoch_loss)), end='')
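The KL term is annealed so the regularizer is phased in gradually rather than dominating early training. Under the settings above (kl_inc = 1/5000), the weight grows linearly and saturates at 1.0 after 5000 gradient updates; the optional cell below only prints that schedule at a few example update counts.
In [ ]:
# Optional: the KL annealing schedule used in the training loop above.
for update in (0, 1000, 2500, 5000, 10000):   # example update counts
    print('update {:>5d} -> kl_weight {:.2f}'.format(update, min(update / 5000., 1.0)))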
In [ ]:
# run the retrieval experiment: encode both splits, binarize the codes with
# median thresholds, then retrieve the Top-K nearest training documents by Hamming distance
zTrain = model.transform(data.train)
zTest = model.transform(data.test)
zTrain = np.array(zTrain)
zTest = np.array(zTest)

medHash = MedianHashing()
cbTrain = medHash.fit_transform(zTrain)   # learn per-dimension thresholds on the training codes
cbTest = medHash.transform(zTest)         # reuse those thresholds on the test codes

TopK = 100
print('Retrieve Top{} candidates using hamming distance'.format(TopK))
results = run_topK_retrieval_experiment(cbTrain, cbTest, data.gnd_train, data.gnd_test, TopK)
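MedianHashing is imported from utils, so its implementation is not shown in this notebook. The optional cell below is a minimal sketch of standard median-threshold binarization, assuming that is roughly what the utility does: per-dimension medians are estimated on the training codes and then reused to binarize the test codes. SimpleMedianHashing is a hypothetical name, not part of the VDSH codebase.
In [ ]:
# Minimal sketch of median-threshold binarization (an assumption about what
# MedianHashing in utils does; SimpleMedianHashing is a hypothetical helper).
class SimpleMedianHashing(object):
    def fit_transform(self, z_train):
        self.thresholds = np.median(z_train, axis=0)        # one threshold per latent dimension
        return (z_train > self.thresholds).astype(np.int8)  # binary codes for training docs
    def transform(self, z_test):
        return (z_test > self.thresholds).astype(np.int8)   # reuse the training thresholds

# Example usage on random codes with the same shape as zTrain/zTest:
# codes = SimpleMedianHashing().fit_transform(np.random.randn(100, latent_dim))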