In [1]:
%matplotlib inline
%load_ext autoreload
%autoreload 2
import tensorflow as tf
from data import generate_standard_dataset
import numpy as np

normal_ls = generate_standard_dataset('/home/taivu/workspace/NudityDetection/Dataset/train/normal_1', 224, 224)
nudity_ls = generate_standard_dataset('/home/taivu/workspace/NudityDetection/Dataset/train/nude_1', 224, 224)
labels = np.zeros(4000, dtype=np.uint)
dataset = []
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    # Interleave the two queues: even indices take a normal image,
    # odd indices take a nude image labelled 1.
    for idx in range(4000):
        if idx % 2 == 0:
            img = normal_ls.eval()
        else:
            img = nudity_ls.eval()
            labels[idx] = 1
        dataset.append(img)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=5)
dataset = np.array(dataset)
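The helper generate_standard_dataset is defined in data.py and not shown in this notebook. As a rough sketch of what it presumably does (the glob pattern default, the decode step, and the resize step are assumptions, not the actual implementation):
In [ ]:
# Hypothetical sketch of data.generate_standard_dataset; the real helper may differ.
import glob
import os
import tensorflow as tf

def generate_standard_dataset_sketch(image_dir, height, width, pattern='*.jpg'):
    # Queue every file in the directory matching the (assumed) glob pattern
    file_ls = glob.glob(os.path.join(image_dir, pattern))
    filename_queue = tf.train.string_input_producer(file_ls)
    reader = tf.WholeFileReader()
    _, img_raw = reader.read(filename_queue)
    # Decode to RGB and resize to the requested spatial size
    img = tf.image.decode_jpeg(img_raw, channels=3)
    img = tf.image.resize_images(img, [height, width])
    return img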
In [1]:
%matplotlib inline
%load_ext autoreload
%autoreload 2
import tensorflow as tf
from data import generate_standard_dataset
import numpy as np

normal_ls = generate_standard_dataset('/home/taivu/workspace/NudityDetection/Dataset/validation/normal', 224, 224)
nudity_ls = generate_standard_dataset('/home/taivu/workspace/NudityDetection/Dataset/validation/nude', 224, 224)
val_labels = np.zeros(400, dtype=np.uint)
val_dataset = []
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    for idx in range(400):
        if idx % 2 == 0:
            img = normal_ls.eval()
        else:
            img = nudity_ls.eval()
            val_labels[idx] = 1
        val_dataset.append(img)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=5)
val_dataset = np.array(val_dataset)
In [ ]:
%matplotlib inline
%load_ext autoreload
%autoreload 2
import tensorflow as tf
from data import generate_standard_dataset
import numpy as np

normal_ls = generate_standard_dataset('/home/taivu/workspace/NudityDetection/Dataset/test/normal_jpeg', 224, 224, '*.jpeg')
nudity_ls = generate_standard_dataset('/home/taivu/workspace/NudityDetection/Dataset/test/nude_jpeg', 224, 224, '*.jpeg')
test_labels = np.zeros(981, dtype=np.uint)
test_dataset = []
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    # The test set is not interleaved: the first 681 images are normal,
    # the remaining 300 are nude and labelled 1.
    for idx in range(981):
        if idx < 681:
            img = normal_ls.eval()
        else:
            img = nudity_ls.eval()
            test_labels[idx] = 1
        test_dataset.append(img)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=5)
test_dataset = np.array(test_dataset)
In [ ]:
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os
import tensorflow as tf
from data import generate_standard_dataset
from data import _int64_feature
from data import _bytes_feature

normal_ls, _ = generate_standard_dataset('/media/taivu/Data/Collecting_Dataset/total_normal_images', 224, 224, '*.jpeg')
nudity_ls, ls_name = generate_standard_dataset('/media/taivu/Data/Collecting_Dataset/total_nudity_images', 224, 224)
dataset_dir = '/media/taivu/Data/Project/Train_Result/Dataset'
coord = tf.train.Coordinator()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    threads = tf.train.start_queue_runners(sess, coord)
    for batch in range(3, 8):
        # Create a TFRecord file for this batch
        file_name = '4000x224x224_batch_' + str(batch) + '.tfrecords'
        file_path = os.path.join(dataset_dir, file_name)
        writer = tf.python_io.TFRecordWriter(file_path)
        for idx in range(4000):
            img = None
            lb = 0
            if idx % 2 == 0:
                img = normal_ls.eval()
                lb = 0
            else:
                img = nudity_ls.eval()
                lb = 1
            if img is not None:
                raw_img = img.tostring()
                example = tf.train.Example(features=tf.train.Features(
                    feature={'label': _int64_feature(int(lb)),
                             'image_raw': _bytes_feature(raw_img)}))
                writer.write(example.SerializeToString())
        writer.close()
    coord.request_stop()
    coord.join(threads)
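A quick sanity check on the files just written is to count the serialized examples with tf.python_io.tf_record_iterator; a minimal sketch (the batch index in the path is illustrative):
In [ ]:
# Count the records in one of the TFRecord files written above.
import tensorflow as tf

record_path = '/media/taivu/Data/Project/Train_Result/Dataset/4000x224x224_batch_3.tfrecords'
num_records = sum(1 for _ in tf.python_io.tf_record_iterator(record_path))
print('Records in file: %d' % num_records)  # expected: 4000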
In [15]:
print(ls_name[9672])
In [2]:
from data import generate_tfrecords
generate_tfrecords('/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset', dataset,
                   labels, '4000x224x224_batch_2')
In [2]:
from data import generate_tfrecords
generate_tfrecords('/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset', val_dataset,
                   val_labels, '400x224x224_eval_batch_1')
In [4]:
from data import generate_tfrecords
generate_tfrecords('/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset', test_dataset,
                   test_labels, '981x244x244_test')
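generate_tfrecords itself lives in data.py. Judging from the inline writer loop earlier in this notebook, it presumably serializes the in-memory arrays one tf.train.Example per sample; a minimal sketch under that assumption:
In [ ]:
# Sketch of data.generate_tfrecords, inferred from the writer loop above;
# the actual helper may differ in details.
import os
import tensorflow as tf
from data import _int64_feature, _bytes_feature

def generate_tfrecords_sketch(out_dir, images, labels, name):
    file_path = os.path.join(out_dir, name + '.tfrecords')
    writer = tf.python_io.TFRecordWriter(file_path)
    for img, lb in zip(images, labels):
        example = tf.train.Example(features=tf.train.Features(
            feature={'label': _int64_feature(int(lb)),
                     'image_raw': _bytes_feature(img.tostring())}))
        writer.write(example.SerializeToString())
    writer.close()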
In [1]:
%matplotlib inline
%load_ext autoreload
%autoreload 2
from data import generate_tfrecords
from data import extract_features
import tensorflow as tf
import numpy as np

with tf.Session() as sess:
    img_batch, lb_batch = extract_features(sess, tfrecord_path='/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset/4000x299x299_train_set.tfrecords', num_samples=4000)
img_batch_np = np.array(img_batch)
lb_batch_np = np.array(lb_batch)
generate_tfrecords('/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset', img_batch_np, lb_batch_np,
                   'transfer_learning_train')
In [8]:
from data import generate_tfrecords
from data import extract_features
import tensorflow as tf
import numpy as np

with tf.Session() as sess:
    val_img_batch, val_lb_batch = extract_features(sess, tfrecord_path='/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset/1156x299x299_val_set.tfrecords', num_samples=1156)
val_img_batch_np = np.array(val_img_batch)
val_lb_batch_np = np.array(val_lb_batch)
generate_tfrecords('/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset', val_img_batch_np, val_lb_batch_np,
                   'transfer_learning_val')
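extract_features is also defined in data.py and not shown. One plausible reading, given how its outputs are re-serialized above, is that it materializes num_samples (image, label) pairs from a TFRecord file; a hypothetical sketch (the feature keys and dtype are assumptions carried over from the writer cells):
In [ ]:
# Hypothetical sketch of data.extract_features; the real helper may instead
# push the images through a pretrained network before returning them.
import numpy as np
import tensorflow as tf

def extract_features_sketch(sess, tfrecord_path, num_samples):
    # sess is part of the real signature; this simplified sketch does not need it.
    imgs, lbs = [], []
    for record in tf.python_io.tf_record_iterator(tfrecord_path):
        example = tf.train.Example()
        example.ParseFromString(record)
        lbs.append(example.features.feature['label'].int64_list.value[0])
        raw = example.features.feature['image_raw'].bytes_list.value[0]
        imgs.append(np.frombuffer(raw, dtype=np.float32))  # dtype assumed
        if len(imgs) == num_samples:
            break
    return imgs, lbs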
The cell below is temporary scratch code.
In [10]:
from vng_model import _variable_with_weight_decay
from vng_model import _initialize_variable
from vng_model import inference_resnet
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
slim = tf.contrib.slim

with tf.Graph().as_default() as g:
    is_training = True
    x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='input_features')
    y_ = tf.placeholder(tf.float32, (None,), name='labels')
    with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training)):
        net, end_points = resnet_v1.resnet_v1_50(x)
    # Collapse the spatial dimensions of the ResNet output to (batch, 2048)
    net = tf.squeeze(net, [1, 2])
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state('/home/taivu/workspace/Pycharm_Nudity_Detection/pretrain_weight')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
    # Additional layers
    with tf.variable_scope('Additional_scope'):
        with tf.variable_scope('FC_1') as scope:
            weights = _variable_with_weight_decay('weights',
                                                  [2048, 1024],
                                                  0.04, 0.004)
            biases = _initialize_variable('biases',
                                          [1024],
                                          tf.constant_initializer(0.1))
            activate_1 = tf.nn.relu(tf.matmul(net, weights) + biases, name=scope.name)
        with tf.variable_scope('Softmax') as scope:
            weights = _variable_with_weight_decay('weights',
                                                  [1024, 2],
                                                  stddev=1 / 1024.0, wd=0.0)
            biases = _initialize_variable('biases',
                                          [2],
                                          tf.constant_initializer(0.0))
            softmax_classifier = tf.add(tf.matmul(activate_1, weights), biases, name=scope.name)
In [ ]:
var_list = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
for idx, var in enumerate(var_list):
    print(idx, var.name)
In [6]:
%load_ext autoreload
%autoreload 2
import tensorflow as tf
import data as dt
import vng_model as md
import os

train_dir = '/media/taivu/Data/Project/Train_Result/Dataset'
batch_ls = []
for batch in range(2, 8):
    name_batch = '4000x224x224_batch_' + str(batch) + '.tfrecords'
    train_batch = os.path.join(train_dir, name_batch)
    batch_ls.append(train_batch)
val_path = os.path.join(train_dir, '4000x224x224_batch_1.tfrecords')

with tf.Graph().as_default() as g1:
    # ------------------------- BUILD THE GRAPH OF THE MODEL ------------------------- #
    x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='input_features')
    y_ = tf.placeholder(tf.int32, (None,), name='labels')
    val_x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='val_input_features')
    val_y = tf.placeholder(tf.int32, (None,), name='val_labels')
    tr_samples, tr_labels = dt.input_data(batch_ls, 40)
    val_samples, val_labels = dt.input_data([val_path], 100, False)
    logit = md.inference_resnet(x)
    val_logit = md.inference_resnet(val_x, False, reuse=True)

    # Collect the ResNet kernels (every third trainable variable) for weight decay
    resnet_var_ls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='resnet_v1_50')
    resnet_weight_ls = []
    for idx in range(0, 159, 3):
        resnet_weight_ls.append(resnet_var_ls[idx])
    loss = md.loss(logit, y_, resnet_weight_ls)
    v_loss = md.loss(val_logit, val_y, resnet_weight_ls)

    # Prediction and evaluation ops
    hat_y = tf.argmax(val_logit, 1, name='predict_label')
    correct_pre = tf.equal(tf.cast(hat_y, tf.int32), val_y)
    accuracy = tf.reduce_mean(tf.cast(correct_pre, tf.float32))
    # ------------------------------------- END -------------------------------------- #

    # ------------------------------ Optimizing process ------------------------------- #
    add_var_ls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='additional_layers')
    opt_1 = tf.train.GradientDescentOptimizer(0.01)      # fine-tune the pretrained layers slowly
    opt_2 = tf.train.GradientDescentOptimizer(5 * 0.01)  # train the new layers faster
    # Freeze the early ResNet blocks: only variables from index 153 onward are trained
    grads = tf.gradients(loss, resnet_var_ls[153:] + add_var_ls)
    num_opt_resnet_layers = len(resnet_var_ls[153:])
    grads_1 = grads[:num_opt_resnet_layers]  # gradients for the trainable ResNet layers
    grads_2 = grads[num_opt_resnet_layers:]  # gradients for the additional layers
    train_opt_1 = opt_1.apply_gradients(zip(grads_1, resnet_var_ls[153:]))
    train_opt_2 = opt_2.apply_gradients(zip(grads_2, add_var_ls))
    train_opt = tf.group(train_opt_1, train_opt_2)
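The optimizing block above applies two learning rates: a small one for the fine-tuned ResNet variables and a five-times larger one for the freshly initialized layers, splitting a single tf.gradients call between the two optimizers. The mechanics can be isolated in a toy example (the variables here are made up for illustration):
In [ ]:
# Toy illustration of the split-gradient / two-optimizer pattern used above.
import tensorflow as tf

with tf.Graph().as_default():
    w_pretrained = tf.Variable(1.0, name='w_pretrained')  # stands in for the ResNet weights
    w_new = tf.Variable(2.0, name='w_new')                # stands in for the added layers
    toy_loss = tf.square(3.0 * w_pretrained + w_new - 1.0)
    opt_slow = tf.train.GradientDescentOptimizer(0.01)
    opt_fast = tf.train.GradientDescentOptimizer(0.05)
    # One gradient computation, then per-group application
    g_pre, g_new = tf.gradients(toy_loss, [w_pretrained, w_new])
    step = tf.group(opt_slow.apply_gradients([(g_pre, w_pretrained)]),
                    opt_fast.apply_gradients([(g_new, w_new)]))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(step)
        print(sess.run([w_pretrained, w_new]))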
In [10]:
var_ls = g1.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for idx, var in enumerate(var_ls):
    print(idx, var.name)
In [ ]:
import tensorflow as tf
import os
from vng_model import _variable_with_weight_decay
from vng_model import _initialize_variable
from vng_model import inference_resnet
from vng_model import loss
from data import input_data

train_path = os.path.join('/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset',
                          '4000x224x224_batch_2.tfrecords')
checkpoint_dir = '/home/taivu/workspace/Pycharm_Nudity_Detection/pretrain_weight'
with tf.Graph().as_default():
    is_training = True
    x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='input_features')
    y_ = tf.placeholder(tf.float32, (None,), name='labels')
    samples, labels = input_data([train_path], 32)
    with tf.Session() as sess:
        logit = inference_resnet(sess, x, checkpoint_dir, True)
        # Avoid shadowing the imported loss() so the cell can be re-run
        total_loss = loss(logit, y_)
        train_step = tf.train.RMSPropOptimizer(2e-3).minimize(total_loss)
    coord = tf.train.Coordinator()
    with tf.Session() as sess:
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        sess.run(tf.global_variables_initializer())
        for idx in range(10000):
            tr_x, tr_y = sess.run([samples, labels])
            _, loss_value = sess.run([train_step, total_loss], feed_dict={x: tr_x, y_: tr_y})
            print('Step %d: %0.2f' % (idx, loss_value))
        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=120)
In [ ]:
%load_ext autoreload
%autoreload 2
import tensorflow as tf
from train_model import train_resnet
train_resnet(True, val_dataset, val_labels)
In [ ]:
import os
from data import input_data
import tensorflow as tf

train_path = os.path.join('/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset',
                          '4000x224x224_batch_2.tfrecords')
samples, labels = input_data([train_path], 128)
In [1]:
import tensorflow as tf
import os

v1 = tf.Variable(1., name='v1')
v2 = tf.Variable(2., name='v2')
a = tf.add(v1, v2)
all_saver = tf.train.Saver()
v2_saver = tf.train.Saver({"v2": v2})
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    all_saver.save(sess, '/home/taivu/workspace/NudityDetection/Test/' + 'all_val.ckpt')
    v2_saver.save(sess, '/home/taivu/workspace/NudityDetection/Test/' + 'v2.ckpt')
In [3]:
import tensorflow as tf
import os

new_saver = tf.train.import_meta_graph(
    '/home/taivu/workspace/NudityDetection/Test/all_val.ckpt.meta')
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state('/home/taivu/workspace/NudityDetection/Test')
    if ckpt and ckpt.model_checkpoint_path:
        new_saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print('No checkpoint file found!')
    all_vars = tf.trainable_variables()
    for v in all_vars:
        print(v.name)
In [1]:
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from vng_model import _variable_with_weight_decay
from vng_model import _initialize_variable
from vng_model import inference_resnet
import data as dt
slim = tf.contrib.slim
import os

test_path = os.path.join('/home/taivu/workspace/Pycharm_Nudity_Detection/Dataset',
                         '981x244x244_test.tfrecords')
with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, (None, 224, 224, 3),
                       name='input_features')
    y_ = tf.placeholder(tf.int32, (None,), name='labels')
    test_samples, test_labels = dt.input_data([test_path], 98, False)
    logit = inference_resnet(x)
    hat_y = tf.argmax(logit, 1, name='predict_label')
    correct_pre = tf.equal(tf.cast(hat_y, tf.int32), y_)
    acc = tf.reduce_mean(tf.cast(correct_pre, tf.float32))
    saver = tf.train.Saver(tf.global_variables())
    coord = tf.train.Coordinator()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state('/home/taivu/workspace/Pycharm_Nudity_Detection/checkpoint_model')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found!')
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # Evaluate ten batches of 98 samples and keep a running mean accuracy
        for idx in range(10):
            print(idx)
            test_data, test_lb = sess.run([test_samples, test_labels])
            accuracy_test = sess.run(acc, feed_dict={x: test_data, y_: test_lb})
            if idx == 0:
                mean_acc = accuracy_test
            else:
                # Incremental mean: mean_n = (x_n + (n-1) * mean_{n-1}) / n
                mean_acc = 1.0 / (idx + 1) * (accuracy_test + idx * mean_acc)
        coord.request_stop()
        coord.join(threads)
print(mean_acc)
In [20]:
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from vng_model import _variable_with_weight_decay
from vng_model import _initialize_variable
import data as dt
slim = tf.contrib.slim

checkpoint_dir = '/home/taivu/workspace/Pycharm_Nudity_Detection/pretrain_weight'
with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='input_features')
    y_ = tf.placeholder(tf.int32, (None,), name='labels')
    with slim.arg_scope(resnet_v1.resnet_arg_scope(True)):
        net, end_points = resnet_v1.resnet_v1_50(x)
    net = tf.squeeze(net, [1, 2])
    with tf.variable_scope('additional_layers'):
        with tf.variable_scope('FC_1') as scope:
            weights = _variable_with_weight_decay('weights',
                                                  [2048, 1024],
                                                  0.04, 0.004)
            biases = _initialize_variable('biases',
                                          [1024],
                                          tf.constant_initializer(0.1))
            activate_1 = tf.nn.relu(tf.matmul(net, weights) + biases, name=scope.name)
        with tf.variable_scope('softmax') as scope:
            weights = _variable_with_weight_decay('weights',
                                                  [1024, 2],
                                                  stddev=1 / 1024.0, wd=0.0)
            biases = _initialize_variable('biases',
                                          [2],
                                          tf.constant_initializer(0.0))
            softmax_classifier = tf.add(tf.matmul(activate_1, weights), biases, name=scope.name)
    # Restore only the pretrained ResNet weights, not the new layers
    saver = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                      scope='resnet_v1_50'))
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    with tf.Session() as sess:
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Successful!')
        else:
            print('Checkpoint not found!')
In [12]:
import numpy as np
import os
import tensorflow as tf
from data import _int64_feature

A_features = np.array(range(100))
B_features = np.array(range(100, 200))
file_name_a = os.path.join('/home/taivu/Dropbox/Pycharm_Nudity_Detection', 'feature_a.tfrecords')
file_name_b = os.path.join('/home/taivu/Dropbox/Pycharm_Nudity_Detection', 'feature_b.tfrecords')
writer = tf.python_io.TFRecordWriter(file_name_a)
for idx in range(100):
    example = tf.train.Example(features=tf.train.Features(
        feature={'raw': _int64_feature(int(A_features[idx]))}))
    writer.write(example.SerializeToString())
writer.close()
In [13]:
writer = tf.python_io.TFRecordWriter(file_name_b)
for idx in range(100):
    example = tf.train.Example(features=tf.train.Features(
        feature={'raw': _int64_feature(int(B_features[idx]))}))
    writer.write(example.SerializeToString())
writer.close()
In [2]:
import tensorflow as tf

def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={'raw': tf.FixedLenFeature([], tf.int64)})
    obj = tf.cast(features['raw'], tf.int32)
    return obj

def input_data(data_dir, batch_size):
    filename_queue = tf.train.string_input_producer(data_dir)
    obj = read_and_decode(filename_queue)
    batch_obj = tf.train.batch(
        [obj], batch_size=batch_size, capacity=10 + 3 * batch_size)
    return batch_obj
In [4]:
import os
import tensorflow as tf

batch_1 = os.path.join('/home/taivu/Dropbox/Pycharm_Nudity_Detection', 'feature_a.tfrecords')
batch_2 = os.path.join('/home/taivu/Dropbox/Pycharm_Nudity_Detection', 'feature_b.tfrecords')
bat_ob = input_data([batch_1, batch_2], 5)
coord = tf.train.Coordinator()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for i in range(100):
        sample = sess.run(bat_ob)
        print(sample)
    coord.request_stop()
    coord.join(threads)
In [3]:
import math
print(int(math.ceil(3.0 / 2)))