In [7]:
import tensorflow as tf
import matplotlib.pyplot as plt
import csv
import os
import random
import numpy as np
from tensorflow.python.framework import ops
In [8]:
#---------------------------------------------------|
#-------------------1D-data-------------------------|
#---------------------------------------------------|
# Create graph session (TF 1.x style: explicit graph + Session)
ops.reset_default_graph()
sess = tf.Session()
# parameters for the run
data_size = 25      # length of the 1D input vector
conv_size = 5       # width of the 1D convolution filter
maxpool_size = 5    # width of the max-pool window
stride_size = 1     # stride shared by convolution and max-pool
# ensure reproducibility (seed both numpy and TF graph-level RNG)
seed = 3
np.random.seed(seed)
tf.set_random_seed(seed)
# Generate 1D data
data_1d = np.random.normal(size=data_size)
# placeholder for the 1D input, fed at sess.run time
x_input_1d = tf.placeholder(dtype=tf.float32, shape=[data_size])
#------Convolution-----
def conv_layer_1d(input_1d, my_filter, stride):
    """Convolve a 1D tensor by lifting it to 4D for tf.nn.conv2d.

    input_1d : 1D tensor of shape [width]
    my_filter: 4D filter of shape [1, filter_width, 1, 1]
    stride   : step size along the width axis
    Returns the convolution result squeezed back to a 1D tensor.
    """
    # [width] -> [1, 1, width, 1]: add fake batch, height and channel dims (NHWC).
    data_4d = tf.expand_dims(tf.expand_dims(tf.expand_dims(input_1d, 0), 0), 3)
    conv_out = tf.nn.conv2d(data_4d, filter=my_filter,
                            strides=[1, 1, stride, 1], padding='VALID')
    # Drop all size-1 dimensions to recover a 1D result.
    return tf.squeeze(conv_out)

# Filter variable: [height=1, width=conv_size, in_channels=1, out_channels=1].
my_filter = tf.Variable(tf.random_normal(shape=[1, conv_size, 1, 1]))
my_convolution_output = conv_layer_1d(x_input_1d, my_filter, stride=stride_size)
In [9]:
#-----activation------
def activation(input_1d):
    """Element-wise ReLU: max(0, x) for every entry of the input tensor."""
    return tf.nn.relu(input_1d)

# create activation layer on top of the convolution output
my_activation_output = activation(my_convolution_output)
In [10]:
#-----max pool-----
def max_pool(input_1d, width, stride):
    """Max-pool a 1D tensor via tf.nn.max_pool on a 4D expansion.

    input_1d: 1D tensor of shape [n]
    width   : pooling window size along the width axis
    stride  : pooling step along the width axis
    Returns the pooled values as a 1D tensor.
    """
    # [n] -> [1, 1, n, 1] so the NHWC pooling op can be applied.
    data_4d = tf.expand_dims(tf.expand_dims(tf.expand_dims(input_1d, 0), 0), 3)
    pooled = tf.nn.max_pool(data_4d, ksize=[1, 1, width, 1],
                            strides=[1, 1, stride, 1], padding='VALID')
    return tf.squeeze(pooled)

my_maxpool_output = max_pool(my_activation_output, width=maxpool_size, stride=stride_size)
In [11]:
#----fully connected-----
def fully_connected(input_layer, num_outputs):
    """Dense layer mapping a 1D input to `num_outputs` values.

    NOTE(review): weight and bias are plain tf.random_normal tensors,
    not tf.Variable, so they are re-sampled on every sess.run — kept
    as-is to preserve the original behavior.
    """
    # Weight shape [len(input), num_outputs], built dynamically from the input.
    w_shape = tf.squeeze(tf.stack([tf.shape(input_layer), [num_outputs]]))
    w = tf.random_normal(w_shape, stddev=0.1)
    b = tf.random_normal(shape=[num_outputs])
    row = tf.expand_dims(input_layer, 0)  # [n] -> [1, n] for matmul
    return tf.squeeze(tf.add(tf.matmul(row, w), b))

my_full_output = fully_connected(my_maxpool_output, 5)
In [12]:
#run graph
# initialize variables
init = tf.global_variables_initializer()
sess.run(init)
feed_dict = {x_input_1d: data_1d}
print('>>>>> 1D data <<<<')
# convolution output ("lengthn" typo fixed in the message below)
print('Input = array of length %d' % (x_input_1d.shape.as_list()[0]))
print('Convolution w/ filter, length = %d, stride size = %d, results in an array of length %d:' % (conv_size, stride_size, my_convolution_output.shape.as_list()[0]))
print(sess.run(my_convolution_output, feed_dict=feed_dict))
# activation output (bug fix: the ReLU result was never printed — the old
# "Activation output" section actually printed the maxpool output)
print('\nInput = above array of length %d' % (my_convolution_output.shape.as_list()[0]))
print('ReLU element wise returns an array of length %d:' % (my_activation_output.shape.as_list()[0]))
print(sess.run(my_activation_output, feed_dict=feed_dict))
# max pool output
print('\nInput = above array of length %d' % (my_activation_output.shape.as_list()[0]))
print('Maxpool, window length = %d, stride size = %d, results in the array of length %d' % (maxpool_size, stride_size, my_maxpool_output.shape.as_list()[0]))
print(sess.run(my_maxpool_output, feed_dict=feed_dict))
# fully connected output (derive the input length instead of hard-coding "4 rows")
print('\nInput = above array of length %d' % (my_maxpool_output.shape.as_list()[0]))
print('Fully connected layer on all %d rows with %d outputs:' % (my_maxpool_output.shape.as_list()[0], my_full_output.shape.as_list()[0]))
print(sess.run(my_full_output, feed_dict=feed_dict))
In [13]:
#---------------------------------------------------|
#-------------------2D-data-------------------------|
#---------------------------------------------------|
# reset graph: start a fresh TF1 graph and session for the 2D demo
ops.reset_default_graph()
sess = tf.Session()
In [14]:
# parameters for the run
row_size = 10             # height of the 2D input
col_size = 10             # width of the 2D input
conv_size = 2             # square convolution filter side (shadows the 1D value)
conv_stride_size = 2      # convolution stride in both directions
maxpool_size = 2          # square max-pool window side
maxpool_stride_size = 1   # max-pool stride in both directions
# ensure reproducibility (seed both numpy and TF graph-level RNG)
seed = 13
np.random.seed(seed)
tf.set_random_seed(seed)
# generate 2D data
data_size = [row_size, col_size]
data_2d = np.random.normal(size=data_size)
#--- placeholder for the 2D input, fed at sess.run time ---
x_input_2d = tf.placeholder(dtype=tf.float32, shape=data_size)
In [19]:
#----convolution----
def conv_layer_2d(input_2d, my_filter, stride_size):
    """Convolve a 2D tensor by lifting it to 4D for tf.nn.conv2d.

    input_2d   : 2D tensor of shape [height, width]
    my_filter  : 4D filter of shape [fh, fw, 1, 1]
    stride_size: stride applied in both spatial directions
    Returns the convolution result squeezed back to a 2D tensor.
    """
    # [h, w] -> [1, h, w, 1]: add fake batch and channel dims (NHWC).
    data_4d = tf.expand_dims(tf.expand_dims(input_2d, 0), 3)
    conv_out = tf.nn.conv2d(data_4d, filter=my_filter,
                            strides=[1, stride_size, stride_size, 1],
                            padding='VALID')
    return tf.squeeze(conv_out)

# Filter variable: [conv_size, conv_size, in_channels=1, out_channels=1].
my_filter = tf.Variable(tf.random_normal(shape=[conv_size, conv_size, 1, 1]))
my_convolution_output = conv_layer_2d(x_input_2d, my_filter, stride_size=conv_stride_size)
In [21]:
#----activation----
def activation(input_1d):
    """Element-wise ReLU.

    NOTE(review): redefines `activation` from the 1D section with
    identical behavior — the later definition silently shadows it.
    """
    return tf.nn.relu(input_1d)

my_activation_output = activation(my_convolution_output)
In [30]:
#-----max pool---
def max_pool(input_2d, width, height, stride):
    """Max-pool a 2D tensor via tf.nn.max_pool on a 4D expansion.

    input_2d: 2D tensor of shape [h, w]
    width   : pooling window width
    height  : pooling window height
    stride  : pooling step in both spatial directions
    Returns the pooled values as a 2D tensor.
    """
    # [h, w] -> [1, h, w, 1] so the NHWC pooling op can be applied.
    data_4d = tf.expand_dims(tf.expand_dims(input_2d, 0), 3)
    pooled = tf.nn.max_pool(data_4d, ksize=[1, height, width, 1],
                            strides=[1, stride, stride, 1], padding='VALID')
    return tf.squeeze(pooled)

my_maxpool_output = max_pool(my_activation_output, width=maxpool_size, height=maxpool_size, stride=maxpool_stride_size)
In [38]:
#-----fully connected----
def fully_connected(input_layer, num_outputs):
    """Flatten a 2D input and apply a dense layer with `num_outputs` units.

    NOTE(review): weight and bias are plain tf.random_normal tensors,
    not tf.Variable, so they are re-sampled on every sess.run — kept
    as-is to preserve the original behavior.
    """
    flat = tf.reshape(input_layer, [-1])  # flatten [h, w] -> [h*w]
    # Weight shape [len(flat), num_outputs], built dynamically from the input.
    w_shape = tf.squeeze(tf.stack([tf.shape(flat), [num_outputs]]))
    w = tf.random_normal(w_shape, stddev=0.1)
    b = tf.random_normal(shape=[num_outputs])
    row = tf.expand_dims(flat, 0)  # [n] -> [1, n] for matmul
    return tf.squeeze(tf.add(tf.matmul(row, w), b))

my_full_output = fully_connected(my_maxpool_output, 5)
In [39]:
#run graph
# initialize variables
init = tf.global_variables_initializer()
sess.run(init)
feed_dict = {x_input_2d: data_2d}
print('>>>>> 2D data: <<<<<')
# convolution output (bug fix: this section previously ran my_maxpool_output)
print('Input = %s array' % (x_input_2d.shape.as_list()))
print('%s convolution, stride size = [%d, %d], results in the %s array' % (my_filter.shape.as_list()[:2], conv_stride_size, conv_stride_size, my_convolution_output.shape.as_list()))
print(sess.run(my_convolution_output, feed_dict=feed_dict))
# activation output (bug fix: its input is the convolution output, not the maxpool output)
print('\nInput = the above %s array' % (my_convolution_output.shape.as_list()))
print('ReLU element wise returns %s arrays' % (my_activation_output.shape.as_list()))
print(sess.run(my_activation_output, feed_dict=feed_dict))
# max pool output
print('\nInput = the above %s array' % (my_activation_output.shape.as_list()))
print('MaxPool, stride_size =[%d, %d], results in %s output:' % (maxpool_stride_size, maxpool_stride_size, my_maxpool_output.shape.as_list()))
print(sess.run(my_maxpool_output, feed_dict=feed_dict))
# fully connected output
print('\nInput = the above %s array' % (my_maxpool_output.shape.as_list()))
print('Fully connected layer on all %d rows results in %s output:' % (my_maxpool_output.shape.as_list()[0], my_full_output.shape.as_list()[0]))
print(sess.run(my_full_output, feed_dict=feed_dict))
In [ ]: