In [7]:
import tensorflow as tf
import matplotlib.pyplot as plt
import csv
import os 
import random
import numpy as np
from tensorflow.python.framework import ops

In [8]:
#---------------------------------------------------|
#-------------------1D-data-------------------------|
#---------------------------------------------------|

# Create graph session
ops.reset_default_graph()
sess = tf.Session()

# parameters for the run
data_size = 25
conv_size = 5
maxpool_size = 5
stride_size = 1

# ensure reproducibility
seed = 3
np.random.seed(seed)
tf.set_random_seed(seed)

#Gnerate 1D data
data_1d = np.random.normal(size=data_size)

# placeholder
x_input_1d = tf.placeholder(dtype=tf.float32, shape=[data_size])

#------Convolution-----
def conv_layer_1d(input_1d, my_filter, stride):
    input_2d = tf.expand_dims(input_1d, 0)
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    
    convolution_output = tf.nn.conv2d(input_4d, filter=my_filter, strides=[1,1,stride, 1], padding="VALID")
    conv_output_1d = tf.squeeze(convolution_output)
    return (conv_output_1d)
    
my_filter = tf.Variable(tf.random_normal(shape=[1, conv_size, 1, 1]))
my_convolution_output = conv_layer_1d(x_input_1d, my_filter, stride=stride_size)

In [9]:
#-----activation------
def activation(input_1d):
    return (tf.nn.relu(input_1d))

# create activation layer
my_activation_output = activation(my_convolution_output)

In [10]:
#-----max pool-----
def max_pool(input_1d, width, stride):
    input_2d = tf.expand_dims(input_1d, 0)
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    pool_output = tf.nn.max_pool(input_4d, ksize=[1,1,width, 1], strides=[1, 1, stride, 1], padding='VALID')
    pool_output_1d = tf.squeeze(pool_output)
    return (pool_output_1d)

my_maxpool_output = max_pool(my_activation_output, width=maxpool_size, stride=stride_size)

In [11]:
#----fully connected-----
def fully_connected(input_layer, num_outputs):
    weight_shape = tf.squeeze(tf.stack([tf.shape(input_layer), [num_outputs]]))
    weight = tf.random_normal(weight_shape, stddev=0.1)
    bias = tf.random_normal(shape=[num_outputs])
    input_layer_2d = tf.expand_dims(input_layer, 0)
    full_output = tf.add(tf.matmul(input_layer_2d, weight), bias)
    full_output_1d = tf.squeeze(full_output)
    return (full_output_1d)

my_full_output = fully_connected(my_maxpool_output, 5)

In [12]:
#run graph
#initialize variable
init = tf.global_variables_initializer()
sess.run(init)

feed_dict = {x_input_1d: data_1d}
print('>>>>> 1D data <<<<')

# convolution output
print('Input = array of length %d' % (x_input_1d.shape.as_list()[0]))
print('Convolution w/ filter, lengthn = %d, stride size = %d, results in an array of length %d:' % (conv_size, stride_size, my_convolution_output.shape.as_list()[0]))
print(sess.run(my_convolution_output, feed_dict=feed_dict))

# Activation output
print('\nInput = above array of length %d' % (my_activation_output.shape.as_list()[0]))
print('Maxpool, window length = %d, stride size = %d, results in the array of length %d' % (maxpool_size, stride_size, my_maxpool_output.shape.as_list()[0]))
print(sess.run(my_maxpool_output, feed_dict=feed_dict))

# fully connected output
print('\nInput = above array of length %d' % (my_maxpool_output.shape.as_list()[0]))
print('Fully connected layer on all 4 rows with %d' % (my_full_output.shape.as_list()[0]))
print(sess.run(my_full_output, feed_dict=feed_dict))


>>>>> 1D data <<<<
Input = array of length 25
Convolution w/ filter, lengthn = 5, stride size = 1, results in an array of length 21:
[ 0.20388454  0.7204051   0.59722596 -0.19168641 -0.02843863  0.23616135
  0.46905193 -0.44914222  0.37728733 -0.41038391 -0.67565417 -0.37453118
 -0.12621951  0.96754086 -0.19212683  0.93351924  0.16185835 -0.48495594
  0.49288097  0.19455409 -0.41068366]

Input = above array of length 21
Maxpool, window length = 5, stride size = 1, results in the array of length 17
[ 0.7204051   0.7204051   0.59722596  0.46905193  0.46905193  0.46905193
  0.46905193  0.37728733  0.37728733  0.96754086  0.96754086  0.96754086
  0.96754086  0.96754086  0.93351924  0.93351924  0.49288097]

Input = above array of length 17
Fully connected layer on all 4 rows with 5
[-0.62756699 -0.75552887  0.03494836 -0.7527107   1.18131292]

In [13]:
#---------------------------------------------------|
#-------------------2D-data-------------------------|
#---------------------------------------------------|
#reset graph
ops.reset_default_graph()
sess = tf.Session()

In [14]:
# parameters for the run
row_size = 10 
col_size = 10
conv_size = 2
conv_stride_size = 2
maxpool_size = 2
maxpool_stride_size = 1


# ensure reproducibility
seed = 13
np.random.seed(seed)
tf.set_random_seed(seed)

# generate 2D data
data_size = [row_size, col_size]
data_2d = np.random.normal(size=data_size)

#--- placeholder---
x_input_2d = tf.placeholder(dtype=tf.float32, shape=data_size)

In [19]:
#----convolution----
def conv_layer_2d(input_2d, my_filter, stride_size):
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    convolution_output = tf.nn.conv2d(input_4d, filter=my_filter, strides=[1, stride_size, stride_size, 1], padding='VALID')
    conv_output_2d = tf.squeeze(convolution_output)
    return (conv_output_2d)

my_filter = tf.Variable(tf.random_normal(shape=[conv_size, conv_size, 1, 1]))
my_convolution_output = conv_layer_2d(x_input_2d, my_filter, stride_size=conv_stride_size)

In [21]:
#----activation----
def activation(input_1d):
    return(tf.nn.relu(input_1d))

my_activation_output = activation(my_convolution_output)

In [30]:
#-----max pool---
def max_pool(input_2d, width, height, stride):
    input_3d = tf.expand_dims(input_2d, 0)
    input_4d = tf.expand_dims(input_3d, 3)
    pool_output = tf.nn.max_pool(input_4d, ksize=[1, height, width, 1], strides=[1, stride, stride, 1], padding='VALID')
    pool_output_2d = tf.squeeze(pool_output)
    return (pool_output_2d)

my_maxpool_output = max_pool(my_activation_output, width=maxpool_size, height=maxpool_size, stride=maxpool_stride_size)

In [38]:
#-----fully connected----
def fully_connected(input_layer, num_outputs):
    flat_input = tf.reshape(input_layer, [-1])
    weight_shape = tf.squeeze(tf.stack([tf.shape(flat_input), [num_outputs]]))
    weight = tf.random_normal(weight_shape, stddev=0.1)
    bias = tf.random_normal(shape=[num_outputs])
    input_2d = tf.expand_dims(flat_input, 0)
    fully_output = tf.add(tf.matmul(input_2d, weight), bias)
    full_output_2d = tf.squeeze(fully_output)
    return (full_output_2d)

my_full_output = fully_connected(my_maxpool_output, 5)

In [39]:
#run graph
# initialize variables
init = tf.global_variables_initializer()
sess.run(init)
feed_dict = {x_input_2d: data_2d}

print('>>>>> 2D data: <<<<<')

# convolution output
print('Input = %s array' % (x_input_2d.shape.as_list()))
print('%s convolution, stride size = [%d, %d], results in the %s array' % (my_filter.shape.as_list()[:2], conv_stride_size,conv_stride_size, my_convolution_output.shape.as_list()))
print(sess.run(my_maxpool_output, feed_dict=feed_dict))


# activation output
print('\nInput = the above %s array' % (my_maxpool_output.shape.as_list()))
print('ReLU element wise returns %s arrays' % (my_activation_output.shape.as_list()))
print(sess.run(my_activation_output, feed_dict=feed_dict))

# max pool output
print('\nInput = the above %s array' % (my_activation_output.shape.as_list()))
print('MaxPool, stride_size =[%d, %d], results in %s output:' % (maxpool_stride_size, maxpool_stride_size, my_maxpool_output.shape.as_list()))
print(sess.run(my_maxpool_output, feed_dict=feed_dict))

# fully connected output
print('\nInput = the above %s array' % (my_maxpool_output.shape.as_list()))
print('Fully connected layer on all %d rows results in %s output:' % (my_maxpool_output.shape.as_list()[0], my_full_output.shape.as_list()[0]))
print(sess.run(my_full_output, feed_dict=feed_dict))


>>>>> 2D data: <<<<<
Input = [10, 10] array
[2, 2] convolution, stride size = [2, 2], results in the [5, 5] array
[[ 1.51234519  1.51234519  1.5481838   2.4784441 ]
 [ 1.51234519  1.94314456  1.94314456  2.4784441 ]
 [ 0.          1.94314456  1.94314456  0.54362565]
 [ 2.50780892  0.44513056  0.54362565  0.81862581]]

Input = the above [4, 4] array
ReLU element wise returns [5, 5] arrays
[[ 0.          0.          0.65901303  1.5481838   1.62168312]
 [ 0.          1.51234519  0.          0.          2.4784441 ]
 [ 0.          0.          1.94314456  0.          0.        ]
 [ 0.          0.          0.44513056  0.54362565  0.        ]
 [ 2.50780892  0.          0.          0.          0.81862581]]

Input = the above [5, 5] array
MaxPool, stride_size =[1, 1], results in [4, 4] output:
[[ 1.51234519  1.51234519  1.5481838   2.4784441 ]
 [ 1.51234519  1.94314456  1.94314456  2.4784441 ]
 [ 0.          1.94314456  1.94314456  0.54362565]
 [ 2.50780892  0.44513056  0.54362565  0.81862581]]

Input = the above [4, 4] array
Fully connected layer on all 4 rows results in 5 output:
[ 1.06032324 -0.69213206 -0.94082755  0.01113656 -0.73912561]

In [ ]: