Find out all the relevant ops for creating neural networks in TensorFlow. Since some categories only exist in the TensorFlow documentation, but not in the code, they have to be categorized manually or scraped from the documentation.
We assume that all the relevant operations reside in the tf.nn namespace. We parse the documentation to get the right categories and then run the functions to get the respective type of the operations. We try to pass as few arguments as possible to each function and reuse arguments where possible.
In [2]:
import tensorflow as tf
# Exploratory listing of everything exposed in the tf.nn namespace.
dir(tf.nn)
Out[2]:
Download the documentation in markdown and parse it.
In [2]:
!wget https://github.com/tensorflow/tensorflow/blob/master/tensorflow/docs_src/api_guides/python/nn.md
In [3]:
import re
import pprint
import tensorflow as tf
import inspect
# Define basic input output.
def weight_variable(shape):
    """Create a trainable tf.Variable of the given shape, initialized from a
    truncated normal distribution with stddev 0.1."""
    init_values = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_values)
# Flattened 28x28 input images (784 values per example).
x = tf.placeholder(tf.float32, shape=[None, 784])
# 5x5 convolution kernel: 1 input channel, 32 output channels.
W_conv1 = weight_variable([5, 5, 1, 32])
# One-hot labels over 10 classes.
y_ = tf.placeholder(tf.float32, shape=[None, 10])
# Restore the spatial layout (height, width, channels) for conv-style ops.
x_image = tf.reshape(x, [-1, 28, 28, 1])
# Regexes for finding headings and the function references.
# Matches markdown level-2 section headings, e.g. "## Activation Functions".
section_header_regex = re.compile(r'##\s(.+)$')
# Matches doc cross-references of the form "*   @{tf.nn.relu}" and captures
# the dotted function path inside the braces.
function_reference_regex = re.compile(r'\*\s{3}@\{([\w\.]+?)\}')
def transform_heading(s: str):
    """Turn a section heading into a snake_case dict key with a `_fns` suffix."""
    normalized = s.lower()
    return normalized.replace(' ', '_') + '_fns'
# Maps category key (e.g. 'pooling_fns') -> list of function paths found
# under that heading in nn.md.
categories = {}
current_heading = ''
with open('nn.md', 'r') as doc_file:
    # Search for section headings.
    for line in doc_file:
        heading = section_header_regex.match(line)
        if heading is not None:
            # A new "## ..." heading starts a new (empty) category.
            current_heading = transform_heading(heading.group(1))
            categories[current_heading] = []
        elif current_heading:
            # Inside a section: collect every @{tf.nn.*} cross-reference.
            fn = function_reference_regex.match(line)
            if fn is not None:
                categories[current_heading].append(fn.group(1))
# Arguments passed to the tensorflow functions.
# A single conv-kernel variable reused for every filter-like parameter.
default_filter = W_conv1
# Keyword-name -> value table; for each tf.nn function, only the entries whose
# key matches a required (defaultless) parameter of that function are passed.
defaults = {
    'features': [0.1, 0.1],
    'padding': 'SAME',
    'kernel_size': 3,
    'x': x_image,
    'keep_prob': 0.5,
    'value': x_image,
    'bias': [1] * 4,
    'input': x_image,
    'filter': default_filter,
    'strides': [1, 2, 2, 1],
    'stride': 2,
    'depthwise_filter': default_filter,
    'pointwise_filter': default_filter,
    'filters': [3, 3, 28, 2],
    'rate': 2,
    'output_shape': [3],
    'filter_sizes': [3, 3, 28, 2],
    'out_backprop': [1, 28, 28, 1],
    'input_sizes': [1, 3, 3, 3],
    'ksize': [1.0, 2.0, 2.0, 1.0],
    'pooling_ratio': [1.0, 1.44, 1.73, 1.0],
    'window_shape': [2, 2],
    'rates': [1, 2, 2, 1],  # for morphological operations
    'kernel': default_filter,
    'dim': 0,  # Normalization
    'axes': [0],
    'counts': 1,
    'mean_ss': 0.5,
    'variance_ss': 0.1,
    'shift': 1,
    'scale': 2.0,
    'offset': 0.0,
    'mean': 1.0,
    'variance': 0.1,
    'variance_epsilon': 0.01,
    't': [1.0],
    'targets': [1.0],
    'log_input': [1.0],
    'logits': [1.0],
    'pos_weight': 1.0,
    'cell': tf.contrib.rnn.BasicRNNCell(1),  # rnn
    'inputs': x,
    'labels': y_,
    'predictions': y_,
    'k': 2.0,
}
# Ignore functions for which arguments that work cannot be found easily.
# Names here are given without the 'tf.nn.' prefix.
ignored_fns = {'convolution', 'atrous_conv2d', 'atrous_conv2d_transpose', 'conv2d_transpose',
               'conv3d_transpose', 'conv1d', 'pool',
               'with_space_to_batch', 'lrn', 'normalize_moments', 'weighted_moments',
               'batch_norm_with_global_normalization',
               'embedding_lookup', 'embedding_lookup_sparse',
               'bidirectional_dynamic_rnn', 'raw_rnn',
               'in_top_k'
               }
# Remove unimportant categories.
del categories['connectionist_temporal_classification_(ctc)_fns']
del categories['candidate_sampling_fns']
# Basic operations that are not of interest.
base_ops = {'Add', 'Reshape', 'Const', 'Mul', 'Sub', 'Neg', 'Exp', 'Maximum', 'Floor', 'Assign', 'Identity',
            'VariableV2', 'Sum', 'Squeeze'}
# Default graph that accumulates every op created by the calls below.
graph = tf.get_default_graph()
def transform_cat(cat_name):
    """Map a `*_fns` category key to the matching `*_types` key.

    Bug fix: the original body referenced the global `cat` (the loop variable
    of the main loop below) instead of its own `cat_name` parameter, so the
    argument was silently ignored.
    """
    return cat_name.replace('_fns', '_types')
# Iterate over all function categories and find the type of the ops that are created by them.
category_types = {}       # '<cat>_types'   -> list of op type strings
function_to_optype = {}   # '<cat>_mapping' -> list of (function name, op type)
op_count = 0
for cat in categories.keys():
    current_cat_type = cat.replace('_fns', '_types')
    category_types[current_cat_type] = []
    current_cat_mapping = cat.replace('_fns', '_mapping')
    function_to_optype[current_cat_mapping] = []
    for fn_str in categories[cat]:
        if not fn_str.replace('tf.nn.', '') in ignored_fns:
            # Get the tensorflow function corresponding to the string.
            # NOTE(review): eval() is tolerable here only because fn_str comes
            # from the regex-filtered docs, never from untrusted input.
            fn_obj = eval(fn_str)
            print(fn_obj)
            sig = inspect.signature(fn_obj)
            # Apply only those parameters that actually apply and don't have a default value.
            # NOTE(review): inspect._empty is private API; the public spelling
            # is inspect.Parameter.empty — confirm before upgrading Python.
            applicalbe_args = {k: v for k, v in defaults.items()
                               if k in sig.parameters.keys() and sig.parameters[k].default == inspect._empty}
            binding = sig.bind(**applicalbe_args)
            binding.apply_defaults()
            try:
                # Call the function with the necessary arguments.
                fn_obj(**binding.arguments)
                # Look at the type of the last op that was added to the operations, that is not a basic op.
                ops = graph.get_operations()
                # NOTE(review): op_count is initialized once and never updated,
                # so num_new_ops is always len(ops) and the `num_new_ops <= i`
                # branch below can never be taken — confirm whether op_count was
                # meant to be refreshed after each successful call.
                num_new_ops = len(ops) - op_count
                # Scan from the most recently added op backwards.
                for i, op in enumerate(reversed(ops)):
                    if op.type not in base_ops:
                        category_types[current_cat_type].append(op.type)
                        function_to_optype[current_cat_mapping].append((fn_obj.__name__, op.type))
                        break
                    else:
                        if num_new_ops <= i:
                            continue
                        else:
                            break
            # If the defined default values cause problems ignore the function.
            except ValueError as e:
                print(e)
pprint.pprint(category_types)
pprint.pprint(function_to_optype)
In [7]:
# Strip the `_types` suffix from every category key for cleaner display.
category_types_without_suffix = {
    key.replace('_types', ''): op_list
    for key, op_list in category_types.items()
}
pprint.pprint(category_types_without_suffix)
This should be a more or less complete list of the relevant operation types. Now we can define a layer as a sequence of operation types.