In [1]:
# Pin this notebook's TensorFlow session to GPU 1 and render figures inline.
%env CUDA_VISIBLE_DEVICES=1
%matplotlib inline


env: CUDA_VISIBLE_DEVICES=1

In [2]:
import matplotlib
import matplotlib.pyplot as plt
import os
import tensorflow as tf
import utils.plot_functions as pf
import numpy as np
from cae_model import cae
import time
import utils.entropy_funcs as ef

In [7]:
# Configuration + training driver for the convolutional autoencoder (CAE) with
# an entropy penalty on the latent code.  Runs 100 batches, alternating an MLE
# fit of the latent density (triangle-kernel mixture) with a gradient step on
# the autoencoder, and prints the entropy loss per batch.
params = {}
# NOTE(review): hard-coded latent size; must match the flattened conv-stack
# output for the layer dims below (49152 for color, 32768 for grayscale) --
# confirm against cae_model if input_channels/strides change.
params["n_mem"] = 32768  #32768 #49152 for color, 32768 for grayscale

#general params
params["run_name"] = "test_run"
# params["file_location"] = "/media/tbell/datasets/natural_images.txt"
params["file_location"] = "/media/tbell/datasets/test_images.txt"
params["gpu_ids"] = ["0"]
params["output_location"] = os.path.expanduser("~")+"/CAE_Project/CAEs/model_outputs/"+params["run_name"]
params["num_threads"] = 6
params["num_epochs"] = 40
params["epoch_size"] = 49000
params["eval_interval"] = 100
params["seed"] = 1234567890

#checkpoint params
params["run_from_check"] = False
params["check_load_run_name"] = "train"
params["check_load_path"] = "/home/dpaiton/CAE_Project/CAEs/model_outputs/"+params["check_load_run_name"]+"/checkpoints/chkpt_-22800"

#image params
params["shuffle_inputs"] = True
params["batch_size"] = 100
params["img_shape_y"] = 256
params["num_colors"] = 1
params["downsample_images"] = True
params["downsample_method"] = "resize" # can be "crop" or "resize"

#learning rates
params["init_learning_rate"] = 5.0e-4
params["decay_steps"] = 10000#epoch_size*0.5*num_epochs #0.5*epoch_size
params["staircase"] = True
params["decay_rate"] = 0.9

#layer params
params["memristorify"] = False
params["god_damn_network"] = True
params["relu"] = False

#layer dimensions
params["input_channels"] = [params["num_colors"], 128, 128]
params["output_channels"] = [128, 128, 128]
params["patch_size_y"] = [9, 5, 5]
params["strides"] = [4, 2, 2]

#memristor params
params["GAMMA"] = 1.0  # slope of the out of bounds cost
params["mem_v_min"] = -1.0
params["mem_v_max"] = 1.0
params["gauss_chan"] = False

#entropy params
params["LAMBDA"] = 0.1  # weight of the entropy penalty in the total loss
params["num_triangles"] = 20
params["mle_lr"] = 0.1
params["num_mle_steps"] = 5
params["quant_noise_scale"] = 1.0/128.0 # simulating quantizing u in {-1.0, 1.0} to uint8 (256 values)
# Fixed kernel centers for the triangle-mixture density estimate of the latents.
mle_triangle_centers = np.linspace(params["mem_v_min"], params["mem_v_max"], params["num_triangles"])

cae_model = cae(params)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grab GPU memory on demand, not all at once
config.allow_soft_placement = True
config.log_device_placement = False # for debugging - log devices used by each variable

with tf.Session(config=config, graph=cae_model.graph) as sess:
  sess.run(cae_model.init_op)
  if cae_model.params["run_from_check"]:  # idiomatic truthiness (was "== True")
    cae_model.full_saver.restore(sess, cae_model.params["check_load_path"])
  # Coordinator manages threads, checks for stopping requests
  coord = tf.train.Coordinator()
  # queue_runners are created by helper functions tf.train.string_input_producer() and tf.train.batch_join()
  enqueue_threads = tf.train.start_queue_runners(sess, coord=coord, start=True)

  # Noise fed to the memristor channel model; drawn once and reused each batch.
  mem_std_eps = np.random.standard_normal((cae_model.params["effective_batch_size"], cae_model.params["n_mem"])).astype(np.float32)
  feed_dict={cae_model.memristor_std_eps:mem_std_eps, cae_model.triangle_centers:mle_triangle_centers}
  if not params["memristorify"] and not params["gauss_chan"]:
    # Uniform noise simulating uint8 quantization of the latent code.
    quant_noise = np.random.uniform(-params["quant_noise_scale"], params["quant_noise_scale"],
      size=(cae_model.params["effective_batch_size"], cae_model.params["n_mem"]))
    feed_dict[cae_model.quantization_noise] = quant_noise
  t0 = time.time()
  for batch_id in range(100):
    # Re-fit the MLE density estimate of the latents before each training step.
    sess.run(cae_model.reset_mle_thetas, feed_dict)
    #old_thetas = sess.run(cae_model.mle_thetas, feed_dict)
    for mle_step in range(params["num_mle_steps"]):
      _, thetas = sess.run([cae_model.mle_update, cae_model.mle_thetas], feed_dict)
      #_, thetas, grads = sess.run([cae_model.mle_update, cae_model.mle_thetas, cae_model.mle_grads], feed_dict)
        #print("\n", mle_step, " - grads: ", grads.shape, "\t", np.mean(grads[0,:]), "\t", np.var(grads[0,:]))
        #mean_diff = np.mean(np.mean(np.square(old_thetas-thetas), axis=1))
        #mean_var = np.mean(np.var(np.square(old_thetas-thetas), axis=1))
        #print(mle_step, " - weight diffs: ", mean_diff, "\t", mean_var)
        #old_thetas = thetas

    # NOTE(review): the plotting cell further down depends on variables
    # (latent_ent_list, u_list, u_tri_evals, tri_heights, tri_centers) produced
    # only by uncommenting the debug lines below -- it will NameError otherwise.
    #_, step, u_list = sess.run([cae_model.train_op, cae_model.global_step, cae_model.u_list])
    #latent_ent_list, u_list, ent_loss = sess.run([cae_model.latent_entropies, cae_model.u_list, cae_model.ent_loss], feed_dict)

    #tri_linspace = tf.stack([tf.linspace(tf.reduce_min(cae_model.u_list[int(params["num_layers"]/2)]),
    #  tf.reduce_max(cae_model.u_list[int(params["num_layers"]/2)]), num=1000),]*params["n_mem"], axis=1)
    #tri_evals, tri_lin_eval = sess.run([ef.eval_triangle(tri_linspace, ef.weights(cae_model.mle_thetas), cae_model.triangle_centers),
    #  tri_linspace], feed_dict)
    #u_vals = tf.reshape(u_list[int(params["num_layers"]/2)], (params["effective_batch_size"], params["n_mem"]))
    #u_tri_evals = sess.run(ef.eval_triangle(u_vals, ef.weights(cae_model.mle_thetas), cae_model.triangle_centers), feed_dict)
    #tri_heights, tri_centers = sess.run([ef.weights(cae_model.mle_thetas), cae_model.triangle_centers], feed_dict)
    _, ent_loss = sess.run([cae_model.train_op, cae_model.ent_loss], feed_dict)
    print(ent_loss)
  t1 = time.time()
  t_tot = t1-t0
  print("\n\ntotal_time: ",t_tot)
  print(ent_loss)  # entropy loss from the final batch

  coord.request_stop()
  coord.join(enqueue_threads)


78608.2
77410.2
68334.9
63541.8
60632.6
58648.8
57109.5
56014.5
55070.2
54309.5
53410.1
52924.0
52419.9
51956.6
51710.7
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-7-555da72f853d> in <module>()
    105     #u_tri_evals = sess.run(ef.eval_triangle(u_vals, ef.weights(cae_model.mle_thetas), cae_model.triangle_centers), feed_dict)
    106     #tri_heights, tri_centers = sess.run([ef.weights(cae_model.mle_thetas), cae_model.triangle_centers], feed_dict)
--> 107     _, ent_loss = sess.run([cae_model.train_op, cae_model.ent_loss], feed_dict)
    108     print(ent_loss)
    109   t1 = time.time()

/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    765     try:
    766       result = self._run(None, fetches, feed_dict, options_ptr,
--> 767                          run_metadata_ptr)
    768       if run_metadata:
    769         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    963     if final_fetches or final_targets:
    964       results = self._do_run(handle, final_targets, final_fetches,
--> 965                              feed_dict_string, options, run_metadata)
    966     else:
    967       results = []

/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1013     if handle is None:
   1014       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1015                            target_list, options, run_metadata)
   1016     else:
   1017       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1020   def _do_call(self, fn, *args):
   1021     try:
-> 1022       return fn(*args)
   1023     except errors.OpError as e:
   1024       message = compat.as_text(e.message)

/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1002         return tf_session.TF_Run(session, options,
   1003                                  feed_dict, fetch_list, target_list,
-> 1004                                  status, run_metadata)
   1005 
   1006     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 
INFO:tensorflow:Error reported to Coordinator: <class 'tensorflow.python.framework.errors_impl.CancelledError'>, Enqueue operation was cancelled
	 [[Node: queue/input_producer/input_producer_EnqueueMany = QueueEnqueueManyV2[Tcomponents=[DT_STRING], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](queue/input_producer, queue/input_producer/RandomShuffle)]]

Caused by op 'queue/input_producer/input_producer_EnqueueMany', defined at:
  File "/usr/lib/python3.4/runpy.py", line 170, in _run_module_as_main
    "__main__", mod_spec)
  File "/usr/lib/python3.4/runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/ipykernel_launcher.py", line 16, in <module>
    app.launch_new_instance()
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/traitlets/config/application.py", line 658, in launch_instance
    app.start()
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/ipykernel/kernelapp.py", line 477, in start
    ioloop.IOLoop.instance().start()
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/zmq/eventloop/ioloop.py", line 177, in start
    super(ZMQIOLoop, self).start()
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tornado/ioloop.py", line 888, in start
    handler_func(fd_obj, events)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tornado/stack_context.py", line 277, in null_wrapper
    return fn(*args, **kwargs)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events
    self._handle_recv()
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv
    self._run_callback(callback, msg)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback
    callback(*args, **kwargs)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tornado/stack_context.py", line 277, in null_wrapper
    return fn(*args, **kwargs)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
    return self.dispatch_shell(stream, msg)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
    handler(stream, idents, msg)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
    user_expressions, allow_stdin)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/ipykernel/ipkernel.py", line 196, in do_execute
    res = shell.run_cell(code, store_history=store_history, silent=silent)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/ipykernel/zmqshell.py", line 533, in run_cell
    return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/IPython/core/interactiveshell.py", line 2717, in run_cell
    interactivity=interactivity, compiler=compiler, result=result)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/IPython/core/interactiveshell.py", line 2821, in run_ast_nodes
    if self.run_code(code, result):
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-7-555da72f853d>", line 61, in <module>
    cae_model = cae(params)
  File "/home/rzarcone/CAE_Project/CAEs/cae_model.py", line 13, in __init__
    self.construct_graph()
  File "/home/rzarcone/CAE_Project/CAEs/cae_model.py", line 265, in construct_graph
    capacity=self.params["capacity"])
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/training/input.py", line 230, in string_input_producer
    cancel_op=cancel_op)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/training/input.py", line 162, in input_producer
    enq = q.enqueue_many([input_tensor])
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/ops/data_flow_ops.py", line 367, in enqueue_many
    self._queue_ref, vals, name=scope)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/ops/gen_data_flow_ops.py", line 1538, in _queue_enqueue_many_v2
    name=name)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/framework/op_def_library.py", line 763, in apply_op
    op_def=op_def)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/framework/ops.py", line 2327, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "/home/rzarcone/tensorflow/lib/python3.4/site-packages/tensorflow/python/framework/ops.py", line 1226, in __init__
    self._traceback = _extract_stack()

CancelledError (see above for traceback): Enqueue operation was cancelled
	 [[Node: queue/input_producer/input_producer_EnqueueMany = QueueEnqueueManyV2[Tcomponents=[DT_STRING], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](queue/input_producer, queue/input_producer/RandomShuffle)]]


In [9]:
def calc_hist(u_val, num_bins):
    """Compute a probability-normalized histogram of the values in u_val.

    Args:
      u_val: array-like of values to histogram.
      num_bins: number of equal-width bins spanning [min(u_val), max(u_val)].

    Returns:
      (bin_edges, hist): bin edge array of length num_bins+1 and the
      per-bin relative frequencies (hist sums to 1).
    """
    lo, hi = np.min(u_val), np.max(u_val)
    counts, bin_edges = np.histogram(u_val, range=(lo, hi), bins=num_bins)
    hist = counts / np.sum(counts)
    return bin_edges, hist

In [10]:
def plot_hists(latent_vals, u_val, num_plots, tri_x, tri_eval, tri_centers, tri_heights):
    """Plot, per latent unit: its empirical histogram (with connecting line
    segments and entropy in the title), the summed triangle-kernel evaluations,
    and the fitted triangle heights at the kernel centers.

    Also prints the Gaussian entropy bound and variance for each unit.
    """
    fig, axes = plt.subplots(num_plots, 3, figsize=(20, 5*num_plots))
    for row in range(num_plots):
        entropy = latent_vals[row]
        bin_edges, hist = calc_hist(u_val[row], 20)
        # Differential entropy of a Gaussian with the same variance (upper bound).
        print ("gauss_ent", row, " ", 0.5*np.log(2*np.pi*np.e*np.var(u_val[row])))
        print("u_var", row, " ", np.var(u_val[row]))
        axes[row, 0].scatter(bin_edges[:-1], hist)
        # Connect consecutive histogram points with black line segments.
        for seg in range(len(hist) - 1):
          xs = [bin_edges[seg], bin_edges[seg+1]]
          ys = [hist[seg], hist[seg+1]]
          axes[row, 0].plot(xs, ys, linewidth=2, color="k")
        axes[row, 0].set_title("Entropy="+str(round(entropy,4)), fontsize=18)
        _, y_top = axes[row, 0].get_ylim()
        axes[row, 0].set_ylim((0, y_top))  # histogram axis starts at zero
        axes[row, 1].bar(tri_x[row], np.sum(tri_eval[row], axis=1), width=0.01)
        axes[row, 2].bar(tri_centers, tri_heights[row], width=0.01)
    plt.show()

In [11]:
# Sample a few latent units at random and visualize their histograms,
# triangle-kernel evaluations, and fitted triangle heights.
# NOTE(review): this cell depends on variables that the training cell only
# produces when its commented-out debug lines are uncommented
# (latent_ent_list, u_list, u_tri_evals, tri_heights, tri_centers) -- it will
# NameError on a fresh Restart & Run All.
# NOTE(review): params["num_layers"] is never set in the params cell above;
# confirm whether cae() adds it to params, otherwise this KeyErrors.
num_plots = 5
rand_val_index = np.random.choice(len(latent_ent_list), num_plots, replace=False)
latent_vals = [latent_ent_list[idx] for idx in rand_val_index]
u_vals = np.reshape(u_list[int(params["num_layers"]/2)], newshape=(params["batch_size"], params["n_mem"]))
u_val = [u_vals[:,idx] for idx in rand_val_index]
tri_eval = [u_tri_evals[:, idx, :] for idx in rand_val_index]
tri_x = u_val
#tri_eval = [tri_evals[:, idx, :] for idx in rand_val_index]
#tri_x = [tri_lin_eval[:, idx] for idx in rand_val_index]
tri_height = [tri_heights[idx,:] for idx in rand_val_index]
plot_hists(latent_vals, u_val, num_plots, tri_x, tri_eval, tri_centers, tri_height)


gauss_ent 0   1.69027545047
u_var 0   1.7206
gauss_ent 1   1.77129901853
u_var 1   2.02328
gauss_ent 2   1.83603598634
u_var 2   2.30296
gauss_ent 3   2.01878301497
u_var 3   3.31908
gauss_ent 4   2.0423505694
u_var 4   3.47928

In [ ]: