In [2]:
%autoreload 2

# to make life easier, import all the needed stuff from workspace
# With autoreload enabled this also has the added benefit of making imported functionality
# available to the notebook without the need to rerun the importing cell.
from workspace import *

Loading Model


In [3]:
from zipfile import ZipFile

In [8]:
# Deserialize a previously trained main loop from the checkpoint archive.
# `load` comes from the `workspace` star import above — presumably the
# Blocks pickle-based serialization loader; only open trusted checkpoint
# files, since unpickling can execute arbitrary code.
# NOTE(review): path is relative to the notebook's working directory —
# consider a configurable DATA_DIR if this moves.
with open("char-rnn-200.tar", 'rb') as src:
    main_loop = load(src)

In [20]:
main_loop.profile.total


Out[20]:
defaultdict(int,
            {('after_training',): 0,
             ('after_training', 'Checkpoint'): 0,
             ('after_training', 'DataStreamMonitoring'): 4.118200013181195e-05,
             ('after_training', 'FinishAfter'): 2.182299976993818e-05,
             ('after_training', 'Printing'): 0.0005432170000858605,
             ('before_training',): 0.0001758740000070702,
             ('before_training', 'Checkpoint'): 2.6424000012070792e-05,
             ('before_training',
              'DataStreamMonitoring'): 4.230599999743845e-05,
             ('before_training', 'FinishAfter'): 1.5802999996594735e-05,
             ('before_training', 'Printing'): 5.913699999382516e-05,
             ('initialization',): 12.519453603999992,
             ('training',): 5399.65846744,
             ('training', 'after_epoch'): 328.7014877679999,
             ('training', 'after_epoch', 'Checkpoint'): 0.00019791700060523,
             ('training',
              'after_epoch',
              'DataStreamMonitoring'): 328.69686887100033,
             ('training',
              'after_epoch',
              'FinishAfter'): 0.00023483199993279413,
             ('training', 'after_epoch', 'Printing'): 0.0039027599998462392,
             ('training', 'before_epoch'): 53.11035492600064,
             ('training',
              'before_epoch',
              'Checkpoint'): 0.00018916799987778177,
             ('training',
              'before_epoch',
              'DataStreamMonitoring'): 53.10889230800095,
             ('training',
              'before_epoch',
              'FinishAfter'): 0.00010663299957514027,
             ('training', 'before_epoch', 'Printing'): 0.0009676560000286827,
             ('training', 'epoch'): 5017.845672419999,
             ('training', 'epoch', 'after_batch'): 57.47655860902859,
             ('training',
              'epoch',
              'after_batch',
              'Checkpoint'): 57.07437302599712,
             ('training',
              'epoch',
              'after_batch',
              'DataStreamMonitoring'): 0.11911356801181228,
             ('training',
              'epoch',
              'after_batch',
              'FinishAfter'): 0.043368901008420835,
             ('training',
              'epoch',
              'after_batch',
              'Printing'): 0.16034961599800113,
             ('training', 'epoch', 'before_batch'): 0.41696648099394906,
             ('training',
              'epoch',
              'before_batch',
              'Checkpoint'): 0.06858534100032898,
             ('training',
              'epoch',
              'before_batch',
              'DataStreamMonitoring'): 0.07985220100096058,
             ('training',
              'epoch',
              'before_batch',
              'FinishAfter'): 0.04068121399637903,
             ('training',
              'epoch',
              'before_batch',
              'Printing'): 0.15726627699314122,
             ('training', 'epoch', 'read_data'): 2.253004564981211,
             ('training', 'epoch', 'train'): 4957.513394170987})

In [ ]:

Playing with Main Loops


In [11]:
from word_embeddings import main_loop

In [3]:
main_loop.run()


-------------------------------------------------------------------------------
BEFORE FIRST EPOCH
-------------------------------------------------------------------------------
Training status:
	 batch_interrupt_received: False
	 epoch_interrupt_received: False
	 epoch_started: True
	 epochs_done: 0
	 iterations_done: 0
	 received_first_batch: False
	 resumed_from: None
	 training_started: True
Log records from the iteration 0:
	 test_simple_entropy_apply_cost: 5.717057105039682


-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
	 batch_interrupt_received: False
	 epoch_interrupt_received: False
	 epoch_started: False
	 epochs_done: 1
	 iterations_done: 23
	 received_first_batch: True
	 resumed_from: None
	 training_started: True
Log records from the iteration 23:
	 test_simple_entropy_apply_cost: 5.430238805431428
	 training_finish_requested: True


-------------------------------------------------------------------------------
TRAINING HAS BEEN FINISHED:
-------------------------------------------------------------------------------
Training status:
	 batch_interrupt_received: False
	 epoch_interrupt_received: False
	 epoch_started: False
	 epochs_done: 1
	 iterations_done: 23
	 received_first_batch: True
	 resumed_from: None
	 training_started: True
Log records from the iteration 23:
	 test_simple_entropy_apply_cost: 5.430238805431428
	 training_finish_requested: True
	 training_finished: True