In [1]:
%pylab inline
import pylab as plt
import numpy as np
import cPickle as pkl

# check pretraining progress
model  = 'model_un16_bpe2k_uni_en-zh.npz.current'
config = pkl.load(open('.pretraining/{}.pkl'.format(model), 'rb'))  # model options saved alongside the checkpoint
for c in config:
    print '{}: {}'.format(c, config[c])

data   = np.load('.pretraining/{}.npz'.format(model))
error  = data['history_errs']  # validation error recorded every validFreq (1000) updates

plt.plot(error)
plt.xlabel('Iterations (x 1000)')
plt.ylabel('Valid Error')

plt.show()


Populating the interactive namespace from numpy and matplotlib
decoder: gru_cond
decay_c: 0.0
patience: 1000
max_epochs: 5000
dispFreq: 50
overwrite: False
validFreq: 1000
clip_c: 1.0
n_words_src: 20000
saveto: .pretraining/model_un16_bpe2k_uni_en-zh.npz
valid_batch_size: 64
n_words: 20000
optimizer: adadelta
alpha_c: 0.0
batch_size: 64
encoder: gru
lrate: 0.0001
valid_datasets: ['/misc/kcgscratch1/ChoGroup/thoma_data/un16/devset.un16.en-zh.en.c0.tok.bpe20k.np', '/misc/kcgscratch1/ChoGroup/thoma_data/un16/devset.un16.en-zh.zh.c0.tok.bpe20k.np']
dim: 1028
use_dropout: False
datasets: ['/misc/kcgscratch1/ChoGroup/thoma_data/un16/train.un16.en-zh.en.c0.tok.clean.bpe20k.np', '/misc/kcgscratch1/ChoGroup/thoma_data/un16/train.un16.en-zh.zh.c0.tok.clean.bpe20k.np']
dim_word: 512
sampleFreq: 99
dictionaries: ['/misc/kcgscratch1/ChoGroup/thoma_data/un16/train.un16.en-zh.en.c0.tok.clean.bpe20k.vocab.pkl', '/misc/kcgscratch1/ChoGroup/thoma_data/un16/train.un16.en-zh.zh.c0.tok.clean.bpe20k.vocab.pkl']
reload_: False
maxlen: 50
finish_after: 10000000
saveFreq: 1000
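
The curve above can also be summarized numerically. A minimal follow-up sketch (not part of the original session), using the same error array loaded above; each entry corresponds to one validation round, i.e. validFreq = 1000 updates:

best = np.argmin(error)
print 'best valid error {:.4f} at validation round {} of {}'.format(
    float(error[best]), best + 1, len(error))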

In [2]:
%pylab inline
import pylab as plt
import numpy as np
import cPickle as pkl

# check pretraining progress
model  = 'model_un16_bpe2k_uni_zh-en.npz.current'
config = pkl.load(open('.pretraining/{}.pkl'.format(model), 'rb'))
for c in config:
    print '{}: {}'.format(c, config[c])

data   = np.load('.pretraining/{}.npz'.format(model))
error  = data['history_errs']

plt.plot(error)
plt.xlabel('Iterations (x 1000)')
plt.ylabel('Valid Error')
plt.show()


Populating the interactive namespace from numpy and matplotlib
decoder: gru_cond
decay_c: 0.0
patience: 1000
max_epochs: 5000
dispFreq: 50
overwrite: False
validFreq: 1000
clip_c: 1.0
n_words_src: 20000
saveto: .pretraining/model_un16_bpe2k_uni_zh-en.npz
valid_batch_size: 64
n_words: 20000
optimizer: adadelta
alpha_c: 0.0
batch_size: 64
encoder: gru
lrate: 0.0001
valid_datasets: ['/misc/kcgscratch1/ChoGroup/thoma_data/un16/devset.un16.en-zh.zh.c0.tok.bpe20k.np', '/misc/kcgscratch1/ChoGroup/thoma_data/un16/devset.un16.en-zh.en.c0.tok.bpe20k.np']
dim: 1028
use_dropout: False
datasets: ['/misc/kcgscratch1/ChoGroup/thoma_data/un16/train.un16.en-zh.zh.c0.tok.clean.bpe20k.np', '/misc/kcgscratch1/ChoGroup/thoma_data/un16/train.un16.en-zh.en.c0.tok.clean.bpe20k.np']
dim_word: 512
sampleFreq: 99
dictionaries: ['/misc/kcgscratch1/ChoGroup/thoma_data/un16/train.un16.en-zh.zh.c0.tok.clean.bpe20k.vocab.pkl', '/misc/kcgscratch1/ChoGroup/thoma_data/un16/train.un16.en-zh.en.c0.tok.clean.bpe20k.vocab.pkl']
reload_: False
maxlen: 50
finish_after: 10000000
saveFreq: 1000
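
Since both directions are checkpointed under .pretraining/, their validation curves can be overlaid for a direct comparison. A sketch (not from the original session), assuming both .npz.current files exist:

for m in ['model_un16_bpe2k_uni_en-zh.npz.current',
          'model_un16_bpe2k_uni_zh-en.npz.current']:
    errs = np.load('.pretraining/{}.npz'.format(m))['history_errs']
    plt.plot(errs, label=m.split('_uni_')[1].split('.')[0])  # 'en-zh' / 'zh-en'
plt.xlabel('Iterations (x 1000)')
plt.ylabel('Valid Error')
plt.legend()
plt.show()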

In [10]:
import itertools
a = [1, 2, 3, 4]
# repeat each element of a four times, then flatten back into a single list
b = list(itertools.chain.from_iterable(itertools.repeat(x, 4) for x in a))
print b


[1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
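
numpy is already in the namespace, so the same element-wise repetition can be written without itertools. np.repeat returns an array, hence the conversion back to a list:

b = np.repeat(a, 4).tolist()
print b   # [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]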

In [13]:
c = a + b  # concatenation builds a new list from a and b

In [14]:
c[:] = [1,2,3]  # slice assignment replaces the contents of c in place

In [15]:
c


Out[15]:
[1, 2, 3]
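
Slice assignment mutates the existing list object rather than rebinding the name, so any alias of c observes the change. A quick illustration (hypothetical continuation, values chosen arbitrarily):

d = c            # d is another name for the same list object
c[:] = [7, 8, 9]
print d          # [7, 8, 9] -- the shared object was modified in place
print d is c     # True; plain c = [7, 8, 9] would instead rebind c to a new list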
