In [1]:
#!export PYTHONPATH=./:$PYTHONPATH
import librnn
from librnn.pylearn2.datasets.music import MusicSequence

In [2]:
# Load the JSB-chorales music-sequence splits used throughout the notebook.
# NOTE(review): default `which_set` is presumably 'train' -- confirm in
# librnn.pylearn2.datasets.music.MusicSequence.
dataset = MusicSequence('jsb')
testset = MusicSequence('jsb', which_set='test')

In [5]:
for d,b in dataset.iterator(1, dataset.get_num_examples(), mode='sequential',
                 data_specs=dataset.get_data_specs()):
    d
    #print len(d)
    print d.shape
    #print b.shape


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-5-6e70b87d936c> in <module>()
      1 for d,b in dataset.iterator(10, dataset.get_num_examples(), mode='sequential',
----> 2                  data_specs=dataset.get_data_specs()):
      3     d
      4     #print len(d)
      5     print d.shape

/home/eders/Copy/python/sandbox/kd/PreTRNN/librnn/pylearn2/datasets/music.pyc in iterator(self, batch_size, num_batches, rng, data_specs, return_tuple, mode)
    108                  data_specs=None, return_tuple=False, mode=None):
    109         subset_iterator = self._create_subset_iterator(
--> 110             mode=mode, batch_size=batch_size, num_batches=num_batches, rng=rng
    111         )
    112         # This should be fixed to allow iteration with default data_specs

/home/eders/Copy/python/sandbox/kd/PreTRNN/librnn/pylearn2/datasets/music.pyc in _create_subset_iterator(self, mode, batch_size, num_batches, rng)
    102             rng = make_np_rng()
    103         return subset_iterator(self.get_num_examples(), batch_size,
--> 104                                num_batches, rng)
    105 
    106     @wraps(VectorSpacesDataset.iterator)

/home/eders/python/pylearn2/pylearn2/utils/iteration.pyc in __init__(self, dataset_size, batch_size, num_batches, rng)
    392                                      "batches were requested" %
    393                                      (self._dataset_size, max_num_batches,
--> 394                                       batch_size, num_batches))
    395             else:
    396                 num_batches = np.ceil(self._dataset_size / batch_size)

ValueError: dataset of 229 examples can only provide 23 batches with batch_size 10, but 229 batches were requested

In [4]:
from pylearn2.sandbox.rnn.models.rnn import RNN, GRU, Recurrent
from pylearn2.models.mlp import Sigmoid, CompositeLayer, Linear, MLP, Tanh, CompositeLayer, FlattenerLayer
from pylearn2.space import VectorSpace, CompositeSpace
from pylearn2.sandbox.rnn.space import SequenceDataSpace

In [5]:
dimZ = 10   # latent code dimensionality (m_z / ls_z output width)
dimX = 96   # observed feature dimensionality (input space and x_hat width)
bsize = 1   # sequences per batch
dimH = 48   # recurrent hidden-state dimensionality
# Encoder (inference) network: maps an input sequence X to, per timestep,
# a latent mean `m_z` and log-sigma `ls_z` via a GRU layer.
IH_model = Mmodel = RNN(
    batch_size=bsize,
    input_space=SequenceDataSpace(
      space=VectorSpace(
        dim=dimX
      )
    ),
    layers= [
      GRU(
        layer_name='h_1',
        dim=dimH,
        irange=0.1,
        # NOTE(review): weight_noise=1 enables weight noise in the GRU --
        # confirm intended magnitude against the pylearn2 sandbox RNN docs.
        weight_noise=1
      ),
      # Two parallel linear heads sharing the GRU features: the latent
      # mean and the latent log-sigma, each of width dimZ.
      CompositeLayer(
        layer_name='comp',
        layers=[
          Linear(
            layer_name='m_z',
            dim=dimZ,
            irange=0.1
          ),
          Linear(
            layer_name='ls_z',
            dim=dimZ,
            irange=0.1
          ),
        ]
    )
    ]
)

# Decoder (generative) network: maps a latent sequence Z (width dimZ)
# through a Tanh hidden layer back to sigmoid reconstructions of width dimX.
HI_model = model = RNN(
    batch_size=bsize,
    input_space=SequenceDataSpace(
      space=VectorSpace(
        dim=dimZ
      )
    ),
    layers= [
      Tanh(layer_name='H_decoder', dim=dimH, irange=.01),
      Sigmoid(layer_name='x_hat', dim=dimX, irange=.01)
    ]
)

In [26]:
# Build the VPCN variational model around the encoder/decoder RNNs and
# train it with rmsprop for 200 passes over the training set, reporting
# the test lower bound every 5 iterations.
import vpcn_rnn; reload(vpcn_rnn)
from vpcn_rnn import VPCN
import numpy as np
import time
import gzip, cPickle
from theano import tensor as T
import top; reload(top)

# NOTE(review): L is never used in this cell -- presumably the number of
# Monte Carlo samples consumed inside VPCN; confirm or remove.
L = 1
learning_rate = .001
momentum = .9

# NOTE(review): this rebinds the name `encoder`; a later cell defines a
# function of the same name, so re-running cells out of order shadows it.
encoder = vpcn_rnn.VPCN(IH_model, HI_model, 'rmsprop', learning_rate, momentum)
encoder.initParams()
encoder.createGradientFunctions()

print "Initializing weights and biases"
# NOTE(review): `lowerbound` is never appended to below (only
# `testlowerbound` is) -- likely leftover tracking code; confirm.
lowerbound = np.array([])
testlowerbound = np.array([])

begin = time.time()
for j in xrange(200):
    # encoder.lowerbound accumulates the (negative) ELBO across one epoch.
    encoder.lowerbound = 0
    print 'Iteration:', j
    encoder.iterate(dataset)
    end = time.time()
    # Report the per-example bound: total * batch_size / n_examples.
    print("Iteration %d, lower bound = %.2f,"
          " time = %.2fs"
          % (j, encoder.lowerbound*bsize/dataset.get_num_examples(), end - begin))
    begin = end

    if j % 5 == 0:
        #print "### Calculating test lowerbound "
        testlowerbound = np.append(testlowerbound,encoder.getLowerBound(testset))
        print "### Test lowerbound: %f ###" % testlowerbound[-1]
        print ">>> learning rate: %f" % encoder.opt.lr.get_value()


Initializing weights and biases
Iteration: 0
$> Compiling optimizer.
Iteration 0, lower bound = 577.77, time = 13.24s
### Test lowerbound: 588.762130 ###
>>> learning rate: 0.001000
Iteration: 1
Iteration 1, lower bound = 576.30, time = 3.98s
Iteration: 2
Iteration 2, lower bound = 578.06, time = 3.52s
Iteration: 3
Iteration 3, lower bound = 576.80, time = 3.53s
Iteration: 4
Iteration 4, lower bound = 575.77, time = 3.53s
Iteration: 5
Iteration 5, lower bound = 576.27, time = 3.52s
### Test lowerbound: 587.881639 ###
>>> learning rate: 0.001000
Iteration: 6
Iteration 6, lower bound = 575.50, time = 3.99s
Iteration: 7
Iteration 7, lower bound = 575.20, time = 3.54s
Iteration: 8
Iteration 8, lower bound = 576.18, time = 3.53s
Iteration: 9
Iteration 9, lower bound = 575.50, time = 3.55s
Iteration: 10
Iteration 10, lower bound = 574.92, time = 3.54s
### Test lowerbound: 585.011423 ###
>>> learning rate: 0.001000
Iteration: 11
Iteration 11, lower bound = 575.00, time = 3.99s
Iteration: 12
Iteration 12, lower bound = 574.31, time = 3.53s
Iteration: 13
Iteration 13, lower bound = 574.41, time = 3.53s
Iteration: 14
Iteration 14, lower bound = 574.30, time = 3.52s
Iteration: 15
Iteration 15, lower bound = 573.42, time = 5.36s
### Test lowerbound: 588.266692 ###
>>> learning rate: 0.001000
Iteration: 16
Iteration 16, lower bound = 574.82, time = 7.00s
Iteration: 17
Iteration 17, lower bound = 573.13, time = 5.20s
Iteration: 18
Iteration 18, lower bound = 573.00, time = 3.72s
Iteration: 19
Iteration 19, lower bound = 573.22, time = 3.64s
Iteration: 20
Iteration 20, lower bound = 572.83, time = 3.84s
### Test lowerbound: 584.241369 ###
>>> learning rate: 0.001000
Iteration: 21
Iteration 21, lower bound = 572.37, time = 4.44s
Iteration: 22
Iteration 22, lower bound = 572.71, time = 4.02s
Iteration: 23
Iteration 23, lower bound = 571.46, time = 3.71s
Iteration: 24
Iteration 24, lower bound = 571.20, time = 3.70s
Iteration: 25
Iteration 25, lower bound = 572.45, time = 3.63s
### Test lowerbound: 583.087491 ###
>>> learning rate: 0.001000
Iteration: 26
Iteration 26, lower bound = 571.87, time = 4.14s
Iteration: 27
Iteration 27, lower bound = 572.86, time = 3.65s
Iteration: 28
Iteration 28, lower bound = 572.02, time = 3.68s
Iteration: 29
Iteration 29, lower bound = 572.70, time = 3.65s
Iteration: 30
Iteration 30, lower bound = 571.35, time = 3.62s
### Test lowerbound: 583.911575 ###
>>> learning rate: 0.001000
Iteration: 31
Iteration 31, lower bound = 571.26, time = 4.08s
Iteration: 32
Iteration 32, lower bound = 571.17, time = 3.66s
Iteration: 33
Iteration 33, lower bound = 572.05, time = 3.64s
Iteration: 34
Iteration 34, lower bound = 570.45, time = 3.60s
Iteration: 35
Iteration 35, lower bound = 570.35, time = 3.57s
### Test lowerbound: 585.620910 ###
>>> learning rate: 0.001000
Iteration: 36
Iteration 36, lower bound = 570.52, time = 4.14s
Iteration: 37
Iteration 37, lower bound = 568.75, time = 3.59s
Iteration: 38
Iteration 38, lower bound = 568.95, time = 3.60s
Iteration: 39
Iteration 39, lower bound = 568.36, time = 3.67s
Iteration: 40
Iteration 40, lower bound = 566.08, time = 3.64s
### Test lowerbound: 588.354630 ###
>>> learning rate: 0.001000
Iteration: 41
Iteration 41, lower bound = 567.99, time = 4.04s
Iteration: 42
Iteration 42, lower bound = 563.75, time = 3.69s
Iteration: 43
Iteration 43, lower bound = 564.01, time = 3.68s
Iteration: 44
Iteration 44, lower bound = 563.76, time = 4.32s
Iteration: 45
Iteration 45, lower bound = 565.25, time = 5.03s
### Test lowerbound: 580.981690 ###
>>> learning rate: 0.001000
Iteration: 46
Iteration 46, lower bound = 563.30, time = 5.54s
Iteration: 47
Iteration 47, lower bound = 562.49, time = 4.80s
Iteration: 48
Iteration 48, lower bound = 563.46, time = 5.00s
Iteration: 49
Iteration 49, lower bound = 563.17, time = 5.37s
Iteration: 50
Iteration 50, lower bound = 563.04, time = 4.87s
### Test lowerbound: 581.878391 ###
>>> learning rate: 0.001000
Iteration: 51
Iteration 51, lower bound = 563.71, time = 5.30s
Iteration: 52
Iteration 52, lower bound = 562.45, time = 5.03s
Iteration: 53
Iteration 53, lower bound = 562.24, time = 5.07s
Iteration: 54
Iteration 54, lower bound = 560.47, time = 5.30s
Iteration: 55
Iteration 55, lower bound = 561.35, time = 5.10s
### Test lowerbound: 581.321374 ###
>>> learning rate: 0.001000
Iteration: 56
Iteration 56, lower bound = 561.77, time = 6.53s
Iteration: 57
Iteration 57, lower bound = 562.44, time = 5.30s
Iteration: 58
Iteration 58, lower bound = 561.72, time = 4.94s
Iteration: 59
Iteration 59, lower bound = 561.68, time = 4.59s
Iteration: 60
Iteration 60, lower bound = 560.46, time = 3.64s
### Test lowerbound: 577.985692 ###
>>> learning rate: 0.001000
Iteration: 61
Iteration 61, lower bound = 560.06, time = 4.08s
Iteration: 62
Iteration 62, lower bound = 560.20, time = 3.55s
Iteration: 63
Iteration 63, lower bound = 560.80, time = 3.55s
Iteration: 64
Iteration 64, lower bound = 559.66, time = 3.55s
Iteration: 65
Iteration 65, lower bound = 560.47, time = 3.54s
### Test lowerbound: 580.350158 ###
>>> learning rate: 0.001000
Iteration: 66
Iteration 66, lower bound = 560.52, time = 3.99s
Iteration: 67
Iteration 67, lower bound = 558.43, time = 3.56s
Iteration: 68
Iteration 68, lower bound = 558.60, time = 3.53s
Iteration: 69
Iteration 69, lower bound = 557.74, time = 3.52s
Iteration: 70
Iteration 70, lower bound = 559.36, time = 3.52s
### Test lowerbound: 576.655895 ###
>>> learning rate: 0.001000
Iteration: 71
Iteration 71, lower bound = 557.50, time = 3.97s
Iteration: 72
Iteration 72, lower bound = 556.64, time = 3.51s
Iteration: 73
Iteration 73, lower bound = 558.63, time = 3.51s
Iteration: 74
Iteration 74, lower bound = 557.26, time = 3.51s
Iteration: 75
Iteration 75, lower bound = 556.96, time = 3.53s
### Test lowerbound: 576.685113 ###
>>> learning rate: 0.001000
Iteration: 76
Iteration 76, lower bound = 556.70, time = 3.97s
Iteration: 77
Iteration 77, lower bound = 558.27, time = 3.52s
Iteration: 78
Iteration 78, lower bound = 557.15, time = 3.52s
Iteration: 79
Iteration 79, lower bound = 558.00, time = 3.52s
Iteration: 80
Iteration 80, lower bound = 556.34, time = 3.52s
### Test lowerbound: 570.067454 ###
>>> learning rate: 0.001000
Iteration: 81
Iteration 81, lower bound = 555.82, time = 3.97s
Iteration: 82
Iteration 82, lower bound = 555.60, time = 3.52s
Iteration: 83
Iteration 83, lower bound = 555.51, time = 3.53s
Iteration: 84
Iteration 84, lower bound = 555.69, time = 3.52s
Iteration: 85
Iteration 85, lower bound = 554.57, time = 3.52s
### Test lowerbound: 573.918423 ###
>>> learning rate: 0.001000
Iteration: 86
Iteration 86, lower bound = 555.80, time = 3.97s
Iteration: 87
Iteration 87, lower bound = 554.99, time = 3.52s
Iteration: 88
Iteration 88, lower bound = 556.26, time = 3.52s
Iteration: 89
Iteration 89, lower bound = 555.69, time = 3.52s
Iteration: 90
Iteration 90, lower bound = 555.97, time = 3.52s
### Test lowerbound: 569.606148 ###
>>> learning rate: 0.001000
Iteration: 91
Iteration 91, lower bound = 555.81, time = 3.97s
Iteration: 92
Iteration 92, lower bound = 554.57, time = 3.53s
Iteration: 93
Iteration 93, lower bound = 554.41, time = 3.52s
Iteration: 94
Iteration 94, lower bound = 555.14, time = 3.52s
Iteration: 95
Iteration 95, lower bound = 554.52, time = 3.52s
### Test lowerbound: 577.163464 ###
>>> learning rate: 0.001000
Iteration: 96
Iteration 96, lower bound = 555.41, time = 3.96s
Iteration: 97
Iteration 97, lower bound = 554.99, time = 3.52s
Iteration: 98
Iteration 98, lower bound = 553.36, time = 3.52s
Iteration: 99
Iteration 99, lower bound = 553.35, time = 3.52s
Iteration: 100
Iteration 100, lower bound = 553.17, time = 3.52s
### Test lowerbound: 573.598744 ###
>>> learning rate: 0.001000
Iteration: 101
Iteration 101, lower bound = 552.82, time = 3.97s
Iteration: 102
Iteration 102, lower bound = 553.09, time = 3.51s
Iteration: 103
Iteration 103, lower bound = 552.80, time = 3.53s
Iteration: 104
Iteration 104, lower bound = 553.05, time = 3.52s
Iteration: 105
Iteration 105, lower bound = 553.20, time = 3.57s
### Test lowerbound: 573.440768 ###
>>> learning rate: 0.001000
Iteration: 106
Iteration 106, lower bound = 553.29, time = 3.97s
Iteration: 107
Iteration 107, lower bound = 552.71, time = 3.52s
Iteration: 108
Iteration 108, lower bound = 551.27, time = 3.53s
Iteration: 109
Iteration 109, lower bound = 553.39, time = 3.52s
Iteration: 110
Iteration 110, lower bound = 552.05, time = 3.52s
### Test lowerbound: 574.255641 ###
>>> learning rate: 0.001000
Iteration: 111
Iteration 111, lower bound = 553.58, time = 3.97s
Iteration: 112
Iteration 112, lower bound = 553.15, time = 3.52s
Iteration: 113
Iteration 113, lower bound = 551.57, time = 3.52s
Iteration: 114
Iteration 114, lower bound = 551.70, time = 3.52s
Iteration: 115
Iteration 115, lower bound = 551.86, time = 3.52s
### Test lowerbound: 572.591988 ###
>>> learning rate: 0.001000
Iteration: 116
Iteration 116, lower bound = 551.00, time = 3.97s
Iteration: 117
Iteration 117, lower bound = 552.48, time = 3.53s
Iteration: 118
Iteration 118, lower bound = 552.31, time = 3.53s
Iteration: 119
Iteration 119, lower bound = 552.42, time = 3.52s
Iteration: 120
Iteration 120, lower bound = 551.39, time = 3.52s
### Test lowerbound: 571.395018 ###
>>> learning rate: 0.001000
Iteration: 121
Iteration 121, lower bound = 552.37, time = 3.97s
Iteration: 122
Iteration 122, lower bound = 554.54, time = 3.55s
Iteration: 123
Iteration 123, lower bound = 552.85, time = 3.53s
Iteration: 124
Iteration 124, lower bound = 552.66, time = 3.53s
Iteration: 125
Iteration 125, lower bound = 552.62, time = 3.52s
### Test lowerbound: 568.482276 ###
>>> learning rate: 0.001000
Iteration: 126
Iteration 126, lower bound = 551.46, time = 3.97s
Iteration: 127
Iteration 127, lower bound = 552.42, time = 3.52s
Iteration: 128
Iteration 128, lower bound = 552.94, time = 3.53s
Iteration: 129
Iteration 129, lower bound = 551.67, time = 3.52s
Iteration: 130
Iteration 130, lower bound = 550.91, time = 3.53s
### Test lowerbound: 569.676359 ###
>>> learning rate: 0.001000
Iteration: 131
Iteration 131, lower bound = 551.50, time = 3.97s
Iteration: 132
Iteration 132, lower bound = 552.79, time = 3.52s
Iteration: 133
Iteration 133, lower bound = 551.95, time = 3.53s
Iteration: 134
Iteration 134, lower bound = 550.32, time = 3.53s
Iteration: 135
Iteration 135, lower bound = 552.05, time = 3.52s
### Test lowerbound: 572.097944 ###
>>> learning rate: 0.001000
Iteration: 136
Iteration 136, lower bound = 552.33, time = 3.98s
Iteration: 137
Iteration 137, lower bound = 552.27, time = 3.52s
Iteration: 138
Iteration 138, lower bound = 551.74, time = 3.53s
Iteration: 139
Iteration 139, lower bound = 551.64, time = 3.52s
Iteration: 140
Iteration 140, lower bound = 550.51, time = 3.52s
### Test lowerbound: 574.124655 ###
>>> learning rate: 0.001000
Iteration: 141
Iteration 141, lower bound = 549.98, time = 3.98s
Iteration: 142
Iteration 142, lower bound = 550.39, time = 3.52s
Iteration: 143
Iteration 143, lower bound = 552.18, time = 3.51s
Iteration: 144
Iteration 144, lower bound = 550.74, time = 3.51s
Iteration: 145
Iteration 145, lower bound = 550.51, time = 3.51s
### Test lowerbound: 568.037495 ###
>>> learning rate: 0.001000
Iteration: 146
Iteration 146, lower bound = 550.77, time = 3.97s
Iteration: 147
Iteration 147, lower bound = 551.30, time = 3.52s
Iteration: 148
Iteration 148, lower bound = 550.40, time = 3.52s
Iteration: 149
Iteration 149, lower bound = 550.64, time = 3.50s
Iteration: 150
Iteration 150, lower bound = 549.81, time = 3.53s
### Test lowerbound: 573.066777 ###
>>> learning rate: 0.001000
Iteration: 151
Iteration 151, lower bound = 552.18, time = 3.95s
Iteration: 152
Iteration 152, lower bound = 550.90, time = 3.51s
Iteration: 153
Iteration 153, lower bound = 551.48, time = 3.51s
Iteration: 154
Iteration 154, lower bound = 550.29, time = 3.52s
Iteration: 155
Iteration 155, lower bound = 549.24, time = 3.52s
### Test lowerbound: 571.869093 ###
>>> learning rate: 0.001000
Iteration: 156
Iteration 156, lower bound = 550.59, time = 3.97s
Iteration: 157
Iteration 157, lower bound = 550.39, time = 3.51s
Iteration: 158
Iteration 158, lower bound = 549.83, time = 3.53s
Iteration: 159
Iteration 159, lower bound = 551.93, time = 3.52s
Iteration: 160
Iteration 160, lower bound = 550.31, time = 3.52s
### Test lowerbound: 569.507074 ###
>>> learning rate: 0.001000
Iteration: 161
Iteration 161, lower bound = 550.61, time = 3.96s
Iteration: 162
Iteration 162, lower bound = 550.39, time = 3.52s
Iteration: 163
Iteration 163, lower bound = 551.09, time = 3.51s
Iteration: 164
Iteration 164, lower bound = 550.74, time = 3.52s
Iteration: 165
Iteration 165, lower bound = 550.68, time = 3.51s
### Test lowerbound: 573.199086 ###
>>> learning rate: 0.001000
Iteration: 166
Iteration 166, lower bound = 550.22, time = 3.98s
Iteration: 167
Iteration 167, lower bound = 550.50, time = 3.52s
Iteration: 168
Iteration 168, lower bound = 550.15, time = 3.52s
Iteration: 169
Iteration 169, lower bound = 552.62, time = 3.52s
Iteration: 170
Iteration 170, lower bound = 551.31, time = 3.52s
### Test lowerbound: 570.849008 ###
>>> learning rate: 0.001000
Iteration: 171
Iteration 171, lower bound = 551.26, time = 3.95s
Iteration: 172
Iteration 172, lower bound = 549.80, time = 3.52s
Iteration: 173
Iteration 173, lower bound = 550.14, time = 3.51s
Iteration: 174
Iteration 174, lower bound = 551.45, time = 3.52s
Iteration: 175
Iteration 175, lower bound = 551.85, time = 3.53s
### Test lowerbound: 570.061188 ###
>>> learning rate: 0.001000
Iteration: 176
Iteration 176, lower bound = 551.34, time = 3.97s
Iteration: 177
Iteration 177, lower bound = 549.86, time = 3.52s
Iteration: 178
Iteration 178, lower bound = 550.29, time = 3.52s
Iteration: 179
Iteration 179, lower bound = 550.74, time = 3.50s
Iteration: 180
Iteration 180, lower bound = 548.54, time = 3.52s
### Test lowerbound: 571.531962 ###
>>> learning rate: 0.001000
Iteration: 181
Iteration 181, lower bound = 550.51, time = 3.97s
Iteration: 182
Iteration 182, lower bound = 548.70, time = 3.53s
Iteration: 183
Iteration 183, lower bound = 551.28, time = 3.52s
Iteration: 184
Iteration 184, lower bound = 549.75, time = 3.53s
Iteration: 185
Iteration 185, lower bound = 549.17, time = 3.51s
### Test lowerbound: 570.417854 ###
>>> learning rate: 0.001000
Iteration: 186
Iteration 186, lower bound = 550.05, time = 3.97s
Iteration: 187
Iteration 187, lower bound = 549.85, time = 3.52s
Iteration: 188
Iteration 188, lower bound = 549.18, time = 3.52s
Iteration: 189
Iteration 189, lower bound = 549.78, time = 3.52s
Iteration: 190
Iteration 190, lower bound = 549.19, time = 3.51s
### Test lowerbound: 568.217932 ###
>>> learning rate: 0.001000
Iteration: 191
Iteration 191, lower bound = 548.83, time = 3.95s
Iteration: 192
Iteration 192, lower bound = 551.12, time = 3.52s
Iteration: 193
Iteration 193, lower bound = 550.11, time = 3.51s
Iteration: 194
Iteration 194, lower bound = 549.63, time = 3.52s
Iteration: 195
Iteration 195, lower bound = 547.58, time = 3.51s
### Test lowerbound: 566.749225 ###
>>> learning rate: 0.001000
Iteration: 196
Iteration 196, lower bound = 549.64, time = 3.97s
Iteration: 197
Iteration 197, lower bound = 550.48, time = 3.51s
Iteration: 198
Iteration 198, lower bound = 551.22, time = 3.52s
Iteration: 199
Iteration 199, lower bound = 550.45, time = 3.52s

In [27]:
%matplotlib inline
from pylab import imshow, plot

In [28]:
import theano
def decoder(HI_model):
    Z = T.tensor3()
    return theano.function([Z], HI_model.fprop(Z))

def encoder(IH_model, dimZ, deterministic=True):
    """Compile and return a theano function mapping an input sequence
    tensor to its latent code.

    Parameters
    ----------
    IH_model : RNN whose fprop returns a (mean, log_sigma) pair of
        sequences (the m_z / ls_z composite head).
    dimZ : latent dimensionality.  Unused in the deterministic path;
        kept for backward compatibility with existing callers.
    deterministic : if True the code is simply the mean; the sampling
        path is not implemented.

    Raises
    ------
    NotImplementedError : when deterministic is False.
    """
    # NOTE(review): the original also built `zero = np.zeros((batch_size,
    # dimZ))`, which was never used -- dead code removed here.
    X = T.tensor3()
    m, ls = IH_model.fprop(X)  # per-timestep mean and log-sigma
    if deterministic:
        Z = m  # ignore log-sigma: the code is the posterior mean
    else:
        raise NotImplementedError('use deterministic = True for now...'
                                  )
    return theano.function([X], Z)

# Compile the decode/encode functions for the trained models.
# NOTE(review): the original passed a hard-coded 20 here although the
# models above were built with dimZ = 10; the argument is unused in the
# deterministic path, so pass dimZ for consistency.
fdecoder = decoder(HI_model)
fencoder = encoder(IH_model, dimZ)

In [29]:
# Decode 100 timesteps of standard-normal latents (shape: time x batch x dimZ).
X = fdecoder(np.random.normal(0,1,(100,1,dimZ)).astype('float32'))
# NOTE(review): `d` is the last batch leaked from the iteration cell far
# above -- hidden cross-cell state; that cell must be re-run first.
Z = fencoder(d)    # encode the last training sequence
Xh = fdecoder(Z)   # and reconstruct it

In [30]:
_ = plot(Z[:,0,:].T)



In [37]:
from pylab import subplot, title
# Side-by-side piano-roll comparison: thresholded reconstruction vs original.
# (pyplot state machine: each subplot() call targets subsequent title/imshow.)
subplot(121)
title('hat')
# Binarize the reconstruction at 0.5; negate so active notes render dark.
_ = imshow(-(Xh[:,0,:]>.5).T,cmap='gray')
subplot(122)
title('original')
_ = imshow(-d[:,0,:].T, cmap='gray')



In [ ]: