In [5]:
import collections
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sklearn
import sklearn.metrics
import seaborn as sns
from sklearn import svm
from my_rbm import Rbm
import six.moves.cPickle as pickle
import sys
from pandas import DataFrame
from sklearn.preprocessing import OneHotEncoder
from yadlt.models.rbm_models import dbn
%matplotlib inline

In [2]:
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
train_images = mnist.train.images
test_images = mnist.test.images


Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

In [3]:
num_hidden = 64
num_epochs=1
rbm = Rbm(num_hidden=num_hidden, num_classes=10, num_features=784, learning_rate=0.01)
rbm.init_rbm()
rbm.fit(mnist.train.images, mnist.test.images, num_epochs=num_epochs)


Number of features: 784
Number of classes: 10
logit shape:  (?, 10)
batch_labels shape:  (?, 10)
epoch: 0
batch_number: 0
batch_number: 1000
batch_number: 2000
batch_number: 3000
batch_number: 4000
batch_number: 5000
rec_loss: 0.161828

In [28]:
probs_index = dict()
probs = dict()
probs_avg = dict()
probs_min = dict()
probs_max = dict()
x = [i for i in range(num_hidden)]
for i in range(10):
    probs_index[i] = [idx for idx, label in enumerate(mnist.train.labels) if label == i]
    probs[i] = [rbm.get_h_prob_out([mnist.train.images[idx]])[0] for idx in probs_index[i]]
    probs_avg[i] = np.average(probs[i], axis=0)
    probs_min[i] = np.min(probs[i], axis=0)
    probs_max[i] = np.max(probs[i], axis=0)
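
The loop above calls get_h_prob_out once per image. A batched sketch of the same statistics, assuming get_h_prob_out also accepts a 2-D array of images (the notebook only ever passes single-image lists, so this is an assumption about the Rbm class):

for i in range(10):
    batch = mnist.train.images[mnist.train.labels == i]  # all training images of digit i
    h = np.asarray(rbm.get_h_prob_out(batch))            # assumed batched call
    probs_avg[i] = h.mean(axis=0)
    probs_min[i] = h.min(axis=0)
    probs_max[i] = h.max(axis=0)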

In [30]:
num = 0
sorted_indexes = np.argsort(probs_avg[num])
for i in range(10):
    probs_avg[i] = [probs_avg[i][j] for j in sorted_indexes]
col_num = 10
plt.figure()
plt.suptitle('Average of activation probabilities after %s epochs, sorted by number: %s' % (num_epochs, str(num)))
for cidx in range(col_num):
    plt.subplot(col_num, 1, cidx + 1, axisbg='white')
    plt.bar(x, probs_avg[cidx])
    plt.xticks(())
    plt.yticks(())



In [31]:
num = 0
sorted_indexes = np.argsort(probs_min[num])
for i in range(10):
    probs_min[i] = [probs_min[i][j] for j in sorted_indexes]
col_num = 10
plt.figure()
plt.suptitle('Minimum of activation probabilities after %s epochs, sorted by number: %s' % (num_epochs, str(num)))
for cidx in range(col_num):
    plt.subplot(col_num, 1, cidx + 1, axisbg='white')
    plt.bar(x, probs_min[cidx])
    plt.xticks(())
    plt.yticks(())



In [32]:
num = 0
sorted_indexes = np.argsort(probs_max[num])
for i in range(10):
    probs_max[i] = [probs_max[i][j] for j in sorted_indexes]
col_num = 10
plt.figure()
plt.suptitle('Maximum of activation probabilities after %s epochs, sorted by number: %s' % (num_epochs, str(num)))
for cidx in range(col_num):
    plt.subplot(col_num, 1, cidx + 1, axisbg='white')
    plt.bar(x, probs_max[cidx])
    plt.xticks(())
    plt.yticks(())


Supervised training of the network


In [4]:
mnist_oh = input_data.read_data_sets("MNIST_data/", one_hot=True)
rbm.fit_predictor(train_data=mnist_oh.train.images, train_labels=mnist_oh.train.labels,
                  test_data=mnist_oh.test.images, test_labels=mnist_oh.test.labels, num_steps=30000)


Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
Initialized fit predictor.
[interleaved "Adding run metadata for N" lines elided]
step 0, training accuracy 0
step 1000, training accuracy 0.9
step 2000, training accuracy 1
step 3000, training accuracy 0.9
step 4000, training accuracy 1
step 5000, training accuracy 1
step 6000, training accuracy 0.9
step 7000, training accuracy 0.9
step 8000, training accuracy 1
step 9000, training accuracy 1
step 10000, training accuracy 1
step 11000, training accuracy 1
step 12000, training accuracy 0.9
step 13000, training accuracy 1
step 14000, training accuracy 0.8
step 15000, training accuracy 0.9
step 16000, training accuracy 1
step 17000, training accuracy 0.9
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-4-a65ee607ac7a> in <module>()
      1 mnist_oh = input_data.read_data_sets("MNIST_data/", one_hot=True)
      2 rbm.fit_predictor(train_data=mnist_oh.train.images, train_labels=mnist_oh.train.labels,
----> 3                   test_data=mnist_oh.test.images, test_labels=mnist_oh.test.labels, num_steps=30000)

/home/kkari/UniStuff/HetedikFelev/bsc_thesis/my_rbm.py in fit_predictor(self, train_data, train_labels, test_data, test_labels, num_steps)
    378                                            self.batch_labels: batch_labels,
    379                                            self.h_rand: np.random.rand(batch_data.shape[0],
--> 380                                                                        self.num_hidden)
    381                                            })
    382 

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    380     try:
    381       result = self._run(None, fetches, feed_dict, options_ptr,
--> 382                          run_metadata_ptr)
    383       if run_metadata:
    384         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    608     # Validate and process feed_dict.
    609     if feed_dict:
--> 610       feed_dict = nest.flatten_dict_items(feed_dict)
    611       for feed, feed_val in feed_dict.items():
    612         for subfeed, subfeed_val in _feed_fn(feed, feed_val):

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/util/nest.py in flatten_dict_items(dictionary)
    172   flat_dictionary = {}
    173   for i, v in six.iteritems(dictionary):
--> 174     if not is_sequence(i):
    175       if i in flat_dictionary:
    176         raise ValueError(

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/util/nest.py in is_sequence(seq)
     77     True if the sequence is a not a string and is a collections.Sequence.
     78   """
---> 79   return (isinstance(seq, collections.Sequence)
     80           and not isinstance(seq, six.string_types))
     81 

/home/kkari/DevTools/anaconda3/lib/python3.5/abc.py in __instancecheck__(cls, instance)
    180         # Inline the cache checking
    181         subclass = instance.__class__
--> 182         if subclass in cls._abc_cache:
    183             return True
    184         subtype = type(instance)

/home/kkari/DevTools/anaconda3/lib/python3.5/_weakrefset.py in __contains__(self, item)
     68         return len(self.data) - len(self._pending_removals)
     69 
---> 70     def __contains__(self, item):
     71         try:
     72             wr = ref(item)

KeyboardInterrupt: 

In [35]:
probs_index = dict()
probs = dict()
probs_avg = dict()
probs_min = dict()
probs_max = dict()
x = [i for i in range(num_hidden)]
for i in range(10):
    probs_index[i] = [idx for idx, label in enumerate(mnist.train.labels) if label == i]
    probs[i] = [rbm.get_h_prob_out([mnist.train.images[idx]])[0] for idx in probs_index[i]]
    probs_avg[i] = np.average(probs[i], axis=0)
    probs_min[i] = np.min(probs[i], axis=0)
    probs_max[i] = np.max(probs[i], axis=0)

In [37]:
num = 0
sorted_indexes = np.argsort(probs_avg[num])
for i in range(10):
    probs_avg[i] = [probs_avg[i][j] for j in sorted_indexes]
col_num = 10
plt.figure()
plt.suptitle('Average of activation probabilities after %s epochs, sorted by number: %s' % (num_epochs, str(num)))
for cidx in range(col_num):
    plt.subplot(col_num, 1, cidx + 1, axisbg='white')
    plt.bar(x, probs_avg[cidx])
    plt.xticks(())
    plt.yticks(())



In [36]:
num = 0
sorted_indexes = np.argsort(probs_min[num])
for i in range(10):
    probs_min[i] = [probs_min[i][j] for j in sorted_indexes]
col_num = 10
plt.figure()
plt.suptitle('Minimum of activation probabilities after %s epochs, sorted by number: %s' % (num_epochs, str(num)))
for cidx in range(col_num):
    plt.subplot(col_num, 1, cidx + 1, axisbg='white')
    plt.bar(x, probs_min[cidx])
    plt.xticks(())
    plt.yticks(())



In [42]:
probs_avg_df = DataFrame(probs_avg)

In [69]:
probs_avg_df.transpose().corr()


Out[69]:
[64 × 64 correlation matrix between the hidden units' per-digit average activation profiles; pandas truncates the interactive display — 64 rows × 64 columns]


In [68]:
sns.heatmap(probs_avg_df.transpose().corr())


Out[68]:
<matplotlib.axes._subplots.AxesSubplot at 0x7f9e926740b8>

In [43]:
row_num = 1
col_num = 10
plt.figure(figsize=(col_num * 2, row_num + 1))
for ridx in range(row_num):
    for cidx in range(col_num):
        plt.subplot(row_num, col_num, (ridx * col_num + cidx) + 1, axisbg='white')
        plt.bar(x, probs[ridx][cidx])
        plt.xticks(())
        plt.yticks(())



In [117]:
row_num = 1
col_num = 10
plt.figure(figsize=(col_num * 2, row_num + 1))
for ridx in range(row_num):
    for cidx in range(col_num):
        plt.subplot(row_num, col_num, (ridx * col_num + cidx) + 1, axisbg='white')
        plt.bar(x, probs[ridx + 1][cidx])
        plt.xticks(())
        plt.yticks(())



In [125]:
row_num = 1
col_num = 10
number_offset = 3
plt.figure(figsize=(col_num * 2, row_num + 1))
for ridx in range(row_num):
    for cidx in range(col_num):
        plt.subplot(row_num, col_num, (ridx * col_num + cidx) + 1, axisbg='white')
        plt.bar(x, probs[ridx + number_offset][cidx])
        plt.xticks(())
        plt.yticks(())



In [126]:
row_num = 1
col_num = 10
offset = 3
plt.figure(figsize=(col_num * 2, row_num + 1))
for ridx in range(row_num):
    for cidx in range(col_num):
        plt.subplot(row_num, col_num, (ridx * col_num + cidx) + 1, axisbg='white')
        plt.imshow(mnist.train.images[probs_index[ridx + offset][cidx]].reshape(28,28))
        plt.xticks(())
        plt.yticks(())



In [7]:
# extract hidden-unit activation probabilities for every training image
last_output = np.empty((0, rbm.get_h_prob_out([train_images[0]]).shape[1]))
for i in range(mnist.train.images.shape[0]):
    last_output = np.vstack((last_output, rbm.get_h_prob_out([mnist.train.images[i]])))
    if i % 1000 == 0:
        print('done with: %s' % i)


done with: 0
done with: 1000
...
done with: 54000
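
Stacking one row at a time with np.vstack copies the whole array on every iteration, so this loop is quadratic in the number of images. A chunked sketch, again under the assumption that get_h_prob_out accepts a batch:

chunk = 1000
last_output = np.concatenate(
    [np.asarray(rbm.get_h_prob_out(mnist.train.images[i:i + chunk]))
     for i in range(0, mnist.train.images.shape[0], chunk)],
    axis=0)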

In [8]:
# same extraction for the test images
last_output_test = np.empty((0, rbm.get_h_prob_out([mnist.test.images[0]]).shape[1]))
for i in range(mnist.test.images.shape[0]):
    last_output_test = np.vstack((last_output_test, rbm.get_h_prob_out([mnist.test.images[i]])))
    if i % 999 == 0:
        print('done with: %s' % (i + 1))


done with: 1
done with: 1000
done with: 1999
done with: 2998
done with: 3997
done with: 4996
done with: 5995
done with: 6994
done with: 7993
done with: 8992
done with: 9991

In [18]:
sklearn.learn.datasets
last_output.shape


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-18-bcbfc9d39f64> in <module>()
----> 1 sklearn.learn.datasets
      2 last_output.shape

AttributeError: module 'sklearn' has no attribute 'learn'

In [4]:
clf = svm.SVC(decision_function_shape='ovr', kernel='poly', degree=4, coef0=1.0)
clf.fit(mnist.train.images, mnist.train.labels)
clf.score(mnist.test.images, y_true=mnist.test.labels)


---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-4-abc682d071eb> in <module>()
      1 clf = svm.SVC(decision_function_shape='ovr', kernel='poly', degree=4, coef0=1.0)
      2 clf.fit(mnist.train.images, mnist.train.labels)
----> 3 clf.score(mnist.test.images, y_true=mnist.test.labels)

TypeError: score() got an unexpected keyword argument 'y_true'

In [11]:
clf = svm.SVC(decision_function_shape='ovr', kernel='sigmoid', coef0=1.0)
clf.fit(last_output, mnist.train.labels)
y_pred = clf.predict(last_output_test)
sklearn.metrics.accuracy_score(y_pred=y_pred, y_true=mnist.test.labels)


Out[11]:
0.1135

In [12]:
clf = svm.SVC(decision_function_shape='ovr', kernel='sigmoid', coef0=0.0)
clf.fit(last_output, mnist.train.labels)
y_pred = clf.predict(last_output_test)
sklearn.metrics.accuracy_score(y_pred=y_pred, y_true=mnist.test.labels)


Out[12]:
0.1135

In [16]:
clf = svm.SVC(decision_function_shape='ovr', kernel='poly', degree=2, coef0=0.0)
clf.fit(last_output, mnist.train.labels)
y_pred = clf.predict(last_output_test)
sklearn.metrics.accuracy_score(y_pred=y_pred, y_true=mnist.test.labels)


Out[16]:
0.94159999999999999

In [17]:
clf = svm.SVC(decision_function_shape='ovr', kernel='poly', degree=2, coef0=1.0)
clf.fit(last_output, mnist.train.labels)
y_pred = clf.predict(last_output_test)
sklearn.metrics.accuracy_score(y_pred=y_pred, y_true=mnist.test.labels)


Out[17]:
0.9395

In [21]:
clf_raw = svm.SVC(decision_function_shape='ovr', kernel='linear')
clf_raw.fit(mnist.train.images, mnist.train.labels)


Out[21]:
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
  decision_function_shape='ovo', degree=3, gamma='auto', kernel='linear',
  max_iter=-1, probability=False, random_state=None, shrinking=True,
  tol=0.001, verbose=False)

In [23]:
y_pred_raw = clf_raw.predict(mnist.test.images)
sklearn.metrics.accuracy_score(y_pred=y_pred_raw, y_true=mnist.test.labels)


Out[23]:
0.93930000000000002

In [45]:
a = np.asarray([[1,2,3], [4,5,6], [7,8,9]])

with open("foo.csv", 'ab') as f:
    np.savetxt(f, dw, delimiter=',')

In [22]:
import sys
sys.getsizeof(dw_0) / 1024 / 1024 * mnist.test.images.shape[0]


Out[22]:
7657.16552734375
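
In other words, keeping one dw_0-sized array per test image would occupy roughly 7.5 GB, which is presumably why the earlier cell appends to foo.csv on disk instead of accumulating the arrays in memory.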
  • raw mnist: 0.9393
  • mnist linear, 64 hidden prob: 0.9239
  • mnist rbf, 64 hidden prob: 0.9472
  • poly coef0=0, d=3: 0.9409
  • poly coef0=0, d=2: 0.9416
  • poly coef0=1, d=3: 0.9542
  • poly coef0=1, d=2: 0.9395
  • sigmoid coef0=0: 0.1135
  • sigmoid coef0=1: 0.1135

(0.1135 is exactly the share of the digit 1 in the MNIST test set, i.e. the sigmoid-kernel SVMs collapse to predicting a single class.)
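
For reproducibility, the whole comparison can be run as one sweep; a minimal sketch using the hidden-feature matrices computed above (kernel configurations copied from the cells in this section; each fit on 55000 × 64 features takes a while):

for cfg in [dict(kernel='linear'),
            dict(kernel='rbf'),
            dict(kernel='poly', degree=3, coef0=0.0),
            dict(kernel='poly', degree=2, coef0=0.0),
            dict(kernel='poly', degree=3, coef0=1.0),
            dict(kernel='poly', degree=2, coef0=1.0),
            dict(kernel='sigmoid', coef0=0.0),
            dict(kernel='sigmoid', coef0=1.0)]:
    clf = svm.SVC(decision_function_shape='ovr', **cfg)
    clf.fit(last_output, mnist.train.labels)
    print(cfg, clf.score(last_output_test, mnist.test.labels))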

Train MLP without pretraining


In [14]:
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
train_images = mnist.train.images
test_images = mnist.test.images


Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

In [5]:
num_hidden = 64
rbm = Rbm(num_hidden=num_hidden, num_classes=10, num_features=784, learning_rate=0.01)
rbm.init_rbm()


Number of features: 784
Number of classes: 10
logit shape:  (?, 10)
batch_labels shape:  (?, 10)

In [12]:
rbm.fit(mnist.train.images, mnist.test.images, num_epochs=1)
rbm.fit_predictor(train_data=mnist.train.images, 
                  train_labels=mnist.train.labels, 
                  test_data=mnist.test.images, 
                  test_labels=mnist.test.labels)


epoch: 0
batch_number: 0
batch_number: 1000
batch_number: 2000
batch_number: 3000
batch_number: 4000
batch_number: 5000
rec_loss: 0.160837
Initialized fit predictor.
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-12-06a162b12a95> in <module>()
      3                   train_labels=mnist.train.labels,
      4                   test_data=mnist.test.images,
----> 5                   test_labels=mnist.test.labels)

/home/kkari/UniStuff/HetedikFelev/bsc_thesis/my_rbm.py in fit_predictor(self, train_data, train_labels, test_data, test_labels, num_steps)
    372             # Generate a minibatch.
    373             batch_data = train_dataset_permuted[offset:(offset + self.batch_size), :]
--> 374             batch_labels = train_labels_permuted[offset:(offset + self.batch_size), :]
    375 
    376             self.tf_session.run(self.optimizer,

IndexError: too many indices for array
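
The IndexError comes from the slice at my_rbm.py line 374: batch_labels is indexed with [offset:offset + batch_size, :], which requires 2-D (one-hot) labels, while mnist here was loaded with one_hot=False. Reloading MNIST one-hot, as cell In [14] above does, gives fit_predictor the (n, 10) label matrix it expects:

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)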

In [24]:
rbm = Rbm(num_hidden=128, num_classes=10, num_features=784, learning_rate=0.01)
rbm.init_rbm()
rbm.fit(mnist.train.images, mnist.test.images, num_epochs=10)
rbm.fit_predictor(train_data=mnist.train.images, train_labels=mnist.train.labels,
                  test_data=mnist.test.images, test_labels=mnist.test.labels, num_steps=30000)


Number of features: 784
Number of classes: 10
logit shape:  (?, 10)
batch_labels shape:  (?, 10)
[per-epoch "batch_number" progress lines elided]
epoch: 0
rec_loss: 0.135416
epoch: 1
rec_loss: 0.12244
epoch: 2
rec_loss: 0.11567
epoch: 3
rec_loss: 0.110905
epoch: 4
rec_loss: 0.107334
epoch: 5
rec_loss: 0.104298
epoch: 6
rec_loss: 0.101884
epoch: 7
rec_loss: 0.0999768
epoch: 8
rec_loss: 0.098325
epoch: 9
rec_loss: 0.0969732
Initialized fit predictor.
[interleaved "Adding run metadata for N" lines elided]
step 0, training accuracy 0.1
step 1000, training accuracy 0.9
step 2000, training accuracy 1
step 3000, training accuracy 0.8
step 4000, training accuracy 1
step 5000, training accuracy 0.8
step 6000, training accuracy 1
step 7000, training accuracy 1
step 8000, training accuracy 0.7
step 9000, training accuracy 1
step 10000, training accuracy 1
step 11000, training accuracy 1
step 12000, training accuracy 0.9
step 13000, training accuracy 0.8
step 14000, training accuracy 1
step 15000, training accuracy 0.9
step 16000, training accuracy 1
step 17000, training accuracy 1
step 18000, training accuracy 0.9
step 19000, training accuracy 1
step 20000, training accuracy 0.8
step 21000, training accuracy 1
step 22000, training accuracy 1
step 23000, training accuracy 1
step 24000, training accuracy 1
step 25000, training accuracy 1
step 26000, training accuracy 0.9
step 27000, training accuracy 1
step 28000, training accuracy 1
step 29000, training accuracy 1
test accuracy 0.9693
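
With 128 hidden units and 10 epochs of pretraining, the fine-tuned network reaches 0.9693 test accuracy, ahead of both the best SVM-on-hidden-features result above (poly, coef0=1, d=3: 0.9542) and the raw-pixel linear SVM (0.9393).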

CIFAR10


In [2]:
def unpickle(file):
    # CIFAR-10 batches are pickled dicts; 'latin-1' handles the Python 2 era encoding
    with open(file, 'rb') as fo:
        return pickle.load(fo, encoding='latin-1')

def from_flat_to_3d(image):
    # CIFAR-10 stores images channel-major: 1024 red, then 1024 green, then 1024 blue values
    return np.dstack((image[0:1024].reshape(32, 32),
                      image[1024:2048].reshape(32, 32),
                      image[2048:3072].reshape(32, 32)))

cifar_test = unpickle('cifar-10-batches-py/test_batch')
cifar_test['data'] = cifar_test['data'].astype(np.float32) / 255
cifar_test['data_3d'] = np.array([from_flat_to_3d(image) for image in cifar_test['data']])

cifar = unpickle('cifar-10-batches-py/data_batch_1')
for i in range(2, 6):
    tmp = unpickle('cifar-10-batches-py/data_batch_' + str(i))
    cifar['data'] = np.vstack((cifar['data'], tmp['data']))
    cifar['labels'] = np.concatenate((cifar['labels'], tmp['labels']))
    
cifar['data'] = cifar['data'].astype(np.float32) / 255
cifar['data_3d'] = np.array([from_flat_to_3d(image) for image in cifar['data']])

# cifar['data_bw'] = (cifar['data'][:,0:1024] + cifar['data'][:,1024:2048] + cifar['data'][:, 2048:3072]) / 3 
# cifar_test['data_bw'] = (cifar_test['data'][:,0:1024] + cifar_test['data'][:,1024:2048] + cifar_test['data'][:, 2048:3072]) / 3 

enc = OneHotEncoder()
cifar['labels_oh'] = enc.fit_transform(cifar['labels'].reshape(-1, 1))
cifar['labels_oh'] = cifar['labels_oh'].toarray()

cifar_test['labels'] = np.array(cifar_test['labels'])
cifar_test['labels_oh'] = enc.transform(cifar_test['labels'].reshape(-1, 1))  # reuse the encoder fitted on the training labels
cifar_test['labels_oh'] = cifar_test['labels_oh'].toarray()

# pca = PCA(whiten=True)
# cifar['data_bw_whitened'] = pca.fit_transform(cifar['data_bw'])
# cifar_test['data_bw_whitened'] = pca.fit_transform(cifar_test['data_bw'])

In [8]:
print(cifar['data'].shape)
print(cifar['labels'].shape)


(50000, 3072)
(50000, 10)

In [9]:
plt.imshow(cifar['data'][3][0:1024].reshape(32,32))


Out[9]:
<matplotlib.image.AxesImage at 0x7f2c7d75fba8>

In [4]:
num_hidden = 761
num_epochs=10
rbm = Rbm(num_hidden=num_hidden, num_classes=10, num_features=3072, learning_rate=0.001)
rbm.init_rbm()
rbm.fit(cifar['data'], cifar_test['data'], num_epochs=num_epochs)


Number of features: 3072
Number of classes: 10
logit shape:  (?, 10)
batch_labels shape:  (?, 10)
epoch: 0
batch_number: 0
batch_number: 1000
batch_number: 2000
batch_number: 3000
batch_number: 4000
rec_loss: 136.153
epoch: 1
batch_number: 0
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-4-08df24bec2d5> in <module>()
      3 rbm = Rbm(num_hidden=num_hidden, num_classes=10, num_features=3072, learning_rate=0.001)
      4 rbm.init_rbm()
----> 5 rbm.fit(cifar['data'], cifar_test['data'], num_epochs=num_epochs)

/home/kkari/UniStuff/HetedikFelev/bsc_thesis/my_rbm.py in fit(self, train_dataset, validation_dataset, num_epochs)
    321                 self.tf_session.run(
    322                     self.updates,
--> 323                     feed_dict=self._create_feed_dict(batch)
    324                 )
    325 

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    380     try:
    381       result = self._run(None, fetches, feed_dict, options_ptr,
--> 382                          run_metadata_ptr)
    383       if run_metadata:
    384         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    653     movers = self._update_with_movers(feed_dict_string, feed_map)
    654     results = self._do_run(handle, target_list, unique_fetches,
--> 655                            feed_dict_string, options, run_metadata)
    656 
    657     # User may have fetched the same tensor multiple times, but we

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
    721     if handle is None:
    722       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
--> 723                            target_list, options, run_metadata)
    724     else:
    725       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
    728   def _do_call(self, fn, *args):
    729     try:
--> 730       return fn(*args)
    731     except errors.OpError as e:
    732       message = compat.as_text(e.message)

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
    710         return tf_session.TF_Run(session, options,
    711                                  feed_dict, fetch_list, target_list,
--> 712                                  status, run_metadata)
    713 
    714     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

In [8]:
# extract hidden-unit activation probabilities for every CIFAR training image
last_output = np.empty((0, rbm.get_h_prob_out([cifar['data'][0]]).shape[1]))
for i in range(cifar['data'].shape[0]):
    last_output = np.vstack((last_output, rbm.get_h_prob_out([cifar['data'][i]])))
    if i % 1000 == 0:
        print('done with: %s' % i)


done with: 0
done with: 1000
...
done with: 49000

In [14]:
# same extraction for the CIFAR test images
last_output_test = np.empty((0, rbm.get_h_prob_out([cifar_test['data'][0]]).shape[1]))
for i in range(cifar_test['data'].shape[0]):
    last_output_test = np.vstack((last_output_test, rbm.get_h_prob_out([cifar_test['data'][i]])))
    if i % 999 == 0:
        print('done with: %s' % (i + 1))


done with: 1
done with: 1000
done with: 1999
done with: 2998
done with: 3997
done with: 4996
done with: 5995
done with: 6994
done with: 7993
done with: 8992
done with: 9991

In [11]:
clf = svm.SVC(decision_function_shape='ovr', kernel='poly', degree=3, coef0=1.0)
clf.fit(last_output, cifar['labels'])
y_pred = clf.predict(last_output_test)
sklearn.metrics.accuracy_score(y_pred=y_pred, y_true=cifar_test['labels'])


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-11-35e720ec1782> in <module>()
      2 clf.fit(last_output, cifar['labels'])
      3 y_pred = clf.predict(last_output_test)
----> 4 sklearn.metrics.accuracy_score(y_pred=y_pred, y_true=cifar_test['labels'])

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/sklearn/metrics/classification.py in accuracy_score(y_true, y_pred, normalize, sample_weight)
    170 
    171     # Compute accuracy for each possible representation
--> 172     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    173     if y_type.startswith('multilabel'):
    174         differing_labels = count_nonzero(y_true - y_pred, axis=1)

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/sklearn/metrics/classification.py in _check_targets(y_true, y_pred)
     70     y_pred : array or indicator matrix
     71     """
---> 72     check_consistent_length(y_true, y_pred)
     73     type_true = type_of_target(y_true)
     74     type_pred = type_of_target(y_pred)

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/sklearn/utils/validation.py in check_consistent_length(*arrays)
    174     if len(uniques) > 1:
    175         raise ValueError("Found arrays with inconsistent numbers of samples: "
--> 176                          "%s" % str(uniques))
    177 
    178 

ValueError: Found arrays with inconsistent numbers of samples: [ 9999 10000]

In [15]:
clf.score(last_output_test, cifar_test['labels'])


Out[15]:
0.10000000000000001

In [13]:
cifar_test['labels'].shape
last_output_test.shape


Out[13]:
(9999, 761)
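
So last_output_test has 9999 rows against 10000 labels, which is what tripped accuracy_score above; the extraction loop presumably lost one image to an interruption. Either re-run the extraction cell (In [14]) in full, or trim the labels to the rows that exist:

sklearn.metrics.accuracy_score(y_pred=clf.predict(last_output_test),
                               y_true=cifar_test['labels'][:last_output_test.shape[0]])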

In [19]:
num_hidden = 256
num_epochs=10
from my_rbm import Rbm
rbm = Rbm(num_hidden=num_hidden, num_classes=10, num_features=3072, learning_rate=0.01)
rbm.init_rbm()
rbm.fit(cifar['data'], cifar['labels'], num_epochs=0)
rbm.fit_predictor(cifar['data'], cifar['labels'], cifar_test['data'], cifar_test['labels'], num_steps=30000)


Number of features: 3072
Number of classes: 10
logit shape:  (?, 10)
batch_labels shape:  (?, 10)
Initialized fit predictor.
[interleaved "Adding run metadata for N" lines elided]
step 0, training accuracy 0.1
step 1000, training accuracy 0.1
step 2000, training accuracy 0.2
step 3000, training accuracy 0
step 4000, training accuracy 0.1
step 5000, training accuracy 0.1
step 6000, training accuracy 0.2
step 7000, training accuracy 0.1
step 8000, training accuracy 0.1
step 9000, training accuracy 0
step 10000, training accuracy 0
step 11000, training accuracy 0
step 12000, training accuracy 0.1
step 13000, training accuracy 0.2
step 14000, training accuracy 0
step 15000, training accuracy 0.1
step 16000, training accuracy 0.1
step 17000, training accuracy 0.3
step 18000, training accuracy 0.2
step 19000, training accuracy 0.1
step 20000, training accuracy 0
step 21000, training accuracy 0.2
step 22000, training accuracy 0
step 23000, training accuracy 0.3
step 24000, training accuracy 0.1
step 25000, training accuracy 0
step 26000, training accuracy 0.3
step 27000, training accuracy 0.2
step 28000, training accuracy 0
step 29000, training accuracy 0
test accuracy 0.1
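
0.1 is exactly chance level on CIFAR-10, which is unsurprising: with num_epochs=0 the RBM received no pretraining at all before fit_predictor ran.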

In [12]:
rbm_layers = [761]
rbm_learning_rate = [0.001]
rbm_num_epochs = [10]
rbm_batch_size = [25]
rbm_gibbs_k = [1]

srbm = dbn.DeepBeliefNetwork(
    do_pretrain=True,
    rbm_layers=rbm_layers, dataset='cifar',
    finetune_act_func='relu', rbm_learning_rate=rbm_learning_rate,
    verbose=True, rbm_num_epochs=rbm_num_epochs, rbm_gibbs_k=rbm_gibbs_k,
    rbm_gauss_visible=False, rbm_stddev=0.1,
    momentum=0.5, rbm_batch_size=rbm_batch_size, finetune_learning_rate=0.01,
    finetune_num_epochs=10, finetune_batch_size=25,
    finetune_opt='gradient_descent', finetune_loss_func='softmax_cross_entropy',
    finetune_dropout=1)


Creating /home/kkari/.yadlt/models/ directory to save/restore models
Creating /home/kkari/.yadlt/data/ directory to save model generated data
Creating /home/kkari/.yadlt/logs/ directory to save tensorboard logs
Creating /home/kkari/.yadlt/models/rbm-1 directory to save/restore models
Creating /home/kkari/.yadlt/data/rbm-1 directory to save model generated data
Creating /home/kkari/.yadlt/logs/rbm-1 directory to save tensorboard logs

In [ ]:
srbm.pretrain(cifar['data'], cifar_test['data'])


Training layer 1...
Tensorboard logs dir for this run is /home/kkari/.yadlt/logs/rbm-1/run1
Reconstruction loss at step 0: 136.153
