In [9]:
from nolearn.dbn import DBN

from sklearn.cross_validation import cross_val_score
from sklearn.datasets import load_iris
from sklearn.preprocessing import scale

iris = load_iris()
clf = DBN(
    [4, 4, 3],      # 4 input features, one hidden layer of 4 units, 3 iris classes
    learn_rates=0.3,
    epochs=50,
    )

scores = cross_val_score(clf, scale(iris.data), iris.target, cv=10)
# report the mean accuracy and half a standard deviation across the 10 folds
print "Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() / 2)


Accuracy: 0.40 (+/- 0.24)
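An accuracy of 0.40 is barely above chance for three balanced classes, which suggests the network is undertrained at these settings. As a sanity check, a linear baseline can be cross-validated the same way, reusing `iris`, `scale`, and `cross_val_score` from the cell above (a minimal sketch added here, not part of the original run; the choice of LogisticRegression with default parameters is an assumption):

In [ ]:
from sklearn.linear_model import LogisticRegression

# hypothetical baseline: a linear model on the same scaled features
baseline = LogisticRegression()
scores = cross_val_score(baseline, scale(iris.data), iris.target, cv=10)
print "Baseline accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() / 2)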

In [14]:
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_mldata

mnist = fetch_mldata('MNIST original')

In [16]:
# dividing by 255 scales the uint8 pixel values into [0, 1];
# train_test_split defaults to a 75/25 split (52500 train, 17500 test)
X_train, X_test, y_train, y_test = train_test_split(
    mnist.data / 255.0, mnist.target)

In [36]:
print mnist.target.shape, mnist.target.dtype
print mnist.data.shape, mnist.data.dtype


(70000L,) float64
(70000L, 784L) uint8
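
Each row of `mnist.data` is a flattened 28x28 grayscale image. One way to see that is to reshape a row and display it (a sketch added for illustration; matplotlib is assumed to be available, it is not imported anywhere in the original):

In [ ]:
import matplotlib.pyplot as plt

# reshape the first training row back into a 28x28 image and display it
plt.imshow(X_train[0].reshape(28, 28), cmap='gray')
plt.title('label: %d' % int(y_train[0]))
plt.show()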

In [33]:
# import the necessary packages (train_test_split and datasets were
# imported above and are not needed again here)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix

from nolearn.dbn import DBN



clf = DBN(
    [-1, 300, -1],          # -1 lets nolearn infer the input (784) and output (10) sizes from the data
    learn_rates=0.3,
    learn_rate_decays=0.9,  # multiply the learning rate by 0.9 after each epoch
    epochs=2,
    verbose=1,
    )

In [34]:
clf.fit(X_train, y_train)


[DBN] fitting X.shape=(52500L, 784L)
[DBN] layers [784L, 300, 10L]
[DBN] Fine-tune...
100%
Epoch 1:
100%
  loss 0.280437254881
  err  0.0826981707317
  (0:00:06)
Epoch 2:
  loss 0.182890987452
  err  0.0513528963415
  (0:00:05)

In [35]:
from sklearn.metrics import classification_report
#from sklearn.metrics import accuracy_score  # zero_one_score was removed from sklearn; accuracy_score replaces it

y_pred = clf.predict(X_test)
#print "Accuracy:", accuracy_score(y_test, y_pred)
print "Classification report:"
print classification_report(y_test, y_pred)


Classification report:
             precision    recall  f1-score   support

        0.0       0.90      0.98      0.94      1695
        1.0       0.99      0.96      0.98      1919
        2.0       0.95      0.96      0.96      1774
        3.0       0.93      0.95      0.94      1772
        4.0       0.96      0.96      0.96      1729
        5.0       0.94      0.94      0.94      1636
        6.0       0.96      0.95      0.96      1715
        7.0       0.97      0.95      0.96      1818
        8.0       0.92      0.91      0.91      1660
        9.0       0.95      0.91      0.93      1782

avg / total       0.95      0.95      0.95     17500
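
The `confusion_matrix` imported earlier is never used; it gives a complementary per-class view of the same predictions (a cell added for illustration, not part of the original run):

In [ ]:
# rows are true digits, columns are predicted digits
print confusion_matrix(y_test, y_pred)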


In [ ]:
# the data was already scaled to [0, 1] and split into training and
# testing sets above, so just alias the existing arrays
(trainX, testX, trainY, testY) = X_train, X_test, y_train, y_test

print trainX.shape
print trainY.shape

dbn = DBN(
    [trainX.shape[1], 80, 80, 10],  # 784 inputs, two hidden layers of 80 units, 10 digit classes
    learn_rates=0.3,
    learn_rate_decays=0.9,
    epochs=10,
    verbose=1)
dbn.fit(trainX, trainY)

# compute the predictions for the test data and show a classification
# report
preds = dbn.predict(testX)
print classification_report(testY, preds)

In [6]:
import nolearn

dir(nolearn)


Out[6]:
['__builtins__',
 '__doc__',
 '__file__',
 '__name__',
 '__package__',
 '__path__',
 'dbn']

In [54]:
import numpy as np

wid = 10   # number of columns
hig = 5    # number of rows
ix_ct = 2  # number of rows to gather per column

src = np.arange(wid*hig, dtype=float).reshape((hig, wid))
dst = np.empty((ix_ct, wid), dtype=float)
# for every column, pick ix_ct random row indices
ix = np.random.random_integers(0, hig-1, (ix_ct, wid))

# explicit loop: column by column, gather the chosen rows
for i in range(ix.shape[1]):
    j = ix[:,i]
    dst[:,i] = src[j,i]
print dst

# vectorized equivalent: fancy indexing with broadcast column indices
print src[ix,np.arange(ix.shape[1])]


[[ 10.  31.  42.  33.  24.  15.  26.  27.   8.  49.]
 [ 30.  41.  32.  23.   4.  25.  16.  27.  38.  29.]]
[[ 10.  31.  42.  33.  24.  15.  26.  27.   8.  49.]
 [ 30.  41.  32.  23.   4.  25.  16.  27.  38.  29.]]
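
The two printouts match because fancy indexing broadcasts `ix` (shape `(2, 10)`) against the column index vector (shape `(10,)`), gathering one element per (row-index, column) pair. A direct check of that equivalence (a cell added for illustration, not part of the original run):

In [ ]:
# confirm the loop and the vectorized gather produce identical arrays
print np.array_equal(dst, src[ix, np.arange(wid)])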
