In [1]:
from time import time
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_mldata
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sknn.mlp import Classifier, Layer
In [2]:
np.set_printoptions(precision=4)
np.set_printoptions(suppress=True)
In [3]:
mnist = fetch_mldata('mnist-original')
X_train, X_test, y_train, y_test = train_test_split(
    (mnist.data / 255.0).astype(np.float32),
    mnist.target.astype(np.int32),
    test_size=0.33, random_state=1234)
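Note that fetch_mldata depends on the now-defunct mldata.org service, so the download above may fail on current setups. A roughly equivalent load is sketched below with fetch_openml (available in scikit-learn 0.20+); the np.asarray calls are there only because newer releases may return pandas objects, and 'mnist_784' is the OpenML name of the same 70,000-digit dataset.
# Alternative loading sketch, assuming scikit-learn >= 0.20
from sklearn.datasets import fetch_openml
mnist = fetch_openml('mnist_784', version=1)
X = np.asarray(mnist.data, dtype=np.float32) / 255.0
y = np.asarray(mnist.target, dtype=np.int32)  # OpenML serves the labels as strings
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=1234)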
In [4]:
print "test data : %s rows %s columns" %(X_train.shape[0], X_train.shape[1])
print "test data : %s rows %s columns" %(X_test.shape[0], X_test.shape[1])
In [5]:
clf = Classifier(
    layers=[Layer("Rectifier", units=300), Layer("Softmax")],
    learning_rate=0.02,
    batch_size=100,
    n_iter=2,
    verbose=1,
)
clf.fit(X_train, y_train)
Out[5]:
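The metric helpers imported in In [1] are never used in the session itself; a minimal evaluation sketch for the fitted network could look like the cell below (the .ravel() is defensive, in case predict returns a column vector rather than a flat array).
# Quick evaluation sketch using the metrics imported in In [1]
y_pred = clf.predict(X_test).ravel()
print "accuracy : %.4f" % accuracy_score(y_test, y_pred)
print confusion_matrix(y_test, y_pred)
print classification_report(y_test, y_pred)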
In [6]:
# list the network's layers, then grab each layer's trained weights and biases
print [l for l in clf.mlp.layers]
inf = [(l.get_weights(), l.get_biases()) for l in clf.mlp.layers]
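Since the extracted (weights, biases) pairs should be plain numpy arrays, they can be persisted for later reuse; a pickle-based sketch (the file name is just an example, not part of the original session):
# Persistence sketch, assuming get_weights()/get_biases() return picklable arrays
import pickle

with open('mnist_mlp_params.pkl', 'wb') as f:  # example file name
    pickle.dump(inf, f)

with open('mnist_mlp_params.pkl', 'rb') as f:
    inf = pickle.load(f)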
In [7]:
clf.mlp.layers[0].get_weights()
Out[7]:
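A quick sanity check on what was extracted is to look at the array shapes: the hidden layer connects the 784 pixel inputs to 300 units, and the softmax layer connects those 300 units to the 10 digit classes. A sketch, assuming the getters return numpy arrays:
# one (weights, biases) pair per layer
for w, b in inf:
    print w.shape, b.shape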
In [8]:
# re-create just the hidden layer; its trained weights are copied in below
l = Layer("Rectifier", units=300)
clf1 = Classifier(layers=[l])
In [10]:
# build the network's parameters without training (private sknn API)
clf1._initialize(X_test)
In [11]:
# transplant the trained hidden-layer weights and biases into the new network
clf1.mlp.layers[0].set_weights(inf[0][0])
clf1.mlp.layers[0].set_biases(inf[0][1])
In [12]:
clf1.predict(X_test)
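To check how well the weight transplant worked, the copy's predictions can be compared against the originally trained clf; only the hidden layer's parameters were copied, so perfect agreement is not guaranteed. A sketch:
# Comparison sketch between the original network and the re-created one
pred_orig = clf.predict(X_test).ravel()
pred_copy = clf1.predict(X_test).ravel()
print "agreement with original clf : %.4f" % np.mean(pred_orig == pred_copy)
print "accuracy of the copy        : %.4f" % accuracy_score(y_test, pred_copy)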