In [1]:
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore', UserWarning)
import os,sys,re
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
read_path = '/home/mckc/Face oNN/Complete Face Data/'
#read_path = '/home/mckc/Face oNN/Complete Face Data/'
save_path = '/home/mckc/latest/'
os.chdir(read_path)
os.getcwd()


Out[1]:
'/home/mckc/Face oNN/Complete Face Data'

In [2]:
folders = os.listdir(os.getcwd())

In [3]:
def PCA_comp(data):
    from time import *
    from sklearn.decomposition import PCA
    n_components = 150
    
    h,w = 96,96

    print("Extracting the top %d eigenfaces from %d faces"
      % (n_components, image_data.shape[0]))
    t0 = time()
    pca = PCA(n_components=n_components, whiten=True).fit(images_geq)
    print("done in %0.3fs" % (time() - t0))

    #eigenfaces = pca.components_.reshape((n_components, h, w))

    print("Projecting the input data on the eigenfaces orthonormal basis")
    t0 = time()
    X = pca.transform(data)
    print X.shape
    print("done in %0.3fs" % (time() - t0))
    return(X)


<ipython-input-3-010fc55b4022>:1: SyntaxWarning: import * only allowed at module level
  def PCA_comp(data):

In [4]:
#K-mediods file
import numpy as np
import random

def kMedoids(D, k, tmax=100):
    # determine dimensions of distance matrix D
    m, n = D.shape

    if k > n:
        raise Exception('too many medoids')
    # randomly initialize an array of k medoid indices
    M = np.arange(n)
    np.random.shuffle(M)
    M = np.sort(M[:k])

    # create a copy of the array of medoid indices
    Mnew = np.copy(M)

    # initialize a dictionary to represent clusters
    C = {}
    for t in xrange(tmax):
        # determine clusters, i. e. arrays of data indices
        J = np.argmin(D[:,M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J==kappa)[0]
        # update cluster medoids
        for kappa in range(k):
            J = np.mean(D[np.ix_(C[kappa],C[kappa])],axis=1)
            j = np.argmin(J)
            Mnew[kappa] = C[kappa][j]
        np.sort(Mnew)
        # check for convergence
        if np.array_equal(M, Mnew):
            break
        M = np.copy(Mnew)
    else:
        # final update of cluster memberships
        J = np.argmin(D[:,M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J==kappa)[0]

# return results
    return M, C
#directs = os.listdir('K:/COMMON/face onn/') import cv2 from cv2 import resize all_images = [] subject = [] directs = os.listdir(path) for dir in directs: files = os.listdir(path+dir) os.chdir(path+dir) for i in files: image = resize(cv2.imread(i,0),(96,96)) #image = cv2.imread(i,0) # Global equalize image_g_eq = exposure.equalize_hist(image) # Equalization image_eql = rank.equalize(image, selem=selem) images.append(image) images_geq.append(image_g_eq) images_eql.append(image_eql) size.append(image.shape[0]) size = np.array(size) image_data = np.array(images).reshape(-1,9216) images_geq = np.array(images_geq).reshape(-1,9216) images_eql = np.array(images_eql).reshape(-1,9216) X = PCA_comp(image_g_eq)

In [5]:
folder = folders[10]
folder


Out[5]:
'Praba'

In [6]:
import cv2
from cv2 import resize
from skimage import exposure
from skimage.morphology import disk
from skimage.filters import rank
selem = disk(30)
size = []
images_geq = []
images_eql = []
images = []

os.chdir(read_path+folder)
files = os.listdir(read_path+folder)
#files = os.listdir(read_path)

In [7]:
for i in files:
    image = resize(cv2.imread(i,0),(96,96))
    #image = cv2.imread(i,0)
    # Global equalize
    image_g_eq = exposure.equalize_hist(image)

    # Equalization
    image_eql = rank.equalize(image, selem=selem)
    images.append(image)
    images_geq.append(image_g_eq)
    images_eql.append(image_eql)
    size.append(image.shape[0])
size = np.array(size)
image_data = np.array(images).reshape(-1,9216)
images_geq = np.array(images_geq).reshape(-1,9216)
images_eql = np.array(images_eql).reshape(-1,9216)
X  = PCA_comp(images_geq)
X.shape


Extracting the top 150 eigenfaces from 164 faces
done in 0.166s
Projecting the input data on the eigenfaces orthonormal basis
(164, 150)
done in 0.052s
Out[7]:
(164, 150)
from matplotlib import pyplot as plt from scipy.cluster.hierarchy import dendrogram, linkage # generate the linkage matrix Z = linkage(X, 'ward') from scipy.cluster.hierarchy import cophenet from scipy.spatial.distance import pdist c, coph_dists = cophenet(Z, pdist(X)) c
plt.figure(figsize=(25, 10)) plt.title('Hierarchical Clustering Dendrogram') plt.xlabel('sample index') plt.ylabel('distance') dendrogram( Z, leaf_rotation=90., # rotates the x axis labels leaf_font_size=8., # font size for the x axis labels ) plt.show()

In [1]:
import warnings
#warnings.simplefilter("ignore")
warnings.filterwarnings("ignore",category=ImportWarning) #DeprecationWarning

In [1]:
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d,upsample_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import  to_categorical
import tensorflow as tf
tf.reset_default_graph()


#from keras.callbacks import EarlyStopping
#early_stopping = EarlyStopping(monitor='val_loss', patience=2)

####
encoder = input_data(shape=[None, 96, 96, 1], name='input')

encoder = conv_2d(encoder, 32, 3, activation='relu' ,padding='same')
encoder = conv_2d(encoder, 64, 3, activation='relu',padding='same')
encoder = max_pool_2d(encoder, 2,padding='same')


encoder = conv_2d(encoder, 64, 3, activation='relu',padding='same')
encoder = conv_2d(encoder, 128, 3, activation='relu',padding='same')
encoder = max_pool_2d(encoder, 2,padding='same')

encoder = conv_2d(encoder, 128, 3, activation='relu',padding='same')
encoder = conv_2d(encoder, 192, 3, activation='relu',padding='same')
encoder = max_pool_2d(encoder, 2,padding='same')

encoder = conv_2d(encoder, 256, 3, activation='relu',padding='same')
encoder = max_pool_2d(encoder, 2,padding='same')

encoder = conv_2d(encoder, 128, 3, activation='relu',padding='same')
encoder = max_pool_2d(encoder, 2,padding='same')

#encoder = flatten(encoder, name='Flatten')

#decoder = reshape(encoder, (-1,256,3,3), name='Reshape')
decoder = conv_2d(encoder, 128, 3, activation='relu',padding='same')
decoder = upsample_2d(decoder, 2)

decoder = conv_2d(decoder, 256, 3, activation='relu',padding='same')
decoder = upsample_2d(decoder, 2)

decoder = conv_2d(decoder, 192, 3, activation='relu',padding='same')
decoder = conv_2d(decoder, 128, 3, activation='relu',padding='same')
decoder = upsample_2d(decoder, 2)

decoder = conv_2d(decoder, 128, 3, activation='relu',padding='same')
decoder = conv_2d(decoder, 64, 3, activation='relu',padding='same')
decoder = upsample_2d(decoder, 2)

decoder = conv_2d(decoder, 64, 3, activation='relu',padding='same')
decoder = conv_2d(decoder, 32, 3, activation='relu',padding='same')
decoder = upsample_2d(decoder, 2)

decoder = conv_2d(decoder, 1, 3, activation='sigmoid',padding='same')
net = tflearn.regression(decoder, optimizer='adam', learning_rate=0.0001,
                         loss='mean_square', metric=None)

# Training the auto encoder
print('initialising')
model = tflearn.DNN(net, tensorboard_verbose=1,tensorboard_dir='/home/mckc/tf_logs')
#model.load('/home/mckc/cluster.tflearn')


initialising
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:46 in get_summary.: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:46 in get_summary.: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/summaries.py:44 in get_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/helpers/summarizer.py:68 in summarize_gradients.: merge_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.merge.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/helpers/trainer.py:766 in create_summaries.: merge_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.merge.
WARNING:tensorflow:VARIABLES collection name is deprecated, please use GLOBAL_VARIABLES instead; VARIABLES will be removed after 2017-03-02.
WARNING:tensorflow:From /home/mckc/anaconda/lib/python2.7/site-packages/tflearn/helpers/trainer.py:130 in __init__.: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.

In [9]:
print('started')
model.fit( images_geq.reshape(-1,96,96,1), images_geq.reshape(-1,96,96,1), n_epoch=5, validation_set=0.1,
          run_id="auto_encoder_cnn", batch_size=200)
#encoding_model = tflearn.DNN(encoder, session=model.session)


Training Step: 4  | total loss: 0.08294
| Adam | epoch: 004 | loss: 0.08294 | val_loss: 0.08259 -- iter: 147/147
Training Step: 4  | total loss: 0.08294
| Adam | epoch: 004 | loss: 0.08294 | val_loss: 0.08259 -- iter: 147/147
--
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-9-a9e2360dd65f> in <module>()
      1 print('started')
      2 model.fit( images_geq.reshape(-1,96,96,1), images_geq.reshape(-1,96,96,1), n_epoch=5, validation_set=0.1,
----> 3           run_id="auto_encoder_cnn", batch_size=200)
      4 #encoding_model = tflearn.DNN(encoder, session=model.session)

/home/mckc/anaconda/lib/python2.7/site-packages/tflearn/models/dnn.pyc in fit(self, X_inputs, Y_targets, n_epoch, validation_set, show_metric, batch_size, shuffle, snapshot_epoch, snapshot_step, excl_trainops, run_id)
    186                          daug_dict=daug_dict,
    187                          excl_trainops=excl_trainops,
--> 188                          run_id=run_id)
    189 
    190     def predict(self, X):

/home/mckc/anaconda/lib/python2.7/site-packages/tflearn/helpers/trainer.pyc in fit(self, feed_dicts, n_epoch, val_feed_dicts, show_metric, snapshot_step, snapshot_epoch, shuffle_all, dprep_dict, daug_dict, excl_trainops, run_id)
    275                                                        snapshot_epoch,
    276                                                        snapshot_step,
--> 277                                                        show_metric)
    278                             global_loss += train_op.loss_value
    279                             if train_op.acc_value and global_acc:

/home/mckc/anaconda/lib/python2.7/site-packages/tflearn/helpers/trainer.pyc in _train(self, training_step, snapshot_epoch, snapshot_step, show_metric)
    711             if show_metric and self.metric is not None:
    712                 eval_ops.append(self.metric)
--> 713             e = evaluate_flow(self.session, eval_ops, self.test_dflow)
    714             self.val_loss = e[0]
    715             if show_metric and self.metric is not None:

/home/mckc/anaconda/lib/python2.7/site-packages/tflearn/helpers/trainer.pyc in evaluate_flow(session, ops_to_evaluate, dataflow)
    828         dataflow.start()
    829         res = [0. for i in ops_to_evaluate]
--> 830         feed_batch = dataflow.next()
    831 
    832         while feed_batch:

/home/mckc/anaconda/lib/python2.7/site-packages/tflearn/data_flow.pyc in next(self, timeout)
    126         """
    127         self.data_status.update()
--> 128         return self.feed_dict_queue.get(timeout=timeout)
    129 
    130     def start(self, reset_status=True):

/home/mckc/anaconda/lib/python2.7/Queue.pyc in get(self, block, timeout)
    166             elif timeout is None:
    167                 while not self._qsize():
--> 168                     self.not_empty.wait()
    169             elif timeout < 0:
    170                 raise ValueError("'timeout' must be a non-negative number")

/home/mckc/anaconda/lib/python2.7/threading.pyc in wait(self, timeout)
    338         try:    # restore state no matter what (e.g., KeyboardInterrupt)
    339             if timeout is None:
--> 340                 waiter.acquire()
    341                 if __debug__:
    342                     self._note("%s.wait(): got it", self)

KeyboardInterrupt: 
model.save("/home/mckc/cluster.tflearn")
%%time encode_decode = encoding_model.predict(images_geq[0].reshape(-1,96,96,1)) X = np.array([encoding_model.predict(images_geq[i].reshape(-1,96,96,1)) for i in range(images_geq.shape[0])]).reshape(-1,1152)

In [98]:
from scipy.cluster.vq import kmeans
from scipy.spatial.distance import cdist,pdist

##### cluster data into K=1..20 clusters #####
K_MAX = 40
KK = range(1,K_MAX,2)

KM = [kmeans(X[:,:],k,iter=50) for k in KK]
centroids = [cent for (cent,var) in KM]
D_k = [cdist(X[:,:], cent, 'euclidean') for cent in centroids]
cIdx = [np.argmin(D,axis=1) for D in D_k]
dist = [np.min(D,axis=1) for D in D_k]

tot_withinss = [sum(d**2) for d in dist]  # Total within-cluster sum of squares
totss = sum(pdist(X)**2)/X.shape[0]       # The total sum of squares
betweenss = totss - tot_withinss      

##### plots #####
kIdx = 1        # K=10
# elbow curve
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(KK, betweenss/totss*100, 'b*-')
ax.plot(KK[kIdx], betweenss[kIdx]/totss*100, marker='o', markersize=12, 
    markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')
ax.set_ylim((0,100))
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Percentage of variance explained (%)')
plt.title('Elbow for KMeans clustering')


Out[98]:
<matplotlib.text.Text at 0x7f1b7dfbd8d0>
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D,Flatten,Reshape from keras.optimizers import Adam,SGD,Adadelta,Adagrad from keras.models import Model encoding_dim = 1000 input_img = Input(shape=(9216,)) # add a Dense layer with a L1 activity regularizer encoder = Dense(1024, activation='relu')(input_img) encoder = Dense(512, activation='relu')(encoder) encoder = Dense(256, activation='relu')(encoder) decoder = Dense(512, activation='relu')(encoder) decoder = Dense(1024, activation='relu')(decoder) decoder = Dense(9216, activation='relu')(decoder) adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08) autoencoder = Model(input=input_img, output=decoder) autoencoder.compile(optimizer=adam, loss='mse') autoencoder.summary()
x_train = image_data
autoencoder.fit(x_train.reshape(-1,9216), x_train.reshape(-1,9216), nb_epoch=15, batch_size=25, shuffle=True, verbose=1, validation_split=0.2)
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D,Flatten,Reshape from keras.optimizers import Adam,SGD,Adadelta,Adagrad from keras.models import Model input_img = Input(shape=(1, 96, 96)) x = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(input_img) x = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(x) x = MaxPooling2D((2, 2), border_mode='same')(x) x = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(x) x = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(x) x = MaxPooling2D((2, 2), border_mode='same')(x) x = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(x) x = Convolution2D(192, 3, 3, activation='relu', border_mode='same')(x) encoded = MaxPooling2D((2, 2), border_mode='same')(x) decoded = Convolution2D(192, 3, 3, activation='relu', border_mode='same')(encoded) decoded = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(decoded) decoded = UpSampling2D((2, 2))(decoded) decoded = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(decoded) decoded = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(decoded) decoded = UpSampling2D((2, 2))(decoded) decoded = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(decoded) decoded = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(decoded) decoded = UpSampling2D((2, 2))(decoded) decoded = Convolution2D(1, 3, 3, activation='sigmoid', border_mode='same')(decoded) autoencoder = Model(input_img, decoded) encoder = Model(input=input_img, output=encoded) #decoder = Model(input=encoded, output=decoded) adam = Adam(lr=0.0000001, beta_1=0.9, beta_2=0.999, epsilon=1e-08) autoencoder.compile(optimizer=adam, loss='mse') autoencoder.summary()
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D,Flatten,Reshape from keras.optimizers import Adam,SGD,Adadelta,Adagrad from keras.models import Model input_img = Input(shape=(1, 96, 96)) decoder_input = Input(shape=(8, 12, 12)) x = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(input_img) x = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(x) x = MaxPooling2D((2, 2), border_mode='same')(x) x = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(x) x = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(x) x = MaxPooling2D((2, 2), border_mode='same')(x) x = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(x) x = Convolution2D(192, 3, 3, activation='relu', border_mode='same')(x) x = MaxPooling2D((2, 2), border_mode='same')(x) #x = MaxPooling2D((2, 2), border_mode='same')(x) #x = Convolution2D(192, 3, 3, activation='relu', border_mode='same')(x) #x = MaxPooling2D((2, 2), border_mode='same')(x) encoded = MaxPooling2D((2, 2), border_mode='same')(x) #encoded = Flatten()(x) #x = Reshape((192,2,2))(encoded) #x = UpSampling2D((2, 2))(x) #x = Convolution2D(192, 3, 3, activation='relu', border_mode='same')(x) #x = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(x) #x = UpSampling2D((2, 2))(x) #x = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(x) #x = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(x) #x = UpSampling2D((2, 2))(x) x = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(encoded) x = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(x) x = UpSampling2D((2, 2))(x) #x = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(x) #x = UpSampling2D((2, 2))(x) #x = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(x) #x = UpSampling2D((2, 2))(x) #x = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(x) #x = UpSampling2D((2, 2))(x) decoded = Convolution2D(1, 3, 3, activation='sigmoid', border_mode='same')(x) autoencoder = Model(input_img, decoded) encoder = Model(input=input_img, output=encoded) #decoder = Model(input=encoded, output=decoded) adam = Adam(lr=0.0000001, beta_1=0.9, beta_2=0.999, epsilon=1e-08) autoencoder.compile(optimizer=adam, loss='mse') autoencoder.summary() # create the decoder model #decoder = Model(input=encoded, output=decoder_layer(encoded))
x_train = image_data.reshape(-1,1,96,96)
autoencoder.fit(x_train, x_train, nb_epoch=50, batch_size=3, shuffle=True, verbose=1, validation_split=0.2)

In [100]:
from sklearn.metrics.pairwise import pairwise_distances
D = pairwise_distances(X, metric='euclidean')

# split into 2 clusters
M, C = kMedoids(D, 25)
cleaned = image_data[M,].reshape(-1,96,96)

os.chdir(save_path+folder)
import scipy.misc
for i in range(cleaned.shape[0]):
    scipy.misc.toimage(cleaned[i]).save(str(folder+'_'+str(i)) +'.jpg')
os.chdir(read_path)

In [88]:
plt.imshow(images_geq[M[4],].reshape(96,96),cmap=cm.Greys_r)


Out[88]:
<matplotlib.image.AxesImage at 0x7f1b7e15d190>

In [13]:
save_path


Out[13]:
'/home/mckc/clust/'

In [14]:
folder


Out[14]:
'clust'

In [ ]: