Special thanks to KERNIX for their extremely helpful blog
http://www.kernix.com/blog/image-classification-with-a-pre-trained-deep-neural-network_p11
Install TensorFlow, then run classify_image.py once to download the pre-trained Inception-v3 model:
conda install -c conda-forge tensorflow
cd anaconda/lib/python2.7/site-packages/tensorflow/models/image/imagenet
python classify_image.py --model_dir <desired model location>/imagenet
e.g.
python classify_image.py --model_dir ~/coradek/CNW_Wildlife_Identification/imagenet
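Running classify_image.py downloads the pre-trained model files (including classify_image_graph_def.pb, which is used below) into the chosen model directory. As an optional sanity check (assuming the example location above), you can list the directory from Python:

import os
model_dir = os.path.expanduser('~/coradek/CNW_Wildlife_Identification/imagenet')
print os.listdir(model_dir)   # should include 'classify_image_graph_def.pb'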
In [10]:
import os
import tensorflow as tf
# import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
import pandas as pd
In [11]:
# Create the TensorFlow graph
def create_graph():
    model_dir = './CNW_Wildlife_Identification/imagenet'
    with gfile.FastGFile(os.path.join(
            model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
In [12]:
# Start a TensorFlow Session and choose our "tensor" for feature extraction
# ('pool_3:0' is the last layer before classification)
def setup():
    create_graph()  # Only needs to run the first time
    # Create the session without a `with` block so it stays open for the
    # get_features() calls below; close it when you are finished with it.
    sess = tf.Session()
    # Get the last feature layer (preclassification) from inception-v3
    next_to_last_tensor = sess.graph.get_tensor_by_name('pool_3:0')
    return sess, next_to_last_tensor
In [4]:
# Get the actual features!
def get_features(image, session, tensor):
    if not gfile.Exists(image):
        tf.logging.fatal('File does not exist %s', image)
    image_data = gfile.FastGFile(image, 'rb').read()
    # Feed the raw JPEG bytes to the graph's DecodeJpeg node and
    # evaluate the bottleneck tensor
    predictions = session.run(tensor,
                              {'DecodeJpeg/contents:0': image_data})
    features = np.squeeze(predictions)
    return features.reshape(1, -1)
In [13]:
session, tensor = setup()
features = get_features('CNW_Wildlife_Identification/data/first_sample/EK000026-2.JPG',
session, tensor)
In [14]:
print features
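As a quick sanity check, the pool_3 bottleneck is 2048-dimensional, so the extracted array should come back as a single row of 2048 values:

print tensor.get_shape()   # the bottleneck tensor is 1 x 1 x 1 x 2048
print features.shape       # after squeeze and reshape: (1, 2048)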
If your image folders are organized by class (e.g. one subfolder per class), you can walk the directory tree and collect every image path:
In [15]:
directory = 'CNW_Wildlife_Identification/data/first_sample'
image_list = []
for p, dirs, files in os.walk(directory):
    for ff in files:
        if ff[-4:].lower() == '.jpg':
            image_list.append(p + '/' + ff)
In [16]:
image_list
Out[16]:
In [17]:
# In practice this is only run once - so setup() is included as part of this function
def get_features_repeatedly(image_list):
    '''Take a list of image file paths and
    return a numpy array of bottleneck features (one row per image).
    '''
    create_graph()
    with tf.Session() as session:
        # Get the last feature layer (preclassification) from inception-v3
        tensor = session.graph.get_tensor_by_name('pool_3:0')
        nb_features = 2048
        features = np.empty((len(image_list), nb_features))
        for ind, image in enumerate(image_list):
            # For large batches, print progress every 50 images instead:
            # if ind % 50 == 0:
            #     print('Processing %s...' % (image))
            print('Processing %s...' % (image))
            if not gfile.Exists(image):
                tf.logging.fatal('File does not exist %s', image)
            image_data = gfile.FastGFile(image, 'rb').read()
            predictions = session.run(tensor,
                                      {'DecodeJpeg/contents:0': image_data})
            features[ind, :] = np.squeeze(predictions)
    return features
In [18]:
lotsafeatures = get_features_repeatedly(image_list)
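Since pandas is already imported, one convenient way to persist these features for later modeling is to put them in a DataFrame indexed by file path and save it to disk (the filename here is just an example):

feature_df = pd.DataFrame(lotsafeatures, index=image_list)
feature_df.to_csv('inception_features.csv')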
Inception paper: https://arxiv.org/pdf/1512.00567v3.pdf
Kernix blog: http://www.kernix.com/blog/image-classification-with-a-pre-trained-deep-neural-network_p11