In [33]:
%run -i 5.2-cat-vs-dog.py


Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.
Epoch 1/100
100/100 [==============================] - 42s 416ms/step - loss: 0.6930 - acc: 0.5147 - val_loss: 0.6864 - val_acc: 0.5362
Epoch 2/100
100/100 [==============================] - 36s 361ms/step - loss: 0.6839 - acc: 0.5531 - val_loss: 0.6672 - val_acc: 0.6173
Epoch 3/100
100/100 [==============================] - 37s 365ms/step - loss: 0.6718 - acc: 0.5762 - val_loss: 0.6622 - val_acc: 0.6003
Epoch 4/100
100/100 [==============================] - 36s 363ms/step - loss: 0.6527 - acc: 0.6075 - val_loss: 0.6563 - val_acc: 0.5876
Epoch 5/100
100/100 [==============================] - 36s 364ms/step - loss: 0.6431 - acc: 0.6141 - val_loss: 0.6326 - val_acc: 0.6212
Epoch 6/100
100/100 [==============================] - 42s 423ms/step - loss: 0.6292 - acc: 0.6403 - val_loss: 0.6555 - val_acc: 0.5857
Epoch 7/100
100/100 [==============================] - 36s 362ms/step - loss: 0.6118 - acc: 0.6572 - val_loss: 0.5991 - val_acc: 0.6751
Epoch 8/100
100/100 [==============================] - 37s 365ms/step - loss: 0.5994 - acc: 0.6778 - val_loss: 0.6937 - val_acc: 0.5960
Epoch 9/100
100/100 [==============================] - 36s 360ms/step - loss: 0.5808 - acc: 0.6950 - val_loss: 0.6475 - val_acc: 0.6392
Epoch 10/100
100/100 [==============================] - 43s 426ms/step - loss: 0.5864 - acc: 0.6837 - val_loss: 0.6258 - val_acc: 0.6681
Epoch 11/100
100/100 [==============================] - 37s 366ms/step - loss: 0.5804 - acc: 0.6959 - val_loss: 0.6045 - val_acc: 0.6572
Epoch 12/100
100/100 [==============================] - 36s 364ms/step - loss: 0.5767 - acc: 0.6866 - val_loss: 0.5730 - val_acc: 0.6954
Epoch 13/100
100/100 [==============================] - 36s 364ms/step - loss: 0.5686 - acc: 0.7038 - val_loss: 0.5550 - val_acc: 0.7107
Epoch 14/100
100/100 [==============================] - 36s 361ms/step - loss: 0.5545 - acc: 0.7241 - val_loss: 0.5535 - val_acc: 0.7119
Epoch 15/100
100/100 [==============================] - 36s 363ms/step - loss: 0.5615 - acc: 0.7019 - val_loss: 0.5713 - val_acc: 0.6856
Epoch 16/100
100/100 [==============================] - 36s 361ms/step - loss: 0.5293 - acc: 0.7291 - val_loss: 0.5394 - val_acc: 0.7236
Epoch 17/100
100/100 [==============================] - 36s 364ms/step - loss: 0.5368 - acc: 0.7259 - val_loss: 0.5447 - val_acc: 0.7265
Epoch 18/100
100/100 [==============================] - 36s 364ms/step - loss: 0.5362 - acc: 0.7300 - val_loss: 0.5358 - val_acc: 0.7236
Epoch 19/100
100/100 [==============================] - 36s 365ms/step - loss: 0.5291 - acc: 0.7369 - val_loss: 0.5270 - val_acc: 0.7221
Epoch 20/100
100/100 [==============================] - 36s 362ms/step - loss: 0.5315 - acc: 0.7325 - val_loss: 0.5376 - val_acc: 0.7294
Epoch 21/100
100/100 [==============================] - 36s 362ms/step - loss: 0.5194 - acc: 0.7394 - val_loss: 0.5261 - val_acc: 0.7443
Epoch 22/100
100/100 [==============================] - 36s 364ms/step - loss: 0.5296 - acc: 0.7406 - val_loss: 0.5105 - val_acc: 0.7262
Epoch 23/100
100/100 [==============================] - 37s 373ms/step - loss: 0.5145 - acc: 0.7422 - val_loss: 0.5464 - val_acc: 0.7107
Epoch 24/100
100/100 [==============================] - 37s 375ms/step - loss: 0.5118 - acc: 0.7400 - val_loss: 0.5356 - val_acc: 0.7210
Epoch 25/100
100/100 [==============================] - 37s 374ms/step - loss: 0.5106 - acc: 0.7466 - val_loss: 0.5088 - val_acc: 0.7571
Epoch 26/100
100/100 [==============================] - 38s 378ms/step - loss: 0.5112 - acc: 0.7416 - val_loss: 0.5069 - val_acc: 0.7379
Epoch 27/100
100/100 [==============================] - 37s 372ms/step - loss: 0.5010 - acc: 0.7547 - val_loss: 0.5576 - val_acc: 0.7094
Epoch 28/100
100/100 [==============================] - 38s 377ms/step - loss: 0.5068 - acc: 0.7481 - val_loss: 0.5038 - val_acc: 0.7475
Epoch 29/100
100/100 [==============================] - 37s 368ms/step - loss: 0.4905 - acc: 0.7650 - val_loss: 0.5087 - val_acc: 0.7358
Epoch 30/100
100/100 [==============================] - 37s 375ms/step - loss: 0.4899 - acc: 0.7603 - val_loss: 0.4915 - val_acc: 0.7582
Epoch 31/100
100/100 [==============================] - 37s 372ms/step - loss: 0.4912 - acc: 0.7591 - val_loss: 0.5314 - val_acc: 0.7294
Epoch 32/100
100/100 [==============================] - 38s 378ms/step - loss: 0.4923 - acc: 0.7669 - val_loss: 0.5189 - val_acc: 0.7416
Epoch 33/100
100/100 [==============================] - 36s 363ms/step - loss: 0.4819 - acc: 0.7578 - val_loss: 0.4934 - val_acc: 0.7418
Epoch 34/100
100/100 [==============================] - 36s 363ms/step - loss: 0.4830 - acc: 0.7697 - val_loss: 0.5062 - val_acc: 0.7481
Epoch 35/100
100/100 [==============================] - 45s 445ms/step - loss: 0.4856 - acc: 0.7631 - val_loss: 0.4983 - val_acc: 0.7449
Epoch 36/100
100/100 [==============================] - 43s 434ms/step - loss: 0.4816 - acc: 0.7653 - val_loss: 0.5334 - val_acc: 0.7262
Epoch 37/100
100/100 [==============================] - 56s 563ms/step - loss: 0.4656 - acc: 0.7803 - val_loss: 0.5224 - val_acc: 0.7341
Epoch 38/100
100/100 [==============================] - 88s 878ms/step - loss: 0.4573 - acc: 0.7872 - val_loss: 0.5134 - val_acc: 0.7429
Epoch 39/100
100/100 [==============================] - 88s 881ms/step - loss: 0.4676 - acc: 0.7763 - val_loss: 0.4959 - val_acc: 0.7563
Epoch 40/100
100/100 [==============================] - 89s 888ms/step - loss: 0.4616 - acc: 0.7778 - val_loss: 0.5053 - val_acc: 0.7429
Epoch 41/100
100/100 [==============================] - 89s 887ms/step - loss: 0.4675 - acc: 0.7791 - val_loss: 0.4812 - val_acc: 0.7597
Epoch 42/100
100/100 [==============================] - 88s 885ms/step - loss: 0.4536 - acc: 0.7934 - val_loss: 0.5022 - val_acc: 0.7475
Epoch 43/100
100/100 [==============================] - 91s 907ms/step - loss: 0.4592 - acc: 0.7809 - val_loss: 0.4694 - val_acc: 0.7661
Epoch 44/100
100/100 [==============================] - 90s 901ms/step - loss: 0.4638 - acc: 0.7759 - val_loss: 0.4792 - val_acc: 0.7665
Epoch 45/100
100/100 [==============================] - 90s 902ms/step - loss: 0.4528 - acc: 0.7859 - val_loss: 0.4996 - val_acc: 0.7545
Epoch 46/100
100/100 [==============================] - 90s 900ms/step - loss: 0.4493 - acc: 0.7850 - val_loss: 0.4790 - val_acc: 0.7716
Epoch 47/100
100/100 [==============================] - 91s 911ms/step - loss: 0.4470 - acc: 0.7931 - val_loss: 0.4776 - val_acc: 0.7610
Epoch 48/100
100/100 [==============================] - 47s 468ms/step - loss: 0.4428 - acc: 0.7935 - val_loss: 0.5574 - val_acc: 0.7210
Epoch 49/100
100/100 [==============================] - 48s 482ms/step - loss: 0.4432 - acc: 0.7972 - val_loss: 0.4924 - val_acc: 0.7576
Epoch 50/100
100/100 [==============================] - 48s 482ms/step - loss: 0.4279 - acc: 0.7909 - val_loss: 0.4741 - val_acc: 0.7732
Epoch 51/100
100/100 [==============================] - 47s 471ms/step - loss: 0.4398 - acc: 0.7897 - val_loss: 0.4782 - val_acc: 0.7614
Epoch 52/100
100/100 [==============================] - 53s 533ms/step - loss: 0.4394 - acc: 0.7959 - val_loss: 0.4657 - val_acc: 0.7764
Epoch 53/100
100/100 [==============================] - 47s 466ms/step - loss: 0.4375 - acc: 0.7997 - val_loss: 0.5003 - val_acc: 0.7500
Epoch 54/100
100/100 [==============================] - 48s 475ms/step - loss: 0.4410 - acc: 0.7875 - val_loss: 0.4767 - val_acc: 0.7603
Epoch 55/100
100/100 [==============================] - 48s 481ms/step - loss: 0.4224 - acc: 0.8038 - val_loss: 0.4762 - val_acc: 0.7716
Epoch 56/100
100/100 [==============================] - 48s 476ms/step - loss: 0.4332 - acc: 0.7984 - val_loss: 0.5071 - val_acc: 0.7500
Epoch 57/100
100/100 [==============================] - 49s 486ms/step - loss: 0.4175 - acc: 0.8134 - val_loss: 0.4545 - val_acc: 0.7867
Epoch 58/100
100/100 [==============================] - 46s 462ms/step - loss: 0.4229 - acc: 0.8069 - val_loss: 0.4691 - val_acc: 0.7741
Epoch 59/100
100/100 [==============================] - 46s 462ms/step - loss: 0.4172 - acc: 0.8031 - val_loss: 0.4758 - val_acc: 0.7680
Epoch 60/100
100/100 [==============================] - 48s 476ms/step - loss: 0.4080 - acc: 0.8150 - val_loss: 0.4449 - val_acc: 0.7868
Epoch 61/100
100/100 [==============================] - 48s 484ms/step - loss: 0.4208 - acc: 0.8025 - val_loss: 0.4709 - val_acc: 0.7680
Epoch 62/100
100/100 [==============================] - 49s 489ms/step - loss: 0.4156 - acc: 0.8041 - val_loss: 0.4581 - val_acc: 0.7703
Epoch 63/100
100/100 [==============================] - 53s 530ms/step - loss: 0.4068 - acc: 0.8163 - val_loss: 0.5448 - val_acc: 0.7558
Epoch 64/100
100/100 [==============================] - 38s 382ms/step - loss: 0.4022 - acc: 0.8188 - val_loss: 0.4566 - val_acc: 0.7816
Epoch 65/100
100/100 [==============================] - 36s 360ms/step - loss: 0.4063 - acc: 0.8141 - val_loss: 0.4568 - val_acc: 0.7938
Epoch 66/100
100/100 [==============================] - 35s 354ms/step - loss: 0.4059 - acc: 0.8134 - val_loss: 0.4387 - val_acc: 0.7945
Epoch 67/100
100/100 [==============================] - 35s 355ms/step - loss: 0.3977 - acc: 0.8178 - val_loss: 0.4869 - val_acc: 0.7614
Epoch 68/100
100/100 [==============================] - 36s 357ms/step - loss: 0.4115 - acc: 0.8187 - val_loss: 0.5090 - val_acc: 0.7558
Epoch 69/100
100/100 [==============================] - 39s 388ms/step - loss: 0.3755 - acc: 0.8281 - val_loss: 0.4792 - val_acc: 0.7824
Epoch 70/100
100/100 [==============================] - 40s 401ms/step - loss: 0.4002 - acc: 0.8184 - val_loss: 0.4385 - val_acc: 0.7938
Epoch 71/100
100/100 [==============================] - 36s 357ms/step - loss: 0.3984 - acc: 0.8219 - val_loss: 0.4594 - val_acc: 0.7843
Epoch 72/100
100/100 [==============================] - 36s 355ms/step - loss: 0.3841 - acc: 0.8284 - val_loss: 0.5078 - val_acc: 0.7494
Epoch 73/100
100/100 [==============================] - 36s 356ms/step - loss: 0.3770 - acc: 0.8372 - val_loss: 0.4639 - val_acc: 0.7790
Epoch 74/100
100/100 [==============================] - 36s 357ms/step - loss: 0.3894 - acc: 0.8247 - val_loss: 0.4606 - val_acc: 0.7931
Epoch 75/100
100/100 [==============================] - 35s 353ms/step - loss: 0.3896 - acc: 0.8197 - val_loss: 0.4695 - val_acc: 0.7764
Epoch 76/100
100/100 [==============================] - 36s 356ms/step - loss: 0.3794 - acc: 0.8266 - val_loss: 0.4719 - val_acc: 0.7735
Epoch 77/100
100/100 [==============================] - 36s 355ms/step - loss: 0.3775 - acc: 0.8372 - val_loss: 0.4227 - val_acc: 0.8054
Epoch 78/100
100/100 [==============================] - 36s 360ms/step - loss: 0.3812 - acc: 0.8281 - val_loss: 0.4454 - val_acc: 0.7912
Epoch 79/100
100/100 [==============================] - 36s 357ms/step - loss: 0.3913 - acc: 0.8285 - val_loss: 0.5289 - val_acc: 0.7597
Epoch 80/100
100/100 [==============================] - 36s 355ms/step - loss: 0.3792 - acc: 0.8281 - val_loss: 0.4655 - val_acc: 0.7803
Epoch 81/100
100/100 [==============================] - 36s 356ms/step - loss: 0.3665 - acc: 0.8419 - val_loss: 0.4612 - val_acc: 0.7773
Epoch 82/100
100/100 [==============================] - 35s 354ms/step - loss: 0.3627 - acc: 0.8406 - val_loss: 0.5078 - val_acc: 0.7655
Epoch 83/100
100/100 [==============================] - 40s 400ms/step - loss: 0.3623 - acc: 0.8369 - val_loss: 0.4386 - val_acc: 0.7912
Epoch 84/100
100/100 [==============================] - 36s 357ms/step - loss: 0.3732 - acc: 0.8312 - val_loss: 0.4254 - val_acc: 0.8073
Epoch 85/100
100/100 [==============================] - 36s 363ms/step - loss: 0.3633 - acc: 0.8341 - val_loss: 0.4641 - val_acc: 0.7995
Epoch 86/100
100/100 [==============================] - 36s 357ms/step - loss: 0.3596 - acc: 0.8419 - val_loss: 0.4499 - val_acc: 0.7848
Epoch 87/100
100/100 [==============================] - 35s 355ms/step - loss: 0.3594 - acc: 0.8363 - val_loss: 0.4292 - val_acc: 0.8001
Epoch 88/100
100/100 [==============================] - 37s 366ms/step - loss: 0.3527 - acc: 0.8400 - val_loss: 0.4391 - val_acc: 0.7996
Epoch 89/100
100/100 [==============================] - 39s 386ms/step - loss: 0.3600 - acc: 0.8384 - val_loss: 0.4372 - val_acc: 0.7964
Epoch 90/100
100/100 [==============================] - 36s 357ms/step - loss: 0.3479 - acc: 0.8475 - val_loss: 0.4259 - val_acc: 0.8071
Epoch 91/100
100/100 [==============================] - 36s 356ms/step - loss: 0.3481 - acc: 0.8456 - val_loss: 0.4983 - val_acc: 0.7726
Epoch 92/100
100/100 [==============================] - 36s 356ms/step - loss: 0.3401 - acc: 0.8512 - val_loss: 0.5484 - val_acc: 0.7608
Epoch 93/100
100/100 [==============================] - 36s 356ms/step - loss: 0.3506 - acc: 0.8475 - val_loss: 0.4425 - val_acc: 0.7938
Epoch 94/100
100/100 [==============================] - 36s 356ms/step - loss: 0.3597 - acc: 0.8419 - val_loss: 0.4433 - val_acc: 0.8109
Epoch 95/100
100/100 [==============================] - 36s 358ms/step - loss: 0.3314 - acc: 0.8544 - val_loss: 0.4595 - val_acc: 0.8015
Epoch 96/100
100/100 [==============================] - 36s 357ms/step - loss: 0.3408 - acc: 0.8516 - val_loss: 0.4593 - val_acc: 0.7945
Epoch 97/100
100/100 [==============================] - 36s 356ms/step - loss: 0.3464 - acc: 0.8431 - val_loss: 0.4328 - val_acc: 0.7912
Epoch 98/100
100/100 [==============================] - 35s 355ms/step - loss: 0.3365 - acc: 0.8528 - val_loss: 0.4862 - val_acc: 0.7693
Epoch 99/100
100/100 [==============================] - 35s 354ms/step - loss: 0.3238 - acc: 0.8600 - val_loss: 0.5174 - val_acc: 0.7728
Epoch 100/100
100/100 [==============================] - 36s 357ms/step - loss: 0.3383 - acc: 0.8484 - val_loss: 0.4222 - val_acc: 0.8112
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
c:\work\demo-code\python\books\DLWP\5.2-cat-vs-dog.py in <module>
     63 
     64 loss = history.history['loss']
---> 65 acc = history.history['binary_accuracy']
     66 val_acc = history.history['val_binary_accuracy']
     67 val_loss = history.history['val_loss']

KeyError: 'binary_accuracy'

In [24]:
import os, shutil
# The path to the directory where the original
# dataset was uncompressed
original_dataset_dir = r"C:\Users\huxiaomi\Downloads\deep-learning\data\kaggle-dogs-vs-cats\train"

# The directory where we will
# store our smaller dataset
base_dir = r"C:\Users\huxiaomi\Downloads\deep-learning\data\kaggle-dogs-vs-cats\book"
os.mkdir(base_dir)

# Directories for our training,
# validation and test splits
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)

# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)

# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
os.mkdir(validation_cats_dir)

# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
os.mkdir(validation_dogs_dir)

# Directory with our validation cat pictures
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)

# Directory with our validation dog pictures
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)

# Copy first 1000 cat images to train_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_cats_dir, fname)
    shutil.copyfile(src, dst)

# Copy next 500 cat images to validation_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_cats_dir, fname)
    shutil.copyfile(src, dst)
    
# Copy next 500 cat images to test_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_cats_dir, fname)
    shutil.copyfile(src, dst)
    
# Copy first 1000 dog images to train_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_dogs_dir, fname)
    shutil.copyfile(src, dst)
    
# Copy next 500 dog images to validation_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_dogs_dir, fname)
    shutil.copyfile(src, dst)
    
# Copy next 500 dog images to test_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_dogs_dir, fname)
    shutil.copyfile(src, dst)

In [28]:
from keras import layers
from keras import models
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4),metrics=['acc'])

In [35]:
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,)

# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        # This is the target directory
        train_dir,
        # All images will be resized to 150x150
        target_size=(150, 150),
        batch_size=32,
        # Since we use binary_crossentropy loss, we need binary labels
        class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=32,
        class_mode='binary')

history = model.fit_generator(
      train_generator,
      steps_per_epoch=100,
      epochs=100,
      validation_data=validation_generator,
      validation_steps=50)


Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.
Epoch 1/100
100/100 [==============================] - 33s 334ms/step - loss: 0.3268 - acc: 0.8559 - val_loss: 0.3651 - val_acc: 0.8477
Epoch 2/100
100/100 [==============================] - 30s 298ms/step - loss: 0.3232 - acc: 0.8569 - val_loss: 0.4615 - val_acc: 0.8093
Epoch 3/100
100/100 [==============================] - 30s 298ms/step - loss: 0.3277 - acc: 0.8522 - val_loss: 0.4223 - val_acc: 0.8274
Epoch 4/100
100/100 [==============================] - 30s 301ms/step - loss: 0.3237 - acc: 0.8644 - val_loss: 0.4379 - val_acc: 0.8273
Epoch 5/100
  1/100 [..............................] - ETA: 11s - loss: 0.3496 - acc: 0.8125
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
c:\work\demo-code\python\books\DLWP\5.2-cat-vs-dog.py in <module>
     31       epochs=100,
     32       validation_data=validation_generator,
---> 33       validation_steps=50)

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature, stacklevel=2)
---> 91             return func(*args, **kwargs)
     92         wrapper._original_function = func
     93         return wrapper

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1416             use_multiprocessing=use_multiprocessing,
   1417             shuffle=shuffle,
-> 1418             initial_epoch=initial_epoch)
   1419 
   1420     @interfaces.legacy_generator_methods_support

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\engine\training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
    215                 outs = model.train_on_batch(x, y,
    216                                             sample_weight=sample_weight,
--> 217                                             class_weight=class_weight)
    218 
    219                 outs = to_list(outs)

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1215             ins = x + y + sample_weights
   1216         self._make_train_function()
-> 1217         outputs = self.train_function(ins)
   1218         return unpack_singleton(outputs)
   1219 

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
   2713                 return self._legacy_call(inputs)
   2714 
-> 2715             return self._call(inputs)
   2716         else:
   2717             if py_any(is_tensor(x) for x in inputs):

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\backend\tensorflow_backend.py in _call(self, inputs)
   2673             fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
   2674         else:
-> 2675             fetched = self._callable_fn(*array_vals)
   2676         return fetched[:len(self.outputs)]
   2677 

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\client\session.py in __call__(self, *args, **kwargs)
   1437           ret = tf_session.TF_SessionRunCallable(
   1438               self._session._session, self._handle, args, status,
-> 1439               run_metadata_ptr)
   1440         if run_metadata:
   1441           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

KeyboardInterrupt: 

In [34]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_33 (Conv2D)           (None, 148, 148, 32)      896       
_________________________________________________________________
max_pooling2d_27 (MaxPooling (None, 74, 74, 32)        0         
_________________________________________________________________
conv2d_34 (Conv2D)           (None, 72, 72, 64)        18496     
_________________________________________________________________
max_pooling2d_28 (MaxPooling (None, 36, 36, 64)        0         
_________________________________________________________________
conv2d_35 (Conv2D)           (None, 34, 34, 128)       73856     
_________________________________________________________________
max_pooling2d_29 (MaxPooling (None, 17, 17, 128)       0         
_________________________________________________________________
conv2d_36 (Conv2D)           (None, 15, 15, 128)       147584    
_________________________________________________________________
max_pooling2d_30 (MaxPooling (None, 7, 7, 128)         0         
_________________________________________________________________
flatten_9 (Flatten)          (None, 6272)              0         
_________________________________________________________________
dropout_7 (Dropout)          (None, 6272)              0         
_________________________________________________________________
dense_17 (Dense)             (None, 512)               3211776   
_________________________________________________________________
dense_18 (Dense)             (None, 1)                 513       
=================================================================
Total params: 3,453,121
Trainable params: 3,453,121
Non-trainable params: 0
_________________________________________________________________

In [36]:
%reset


Once deleted, variables cannot be recovered. Proceed (y/[n])? y

In [37]:
from keras.applications import VGG16

conv_base = VGG16(weights='imagenet', include_top=False,
                  input_shape=(150, 150, 3))

In [38]:
conv_base.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 150, 150, 3)       0         
_________________________________________________________________
block1_conv1 (Conv2D)        (None, 150, 150, 64)      1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 150, 150, 64)      36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 75, 75, 64)        0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 75, 75, 128)       73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 75, 75, 128)       147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 37, 37, 128)       0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 37, 37, 256)       295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 37, 37, 256)       590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 37, 37, 256)       590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 18, 18, 256)       0         
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 18, 18, 512)       1180160   
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 18, 18, 512)       2359808   
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 18, 18, 512)       2359808   
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 9, 9, 512)         0         
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 9, 9, 512)         2359808   
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 9, 9, 512)         2359808   
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 9, 9, 512)         2359808   
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 4, 4, 512)         0         
=================================================================
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
_________________________________________________________________

In [64]:
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import VGG16
import time
conv_base = VGG16(weights='imagenet', include_top=False,
                  input_shape=(150, 150, 3))

base_dir = r"C:\Users\huxiaomi\Downloads\deep-learning\data\kaggle-dogs-vs-cats"
small_base_dir = os.path.join(base_dir, "small")
train_dir = os.path.join(small_base_dir, r"train")
cv_dir = os.path.join(small_base_dir, r"validation")
test_dir = os.path.join(small_base_dir, r"test")
batch_size = 20

imageGen = ImageDataGenerator(rescale=(1.0/255))


def extract_features(dir, sample_count):
    features = np.zeros(shape=(sample_count, 4, 4, 512))
    labels = np.zeros(shape=(sample_count,))

    generator = imageGen.flow_from_directory(dir, target_size=(
        150, 150), batch_size=batch_size, class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        start_time = time.time()
        feature = conv_base.predict(inputs_batch)
        elapsed_time = time.time() - start_time
        print(elapsed_time)
        features[i*batch_size:(i+1)*batch_size] = feature
        labels[i*batch_size:(i+1)*batch_size] = labels_batch
        i += 1
        if(i*batch_size >= sample_count):
            break

    return features, labels


train_feature, train_label = extract_features(train_dir, 2000)
validation_feature, validation_label = extract_features(cv_dir, 1000)
test_feature, test_label = extract_features(train_dir, 2000)


Found 2000 images belonging to 2 classes.
1.3397986888885498
1.0067832469940186
1.0321071147918701
0.9995181560516357
0.8646934032440186
0.8647334575653076
0.8597636222839355
0.869530200958252
0.8603055477142334
0.8611993789672852
0.8532729148864746
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-64-bcc2e9cce4a1> in <module>
     38 
     39 
---> 40 train_feature, train_label = extract_features(train_dir, 2000)
     41 validation_feature, validation_label = extract_features(cv_dir, 1000)
     42 test_feature, test_label = extract_features(train_dir, 2000)

<ipython-input-64-bcc2e9cce4a1> in extract_features(dir, sample_count)
     26     for inputs_batch, labels_batch in generator:
     27         start_time = time.time()
---> 28         feature = conv_base.predict(inputs_batch)
     29         elapsed_time = time.time() - start_time
     30         print(elapsed_time)

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\engine\training.py in predict(self, x, batch_size, verbose, steps)
   1167                                             batch_size=batch_size,
   1168                                             verbose=verbose,
-> 1169                                             steps=steps)
   1170 
   1171     def train_on_batch(self, x, y,

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\engine\training_arrays.py in predict_loop(model, f, ins, batch_size, verbose, steps)
    292                 ins_batch[i] = ins_batch[i].toarray()
    293 
--> 294             batch_outs = f(ins_batch)
    295             batch_outs = to_list(batch_outs)
    296             if batch_index == 0:

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
   2713                 return self._legacy_call(inputs)
   2714 
-> 2715             return self._call(inputs)
   2716         else:
   2717             if py_any(is_tensor(x) for x in inputs):

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\backend\tensorflow_backend.py in _call(self, inputs)
   2673             fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
   2674         else:
-> 2675             fetched = self._callable_fn(*array_vals)
   2676         return fetched[:len(self.outputs)]
   2677 

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\client\session.py in __call__(self, *args, **kwargs)
   1437           ret = tf_session.TF_SessionRunCallable(
   1438               self._session._session, self._handle, args, status,
-> 1439               run_metadata_ptr)
   1440         if run_metadata:
   1441           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

KeyboardInterrupt: 

In [44]:
train_feature = train_feature.reshape(2000, -1)
validation_feature = validation_feature.reshape(1000, -1)
test_feature = test_feature.reshape(1000, -1)

In [48]:
from keras import models
from keras import layers
from keras import optimizers
from keras import losses
from keras import metrics

model = models.Sequential()
model.add(layers.Dense(512, activation='relu', input_dim=4*4*512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
              loss=losses.binary_crossentropy, metrics=[metrics.binary_accuracy])

history = model.fit(train_feature, train_label, batch_size=20, epochs=30,
          validation_data=(validation_feature, validation_label))


Train on 2000 samples, validate on 1000 samples
Epoch 1/30
2000/2000 [==============================] - 2s 1ms/step - loss: 0.4398 - binary_accuracy: 0.7930 - val_loss: 0.2847 - val_binary_accuracy: 0.8760
Epoch 2/30
2000/2000 [==============================] - 1s 731us/step - loss: 0.2810 - binary_accuracy: 0.8805 - val_loss: 0.3072 - val_binary_accuracy: 0.8600
Epoch 3/30
2000/2000 [==============================] - 1s 721us/step - loss: 0.2140 - binary_accuracy: 0.9120 - val_loss: 0.2559 - val_binary_accuracy: 0.8980
Epoch 4/30
2000/2000 [==============================] - 1s 726us/step - loss: 0.1891 - binary_accuracy: 0.9210 - val_loss: 0.3842 - val_binary_accuracy: 0.8410
Epoch 5/30
2000/2000 [==============================] - 2s 764us/step - loss: 0.1609 - binary_accuracy: 0.9335 - val_loss: 0.2582 - val_binary_accuracy: 0.8990
Epoch 6/30
2000/2000 [==============================] - 1s 727us/step - loss: 0.1391 - binary_accuracy: 0.9425 - val_loss: 0.2347 - val_binary_accuracy: 0.9050
Epoch 7/30
2000/2000 [==============================] - 1s 723us/step - loss: 0.1172 - binary_accuracy: 0.9520 - val_loss: 0.2819 - val_binary_accuracy: 0.8920
Epoch 8/30
2000/2000 [==============================] - 1s 721us/step - loss: 0.1044 - binary_accuracy: 0.9565 - val_loss: 0.2903 - val_binary_accuracy: 0.8870
Epoch 9/30
2000/2000 [==============================] - 1s 728us/step - loss: 0.0806 - binary_accuracy: 0.9710 - val_loss: 0.2934 - val_binary_accuracy: 0.8940
Epoch 10/30
2000/2000 [==============================] - 1s 732us/step - loss: 0.0729 - binary_accuracy: 0.9755 - val_loss: 0.2971 - val_binary_accuracy: 0.8960
Epoch 11/30
2000/2000 [==============================] - 1s 731us/step - loss: 0.0669 - binary_accuracy: 0.9755 - val_loss: 0.2842 - val_binary_accuracy: 0.9040
Epoch 12/30
2000/2000 [==============================] - 1s 731us/step - loss: 0.0565 - binary_accuracy: 0.9775 - val_loss: 0.3138 - val_binary_accuracy: 0.8970
Epoch 13/30
2000/2000 [==============================] - 1s 737us/step - loss: 0.0510 - binary_accuracy: 0.9850 - val_loss: 0.3231 - val_binary_accuracy: 0.8960
Epoch 14/30
2000/2000 [==============================] - 1s 735us/step - loss: 0.0367 - binary_accuracy: 0.9870 - val_loss: 0.3095 - val_binary_accuracy: 0.8980
Epoch 15/30
2000/2000 [==============================] - 1s 743us/step - loss: 0.0345 - binary_accuracy: 0.9915 - val_loss: 0.3524 - val_binary_accuracy: 0.8990
Epoch 16/30
2000/2000 [==============================] - 1s 732us/step - loss: 0.0302 - binary_accuracy: 0.9905 - val_loss: 0.3489 - val_binary_accuracy: 0.8960
Epoch 17/30
2000/2000 [==============================] - 1s 727us/step - loss: 0.0219 - binary_accuracy: 0.9950 - val_loss: 0.3847 - val_binary_accuracy: 0.8980
Epoch 18/30
2000/2000 [==============================] - 1s 734us/step - loss: 0.0271 - binary_accuracy: 0.9930 - val_loss: 0.3612 - val_binary_accuracy: 0.9030
Epoch 19/30
2000/2000 [==============================] - 2s 754us/step - loss: 0.0203 - binary_accuracy: 0.9950 - val_loss: 0.3728 - val_binary_accuracy: 0.8970
Epoch 20/30
2000/2000 [==============================] - 2s 810us/step - loss: 0.0141 - binary_accuracy: 0.9980 - val_loss: 0.3702 - val_binary_accuracy: 0.9000
Epoch 21/30
2000/2000 [==============================] - 2s 776us/step - loss: 0.0148 - binary_accuracy: 0.9965 - val_loss: 0.3705 - val_binary_accuracy: 0.9020
Epoch 22/30
2000/2000 [==============================] - 2s 759us/step - loss: 0.0141 - binary_accuracy: 0.9955 - val_loss: 0.4450 - val_binary_accuracy: 0.8820
Epoch 23/30
2000/2000 [==============================] - 2s 797us/step - loss: 0.0107 - binary_accuracy: 0.9985 - val_loss: 0.3981 - val_binary_accuracy: 0.9010
Epoch 24/30
2000/2000 [==============================] - 2s 750us/step - loss: 0.0090 - binary_accuracy: 0.9990 - val_loss: 0.4030 - val_binary_accuracy: 0.8930
Epoch 25/30
2000/2000 [==============================] - 1s 750us/step - loss: 0.0076 - binary_accuracy: 0.9990 - val_loss: 0.4105 - val_binary_accuracy: 0.9040
Epoch 26/30
2000/2000 [==============================] - 2s 758us/step - loss: 0.0059 - binary_accuracy: 0.9995 - val_loss: 0.4580 - val_binary_accuracy: 0.8990
Epoch 27/30
2000/2000 [==============================] - 2s 756us/step - loss: 0.0066 - binary_accuracy: 1.0000 - val_loss: 0.4775 - val_binary_accuracy: 0.8930
Epoch 28/30
2000/2000 [==============================] - 2s 813us/step - loss: 0.0047 - binary_accuracy: 0.9995 - val_loss: 0.4690 - val_binary_accuracy: 0.8990
Epoch 29/30
2000/2000 [==============================] - 2s 788us/step - loss: 0.0037 - binary_accuracy: 0.9995 - val_loss: 0.4722 - val_binary_accuracy: 0.9040
Epoch 30/30
2000/2000 [==============================] - 1s 747us/step - loss: 0.0034 - binary_accuracy: 1.0000 - val_loss: 0.4993 - val_binary_accuracy: 0.8930

In [52]:
import matplotlib.pyplot as plt

loss = history.history['loss']
acc = history.history['binary_accuracy']
val_acc = history.history['val_binary_accuracy']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Val acc')
plt.title('Accuracy')
plt.legend()

plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Val loss')
plt.title('Losses')
plt.legend()

plt.show()



In [53]:
%reset


Once deleted, variables cannot be recovered. Proceed (y/[n])? y

In [58]:
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import VGG16
from keras import models
from keras import layers
from keras import optimizers
from keras import losses
from keras import metrics

conv_base = VGG16(weights='imagenet', include_top=False,
                  input_shape=(150, 150, 3))

base_dir = r"C:\Users\huxiaomi\Downloads\deep-learning\data\kaggle-dogs-vs-cats"
small_base_dir = os.path.join(base_dir, "small")
train_dir = os.path.join(small_base_dir, r"train")
cv_dir = os.path.join(small_base_dir, r"validation")
test_dir = os.path.join(small_base_dir, r"test")
batch_size = 20

train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2,
                                   height_shift_range=0.2, shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)

conv_base = VGG16(weights='imagenet', include_top=False,
                  input_shape=(150, 150, 3))

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu', input_dim=4*4*512))
model.add(layers.Dense(1, activation='sigmoid'))
conv_base.trainable = False
model.compile(optimizer=optimizers.RMSprop(lr=1e-5),
              loss=losses.binary_crossentropy, metrics=[metrics.binary_accuracy])

In [63]:
model.summary()
conv_base.trainable = True
len(model.trainable_weights)


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 4, 4, 512)         14714688  
_________________________________________________________________
flatten_11 (Flatten)         (None, 8192)              0         
_________________________________________________________________
dense_27 (Dense)             (None, 256)               2097408   
_________________________________________________________________
dense_28 (Dense)             (None, 1)                 257       
=================================================================
Total params: 16,812,353
Trainable params: 2,097,665
Non-trainable params: 14,714,688
_________________________________________________________________
Out[63]:
30

In [60]:
train_generator = train_datagen.flow_from_directory(
    train_dir, target_size=(150, 150), batch_size=batch_size, class_mode='binary')
validation_generator = train_datagen.flow_from_directory(
    cv_dir, target_size=(150, 150), batch_size=batch_size, class_mode='binary')

history = model.fit_generator(train_generator, epochs=100, steps_per_epoch=100,
                              validation_data=validation_generator, validation_steps=50)


Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.
Epoch 1/100
100/100 [==============================] - 113s 1s/step - loss: 0.6366 - binary_accuracy: 0.6370 - val_loss: 0.5882 - val_binary_accuracy: 0.7090
Epoch 2/100
100/100 [==============================] - 112s 1s/step - loss: 0.5459 - binary_accuracy: 0.7520 - val_loss: 0.5248 - val_binary_accuracy: 0.7450
Epoch 3/100
100/100 [==============================] - 111s 1s/step - loss: 0.5037 - binary_accuracy: 0.7725 - val_loss: 0.4911 - val_binary_accuracy: 0.7700
Epoch 4/100
 28/100 [=======>......................] - ETA: 56s - loss: 0.4488 - binary_accuracy: 0.8179
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-60-f06b2db78565> in <module>
      5 
      6 history = model.fit_generator(train_generator, epochs=100, steps_per_epoch=100,
----> 7                               validation_data=validation_generator, validation_steps=50)

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature, stacklevel=2)
---> 91             return func(*args, **kwargs)
     92         wrapper._original_function = func
     93         return wrapper

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1416             use_multiprocessing=use_multiprocessing,
   1417             shuffle=shuffle,
-> 1418             initial_epoch=initial_epoch)
   1419 
   1420     @interfaces.legacy_generator_methods_support

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\engine\training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
    215                 outs = model.train_on_batch(x, y,
    216                                             sample_weight=sample_weight,
--> 217                                             class_weight=class_weight)
    218 
    219                 outs = to_list(outs)

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1215             ins = x + y + sample_weights
   1216         self._make_train_function()
-> 1217         outputs = self.train_function(ins)
   1218         return unpack_singleton(outputs)
   1219 

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
   2713                 return self._legacy_call(inputs)
   2714 
-> 2715             return self._call(inputs)
   2716         else:
   2717             if py_any(is_tensor(x) for x in inputs):

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\keras\backend\tensorflow_backend.py in _call(self, inputs)
   2673             fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
   2674         else:
-> 2675             fetched = self._callable_fn(*array_vals)
   2676         return fetched[:len(self.outputs)]
   2677 

~\AppData\Local\Continuum\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\client\session.py in __call__(self, *args, **kwargs)
   1437           ret = tf_session.TF_SessionRunCallable(
   1438               self._session._session, self._handle, args, status,
-> 1439               run_metadata_ptr)
   1440         if run_metadata:
   1441           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

KeyboardInterrupt: 

In [57]:
%reset


Once deleted, variables cannot be recovered. Proceed (y/[n])? y

In [ ]: