[2]
%load_ext watermark
%watermark -a "Romell D.Z." -u -d -p numpy,pandas,matplotlib,keras
The watermark extension is already loaded. To reload it, use: %reload_ext watermark Romell D.Z. last updated: 2019-02-22 numpy 1.16.1 pandas 0.23.4 matplotlib 2.2.2 keras 2.2.4

7. Transfer Learning

[ ]
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
# Render figures inline at retina resolution; default every figure to 18x6 in.
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (18,6)

Load the dataset from the Kaggle database

[ ]
import os
import glob
import zipfile
import functools

# Upload the API token.
def get_kaggle_credentials():
    """Locate or install the Kaggle API token at ``~/.kaggle/kaggle.json``.

    If the token file is missing, fall back to the Google Colab upload
    widget so the user can provide it interactively.

    Raises:
        IOError: token missing and not running inside Google Colab.
        ValueError: the uploaded file is not named ``kaggle.json``.
    """
    token_dir = os.path.join(os.path.expanduser("~"), ".kaggle")
    token_file = os.path.join(token_dir, "kaggle.json")
    # makedirs(exist_ok=True) replaces the isdir check and tolerates races.
    os.makedirs(token_dir, exist_ok=True)
    if os.path.isfile(token_file):
        return  # token already installed, nothing to do
    try:
        from google.colab import files
    except ImportError:
        # Not on Colab and no token on disk: surface the missing-file problem.
        raise IOError("Kaggle token not found at %s" % token_file)
    uploaded = files.upload()

    if "kaggle.json" not in uploaded:
        raise ValueError("You need an API key! see: "
                       "https://github.com/Kaggle/kaggle-api#api-credentials")
    with open(token_file, "wb") as f:
        f.write(uploaded["kaggle.json"])
    # BUG FIX: the original passed decimal 600 (= 0o1130, nonsense mode bits).
    # The intent is octal rw------- so the Kaggle client accepts the token.
    os.chmod(token_file, 0o600)

get_kaggle_credentials()

import kaggle

# Download data from Kaggle and unzip the files of interest. 
def load_data_from_zip(competition, file):
    """Extract every member of ``<competition>/<file>`` into the competition folder.

    Args:
        competition: directory holding the downloaded archive (also the target dir).
        file: archive file name inside that directory, e.g. ``'train.zip'``.
    """
    with zipfile.ZipFile(os.path.join(competition, file), "r") as zip_ref:
        # The original peeked at namelist()[0] into an unused local; dropped.
        zip_ref.extractall(competition)

def load_data_from_7z(competition, file):
    """Extract a .7z archive with the external ``7z`` binary.

    Requires ``7z`` on PATH; extraction happens in the current working
    directory (same as the original ``os.system`` call).
    """
    import subprocess
    # Argument list instead of a shell string: no quoting/injection issues
    # when the path contains spaces or metacharacters.
    subprocess.run(['7z', 'x', os.path.join(competition, file)], check=False)

def get_data(competition):
    """Download all files for ``competition`` via the Kaggle API and unzip train.zip.

    NOTE(review): the original called load_data_from_zip twice with
    'train.zip'; the duplicate was redundant (possibly meant 'test1.zip' —
    confirm against the competition's file list before extracting the test set).
    """
    kaggle.api.competition_download_files(competition, competition)
    load_data_from_zip(competition, 'train.zip')
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving kaggle.json to kaggle.json
[ ]
competition = 'dogs-vs-cats'
get_data(competition)

Load all dog & cat pictures from the train folder

[ ]
import glob
import numpy as np
import os
import shutil
np.random.seed(42)
[ ]
files = glob.glob('%s/train/*'%competition)

# BUG FIX: match on the file name only. The competition folder is named
# 'dogs-vs-cats', so both 'cat' and 'dog' are substrings of EVERY full path —
# the original put all 25000 files into both lists (see the (25000, 25000)
# output). Expected counts with basename matching: 12500 each.
cat_files = [fn for fn in files if 'cat' in os.path.basename(fn)]
dog_files = [fn for fn in files if 'dog' in os.path.basename(fn)]
len(cat_files), len(dog_files)
(25000, 25000)

Select 1500 pictures per class for the training sample, 500 for the validation sample, and 500 for the test sample

[ ]
# Draw disjoint per-class samples: after each draw the chosen files are
# removed from the remaining pool, so train/validation/test cannot overlap.
# Draw order is fixed so results are reproducible under np.random.seed(42).
cat_train = np.random.choice(cat_files, size=1500, replace=False)
dog_train = np.random.choice(dog_files, size=1500, replace=False)
cat_files = list(set(cat_files) - set(cat_train))
dog_files = list(set(dog_files) - set(dog_train))

cat_val = np.random.choice(cat_files, size=500, replace=False)
dog_val = np.random.choice(dog_files, size=500, replace=False)
cat_files = list(set(cat_files) - set(cat_val))
dog_files = list(set(dog_files) - set(dog_val))

cat_test = np.random.choice(cat_files, size=500, replace=False)
dog_test = np.random.choice(dog_files, size=500, replace=False)

print('Cat datasets:', cat_train.shape, cat_val.shape, cat_test.shape)
print('Dog datasets:', dog_train.shape, dog_val.shape, dog_test.shape)
Cat datasets: (1500,) (500,) (500,) Dog datasets: (1500,) (500,) (500,)

Put these samples into new folders

[ ]
train_dir = 'training_data'
val_dir = 'validation_data'
test_dir = 'test_data'

# Combine the per-class samples into one array per split.
train_files = np.concatenate([cat_train, dog_train])
validate_files = np.concatenate([cat_val, dog_val])
test_files = np.concatenate([cat_test, dog_test])

# Copy each split into its own directory. makedirs(exist_ok=True) replaces
# the original conditional-expression-used-for-side-effect idiom
# (`os.mkdir(d) if not os.path.isdir(d) else None`) and tolerates reruns.
for directory, file_list in ((train_dir, train_files),
                             (val_dir, validate_files),
                             (test_dir, test_files)):
    os.makedirs(directory, exist_ok=True)
    for fn in file_list:
        shutil.copy(fn, directory)
[ ]
import glob
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
%matplotlib inline
Using TensorFlow backend.
[ ]
import os
os.sep
'/'

Load each folder's images into RAM

[ ]
IMG_DIM = (150, 150)  # target (height, width) every image is resized to

# Load the training split into one big float array, resizing to IMG_DIM.
train_files = glob.glob('training_data/*')
train_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in train_files]
train_imgs = np.array(train_imgs)
# Label comes from the file name: paths look like 'training_data/cat.123.jpg',
# so element [1] after splitting on os.sep is the file name and the text
# before the first '.' is the class — assumes a single directory level.
train_labels = [fn.split(os.sep)[1].split('.')[0].strip() for fn in train_files]

# Same treatment for the validation split.
validation_files = glob.glob('validation_data/*')
validation_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in validation_files]
validation_imgs = np.array(validation_imgs)
validation_labels = [fn.split(os.sep)[1].split('.')[0].strip() for fn in validation_files]

print('Train dataset shape:', train_imgs.shape, 
      '\tValidation dataset shape:', validation_imgs.shape)

Always scale (normalize) the inputs: it yields better performance

[ ]
# Normalize pixel intensities from [0, 255] to [0, 1]; keep the raw arrays
# untouched so the augmentation previews below can still use them.
train_imgs_scaled = train_imgs.astype('float32') / 255
validation_imgs_scaled = validation_imgs.astype('float32') / 255

print(train_imgs[0].shape)
array_to_img(train_imgs[0])
(150, 150, 3)

Encoding the text labels as integers using scikit-learn's LabelEncoder

[ ]
# Training hyper-parameters shared by the models below.
batch_size = 30
num_classes = 2
epochs = 30
input_shape = (150, 150, 3)

# Encode the text category labels ('cat'/'dog') as integers (0/1).
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
# fit_transform == fit followed by transform on the same data.
train_labels_enc = le.fit_transform(train_labels)
validation_labels_enc = le.transform(validation_labels)

print(train_labels[1495:1505], train_labels_enc[1495:1505])

Build Model 1 using Convolution Layers

[ ]
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras import optimizers

# Model 1: a plain CNN — three conv/pool stages followed by a dense head
# ending in a single sigmoid unit for binary cat/dog classification.
model = Sequential([
    Conv2D(16, kernel_size=(3, 3), activation='relu',
           input_shape=input_shape),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(128, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid'),
])

# Binary cross-entropy + RMSprop (default learning rate), tracking accuracy.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(),
              metrics=['accuracy'])

model.summary()
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. Instructions for updating: Colocations handled automatically by placer. _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_1 (Conv2D) (None, 148, 148, 16) 448 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 74, 74, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 72, 72, 64) 9280 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 36, 36, 64) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 34, 34, 128) 73856 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 17, 17, 128) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 36992) 0 _________________________________________________________________ dense_1 (Dense) (None, 512) 18940416 _________________________________________________________________ dense_2 (Dense) (None, 1) 513 ================================================================= Total params: 19,024,513 Trainable params: 19,024,513 Non-trainable params: 0 _________________________________________________________________
[ ]
# Train model 1 directly on the in-memory scaled arrays (no augmentation);
# validation loss/accuracy are computed each epoch on the held-out split.
history = model.fit(x=train_imgs_scaled, y=train_labels_enc,
                    validation_data=(validation_imgs_scaled, validation_labels_enc),
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead. Train on 2916 samples, validate on 993 samples Epoch 1/30 2916/2916 [==============================] - 11s 4ms/step - loss: 0.8053 - acc: 0.5628 - val_loss: 1.0819 - val_acc: 0.5065 Epoch 2/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.6463 - acc: 0.6488 - val_loss: 0.6257 - val_acc: 0.6526 Epoch 3/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.5672 - acc: 0.7109 - val_loss: 0.7008 - val_acc: 0.5720 Epoch 4/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.5064 - acc: 0.7507 - val_loss: 0.5800 - val_acc: 0.6858 Epoch 5/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.4324 - acc: 0.7946 - val_loss: 0.5795 - val_acc: 0.6818 Epoch 6/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.3468 - acc: 0.8474 - val_loss: 0.6050 - val_acc: 0.7059 Epoch 7/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.2647 - acc: 0.8858 - val_loss: 1.4185 - val_acc: 0.6203 Epoch 8/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.1952 - acc: 0.9194 - val_loss: 0.7608 - val_acc: 0.7432 Epoch 9/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.1479 - acc: 0.9462 - val_loss: 1.0801 - val_acc: 0.6002 Epoch 10/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0964 - acc: 0.9684 - val_loss: 1.2078 - val_acc: 0.7271 Epoch 11/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0785 - acc: 0.9736 - val_loss: 1.3216 - val_acc: 0.7160 Epoch 12/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0802 - acc: 0.9774 - val_loss: 1.3947 - val_acc: 0.7382 Epoch 13/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0657 - 
acc: 0.9825 - val_loss: 1.4594 - val_acc: 0.7281 Epoch 14/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0732 - acc: 0.9870 - val_loss: 1.6747 - val_acc: 0.7170 Epoch 15/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0465 - acc: 0.9887 - val_loss: 1.5646 - val_acc: 0.6898 Epoch 16/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.1323 - acc: 0.9808 - val_loss: 1.6134 - val_acc: 0.7231 Epoch 17/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0601 - acc: 0.9877 - val_loss: 1.8608 - val_acc: 0.7271 Epoch 18/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0510 - acc: 0.9873 - val_loss: 1.9094 - val_acc: 0.7120 Epoch 19/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0446 - acc: 0.9911 - val_loss: 2.4751 - val_acc: 0.7019 Epoch 20/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0528 - acc: 0.9894 - val_loss: 2.1112 - val_acc: 0.7170 Epoch 21/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0506 - acc: 0.9870 - val_loss: 1.9705 - val_acc: 0.7452 Epoch 22/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0395 - acc: 0.9928 - val_loss: 2.3017 - val_acc: 0.7059 Epoch 23/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0494 - acc: 0.9907 - val_loss: 2.2039 - val_acc: 0.7382 Epoch 24/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0156 - acc: 0.9966 - val_loss: 2.1927 - val_acc: 0.7291 Epoch 25/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0839 - acc: 0.9877 - val_loss: 2.2331 - val_acc: 0.7231 Epoch 26/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0048 - acc: 0.9990 - val_loss: 2.4191 - val_acc: 0.7341 Epoch 27/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0352 - acc: 0.9911 - val_loss: 2.2420 - val_acc: 0.7221 Epoch 28/30 2916/2916 [==============================] - 7s 
2ms/step - loss: 0.0512 - acc: 0.9904 - val_loss: 2.2553 - val_acc: 0.7301 Epoch 29/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0347 - acc: 0.9928 - val_loss: 2.4888 - val_acc: 0.7160 Epoch 30/30 2916/2916 [==============================] - 7s 2ms/step - loss: 0.0089 - acc: 0.9976 - val_loss: 2.5108 - val_acc: 0.7402

Model 1 Basic Convolution Neural Network

[ ]
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
t = f.suptitle('Basic CNN Performance', fontsize=12)
f.subplots_adjust(top=0.85, wspace=0.3)

epoch_list = list(range(1, 31))

# One axis per metric: plot the train and validation curves side by side.
panels = (
    (ax1, 'acc', 'val_acc', 'Accuracy Value', 'Accuracy',
     'Train Accuracy', 'Validation Accuracy'),
    (ax2, 'loss', 'val_loss', 'Loss Value', 'Loss',
     'Train Loss', 'Validation Loss'),
)
for ax, train_key, val_key, ylabel, title, train_lbl, val_lbl in panels:
    ax.plot(epoch_list, history.history[train_key], label=train_lbl)
    ax.plot(epoch_list, history.history[val_key], label=val_lbl)
    ax.set_xticks(np.arange(0, 31, 5))
    ax.set_ylabel(ylabel)
    ax.set_xlabel('Epoch')
    ax.set_title(title)
    ax.legend(loc="best")

Save Model

[ ]
model.save('cats_dogs_basic_without_dropout.h5')

Model 2 CNN with Dropout layers

[ ]
# Model 2: same conv stack as model 1 plus one extra conv/pool stage and
# Dropout after each dense layer to fight the overfitting seen in model 1.
model = Sequential()

model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', 
                 input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(512, activation='relu'))
# Drop 30% of activations during training to regularize the dense head.
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))


model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(),
              metrics=['accuracy'])


# Train on the same in-memory scaled arrays as model 1.
history = model.fit(x=train_imgs_scaled, y=train_labels_enc,
                    validation_data=(validation_imgs_scaled, validation_labels_enc),
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1) 
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version. Instructions for updating: Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`. Train on 2916 samples, validate on 993 samples Epoch 1/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.7244 - acc: 0.5041 - val_loss: 0.6920 - val_acc: 0.4935 Epoch 2/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.6939 - acc: 0.5487 - val_loss: 2.2433 - val_acc: 0.4935 Epoch 3/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.6711 - acc: 0.6337 - val_loss: 0.6448 - val_acc: 0.6576 Epoch 4/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.5867 - acc: 0.6996 - val_loss: 0.5880 - val_acc: 0.6697 Epoch 5/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.5274 - acc: 0.7548 - val_loss: 0.5787 - val_acc: 0.7412 Epoch 6/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.4785 - acc: 0.7781 - val_loss: 0.5449 - val_acc: 0.7200 Epoch 7/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.4296 - acc: 0.8018 - val_loss: 0.6493 - val_acc: 0.7422 Epoch 8/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.3924 - acc: 0.8278 - val_loss: 1.0365 - val_acc: 0.6425 Epoch 9/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.3200 - acc: 0.8680 - val_loss: 0.5699 - val_acc: 0.7744 Epoch 10/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.2542 - acc: 0.8968 - val_loss: 0.8832 - val_acc: 0.7623 Epoch 11/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.2122 - acc: 0.9170 - val_loss: 0.9782 - val_acc: 0.7714 Epoch 12/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.1736 - acc: 0.9345 - val_loss: 0.8127 - val_acc: 0.7754 
Epoch 13/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.1603 - acc: 0.9427 - val_loss: 1.3601 - val_acc: 0.6878 Epoch 14/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.1363 - acc: 0.9510 - val_loss: 0.9988 - val_acc: 0.7835 Epoch 15/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.1130 - acc: 0.9575 - val_loss: 0.7704 - val_acc: 0.7764 Epoch 16/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.1233 - acc: 0.9606 - val_loss: 1.5219 - val_acc: 0.7372 Epoch 17/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0854 - acc: 0.9743 - val_loss: 1.2230 - val_acc: 0.7774 Epoch 18/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0819 - acc: 0.9681 - val_loss: 1.3888 - val_acc: 0.7734 Epoch 19/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0863 - acc: 0.9726 - val_loss: 1.5275 - val_acc: 0.7704 Epoch 20/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0686 - acc: 0.9774 - val_loss: 1.3179 - val_acc: 0.7704 Epoch 21/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0730 - acc: 0.9784 - val_loss: 1.2313 - val_acc: 0.7855 Epoch 22/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0847 - acc: 0.9787 - val_loss: 1.3993 - val_acc: 0.7744 Epoch 23/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0691 - acc: 0.9781 - val_loss: 1.7341 - val_acc: 0.7553 Epoch 24/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0755 - acc: 0.9750 - val_loss: 1.4530 - val_acc: 0.7724 Epoch 25/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0819 - acc: 0.9805 - val_loss: 0.8672 - val_acc: 0.6858 Epoch 26/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0729 - acc: 0.9777 - val_loss: 1.3318 - val_acc: 0.7825 Epoch 27/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0871 - acc: 0.9801 - val_loss: 
1.3081 - val_acc: 0.7976 Epoch 28/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0606 - acc: 0.9794 - val_loss: 1.7220 - val_acc: 0.7764 Epoch 29/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0615 - acc: 0.9849 - val_loss: 1.9543 - val_acc: 0.7895 Epoch 30/30 2916/2916 [==============================] - 6s 2ms/step - loss: 0.0861 - acc: 0.9777 - val_loss: 1.1088 - val_acc: 0.7885
[ ]
model.save('cats_dogs_basic_cnn.h5')

Build Data Augmentation Generator to reduce VARIANCE

[ ]
# Augmentation pipeline for training: random zoom, rotation, shifts, shear
# and horizontal flips; rescale also applies the 1/255 normalization, so the
# generators are fed the UNscaled image arrays.
train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50,
                                   width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, 
                                   horizontal_flip=True, fill_mode='nearest')

# Validation data only gets the rescale — never augment evaluation data.
val_datagen = ImageDataGenerator(rescale=1./255)
[ ]
# Preview: draw 5 random augmentations of one training image (a dog) and
# show that the label is carried through the generator unchanged.
img_id = 1991
dog_generator = train_datagen.flow(train_imgs[img_id:img_id+1], train_labels[img_id:img_id+1],
                                   batch_size=1)
dog = [next(dog_generator) for i in range(0,5)]
fig, ax = plt.subplots(1,5, figsize=(15, 6))
print('Labels:', [item[1][0] for item in dog])
# Each generator item is (images, labels); [0][0] is the single image.
l = [ax[i].imshow(dog[i][0][0]) for i in range(0,5)]
Labels: ['dog', 'dog', 'dog', 'dog', 'dog']
[ ]
# Same augmentation preview for a cat image.
img_id = 2595
cat_generator = train_datagen.flow(train_imgs[img_id:img_id+1], train_labels[img_id:img_id+1],
                                   batch_size=1)
cat = [next(cat_generator) for i in range(0,5)]
fig, ax = plt.subplots(1,5, figsize=(16, 6))
print('Labels:', [item[1][0] for item in cat])
# Each generator item is (images, labels); [0][0] is the single image.
l = [ax[i].imshow(cat[i][0][0]) for i in range(0,5)]
Labels: ['cat', 'cat', 'cat', 'cat', 'cat']

Model 3 CNN with Dropout layers using Data Augmentation Generator

[ ]
# Model 3: the model-2 architecture trained on the augmentation generators.
# Generators consume the UNscaled arrays; rescale=1/255 happens inside them.
train_generator = train_datagen.flow(train_imgs, train_labels_enc, batch_size=30)
val_generator = val_datagen.flow(validation_imgs, validation_labels_enc, batch_size=20)
input_shape = (150, 150, 3)

from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras import optimizers

model = Sequential()

model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', 
                 input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))

# Lower learning rate (1e-4) than models 1/2: augmented data plus more
# epochs benefit from smaller, steadier updates.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['accuracy'])
              
# 100 steps x batch 30 covers the ~3000 training images once per epoch.
history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100,
                              validation_data=val_generator, validation_steps=50, 
                              verbose=1) 
Epoch 1/100 100/100 [==============================] - 20s 196ms/step - loss: 0.6923 - acc: 0.5127 - val_loss: 0.6859 - val_acc: 0.5509 Epoch 2/100 100/100 [==============================] - 18s 177ms/step - loss: 0.6846 - acc: 0.5630 - val_loss: 0.6694 - val_acc: 0.6113 Epoch 3/100 100/100 [==============================] - 18s 182ms/step - loss: 0.6763 - acc: 0.5726 - val_loss: 0.6758 - val_acc: 0.5498 Epoch 4/100 100/100 [==============================] - 18s 178ms/step - loss: 0.6653 - acc: 0.5980 - val_loss: 0.6707 - val_acc: 0.5730 Epoch 5/100 100/100 [==============================] - 18s 178ms/step - loss: 0.6601 - acc: 0.6007 - val_loss: 0.6347 - val_acc: 0.6334 Epoch 6/100 100/100 [==============================] - 18s 184ms/step - loss: 0.6484 - acc: 0.6180 - val_loss: 0.6515 - val_acc: 0.6093 Epoch 7/100 100/100 [==============================] - 18s 177ms/step - loss: 0.6363 - acc: 0.6313 - val_loss: 0.6082 - val_acc: 0.6606 Epoch 8/100 100/100 [==============================] - 18s 179ms/step - loss: 0.6221 - acc: 0.6453 - val_loss: 0.6152 - val_acc: 0.6445 Epoch 9/100 100/100 [==============================] - 18s 176ms/step - loss: 0.6225 - acc: 0.6417 - val_loss: 0.5944 - val_acc: 0.6777 Epoch 10/100 100/100 [==============================] - 18s 177ms/step - loss: 0.6144 - acc: 0.6427 - val_loss: 0.5976 - val_acc: 0.6757 Epoch 11/100 100/100 [==============================] - 18s 183ms/step - loss: 0.5940 - acc: 0.6780 - val_loss: 0.6131 - val_acc: 0.6707 Epoch 12/100 100/100 [==============================] - 18s 176ms/step - loss: 0.6013 - acc: 0.6747 - val_loss: 0.5833 - val_acc: 0.6767 Epoch 13/100 100/100 [==============================] - 17s 174ms/step - loss: 0.5892 - acc: 0.6883 - val_loss: 0.5646 - val_acc: 0.7059 Epoch 14/100 100/100 [==============================] - 18s 176ms/step - loss: 0.5842 - acc: 0.6840 - val_loss: 0.5730 - val_acc: 0.6949 Epoch 15/100 100/100 [==============================] - 17s 168ms/step - loss: 0.5721 - 
acc: 0.6923 - val_loss: 0.5623 - val_acc: 0.7200 Epoch 16/100 100/100 [==============================] - 17s 171ms/step - loss: 0.5678 - acc: 0.7017 - val_loss: 0.5557 - val_acc: 0.7090 Epoch 17/100 100/100 [==============================] - 17s 167ms/step - loss: 0.5738 - acc: 0.6913 - val_loss: 0.5445 - val_acc: 0.7291 Epoch 18/100 100/100 [==============================] - 17s 172ms/step - loss: 0.5626 - acc: 0.7003 - val_loss: 0.5563 - val_acc: 0.7039 Epoch 19/100 100/100 [==============================] - 18s 180ms/step - loss: 0.5592 - acc: 0.7067 - val_loss: 0.5347 - val_acc: 0.7311 Epoch 20/100 100/100 [==============================] - 18s 177ms/step - loss: 0.5570 - acc: 0.7053 - val_loss: 0.5297 - val_acc: 0.7281 Epoch 21/100 100/100 [==============================] - 18s 178ms/step - loss: 0.5591 - acc: 0.7114 - val_loss: 0.5534 - val_acc: 0.7170 Epoch 22/100 100/100 [==============================] - 18s 185ms/step - loss: 0.5487 - acc: 0.7110 - val_loss: 0.5627 - val_acc: 0.7331 Epoch 23/100 100/100 [==============================] - 18s 178ms/step - loss: 0.5370 - acc: 0.7307 - val_loss: 0.5016 - val_acc: 0.7613 Epoch 24/100 100/100 [==============================] - 18s 180ms/step - loss: 0.5458 - acc: 0.7103 - val_loss: 0.5289 - val_acc: 0.7341 Epoch 25/100 100/100 [==============================] - 18s 178ms/step - loss: 0.5311 - acc: 0.7273 - val_loss: 0.5153 - val_acc: 0.7472 Epoch 26/100 100/100 [==============================] - 18s 179ms/step - loss: 0.5365 - acc: 0.7277 - val_loss: 0.5237 - val_acc: 0.7392 Epoch 27/100 100/100 [==============================] - 18s 181ms/step - loss: 0.5315 - acc: 0.7357 - val_loss: 0.4984 - val_acc: 0.7513 Epoch 28/100 100/100 [==============================] - 17s 173ms/step - loss: 0.5200 - acc: 0.7370 - val_loss: 0.5322 - val_acc: 0.7301 Epoch 29/100 100/100 [==============================] - 18s 179ms/step - loss: 0.5137 - acc: 0.7493 - val_loss: 0.5275 - val_acc: 0.7321 Epoch 30/100 100/100 
[==============================] - 17s 172ms/step - loss: 0.5140 - acc: 0.7407 - val_loss: 0.5193 - val_acc: 0.7563 Epoch 31/100 100/100 [==============================] - 17s 174ms/step - loss: 0.5278 - acc: 0.7377 - val_loss: 0.4939 - val_acc: 0.7583 Epoch 32/100 100/100 [==============================] - 18s 182ms/step - loss: 0.5080 - acc: 0.7420 - val_loss: 0.5204 - val_acc: 0.7452 Epoch 33/100 100/100 [==============================] - 18s 175ms/step - loss: 0.4832 - acc: 0.7633 - val_loss: 0.4919 - val_acc: 0.7543 Epoch 34/100 100/100 [==============================] - 17s 173ms/step - loss: 0.5161 - acc: 0.7417 - val_loss: 0.4981 - val_acc: 0.7553 Epoch 35/100 100/100 [==============================] - 18s 180ms/step - loss: 0.4996 - acc: 0.7547 - val_loss: 0.5086 - val_acc: 0.7613 Epoch 36/100 100/100 [==============================] - 17s 174ms/step - loss: 0.5128 - acc: 0.7467 - val_loss: 0.4941 - val_acc: 0.7654 Epoch 37/100 100/100 [==============================] - 18s 177ms/step - loss: 0.4828 - acc: 0.7657 - val_loss: 0.4676 - val_acc: 0.7754 Epoch 38/100 100/100 [==============================] - 17s 172ms/step - loss: 0.4890 - acc: 0.7633 - val_loss: 0.5727 - val_acc: 0.7503 Epoch 39/100 100/100 [==============================] - 18s 175ms/step - loss: 0.5085 - acc: 0.7490 - val_loss: 0.4880 - val_acc: 0.7664 Epoch 40/100 100/100 [==============================] - 18s 178ms/step - loss: 0.4924 - acc: 0.7576 - val_loss: 0.4923 - val_acc: 0.7825 Epoch 41/100 100/100 [==============================] - 17s 171ms/step - loss: 0.4982 - acc: 0.7584 - val_loss: 0.4768 - val_acc: 0.7654 Epoch 42/100 100/100 [==============================] - 17s 168ms/step - loss: 0.4935 - acc: 0.7557 - val_loss: 0.4820 - val_acc: 0.7714 Epoch 43/100 100/100 [==============================] - 17s 174ms/step - loss: 0.4796 - acc: 0.7680 - val_loss: 0.5440 - val_acc: 0.7513 Epoch 44/100 100/100 [==============================] - 17s 168ms/step - loss: 0.4809 - acc: 0.7610 - 
val_loss: 0.4793 - val_acc: 0.7774 Epoch 45/100 100/100 [==============================] - 17s 170ms/step - loss: 0.4855 - acc: 0.7643 - val_loss: 0.4844 - val_acc: 0.7654 Epoch 46/100 100/100 [==============================] - 17s 167ms/step - loss: 0.4816 - acc: 0.7620 - val_loss: 0.4696 - val_acc: 0.7795 Epoch 47/100 100/100 [==============================] - 18s 175ms/step - loss: 0.4725 - acc: 0.7780 - val_loss: 0.5151 - val_acc: 0.7482 Epoch 48/100 100/100 [==============================] - 17s 171ms/step - loss: 0.4729 - acc: 0.7700 - val_loss: 0.4790 - val_acc: 0.7875 Epoch 49/100 100/100 [==============================] - 17s 166ms/step - loss: 0.4676 - acc: 0.7727 - val_loss: 0.5276 - val_acc: 0.7462 Epoch 50/100 100/100 [==============================] - 17s 169ms/step - loss: 0.4595 - acc: 0.7857 - val_loss: 0.4976 - val_acc: 0.7633 Epoch 51/100 100/100 [==============================] - 17s 174ms/step - loss: 0.4613 - acc: 0.7797 - val_loss: 0.5057 - val_acc: 0.7664 Epoch 52/100 100/100 [==============================] - 17s 168ms/step - loss: 0.4600 - acc: 0.7737 - val_loss: 0.4777 - val_acc: 0.7895 Epoch 53/100 100/100 [==============================] - 17s 171ms/step - loss: 0.4610 - acc: 0.7810 - val_loss: 0.4700 - val_acc: 0.7825 Epoch 54/100 100/100 [==============================] - 17s 168ms/step - loss: 0.4667 - acc: 0.7807 - val_loss: 0.4948 - val_acc: 0.7513 Epoch 55/100 100/100 [==============================] - 17s 170ms/step - loss: 0.4599 - acc: 0.7833 - val_loss: 0.4625 - val_acc: 0.8026 Epoch 56/100 100/100 [==============================] - 17s 173ms/step - loss: 0.4520 - acc: 0.7823 - val_loss: 0.4653 - val_acc: 0.7845 Epoch 57/100 100/100 [==============================] - 17s 169ms/step - loss: 0.4549 - acc: 0.7790 - val_loss: 0.4282 - val_acc: 0.8107 Epoch 58/100 100/100 [==============================] - 17s 171ms/step - loss: 0.4467 - acc: 0.7860 - val_loss: 0.4469 - val_acc: 0.7855 Epoch 59/100 100/100 
[==============================] - 18s 181ms/step - loss: 0.4495 - acc: 0.7903 - val_loss: 0.4630 - val_acc: 0.7996 Epoch 60/100 100/100 [==============================] - 17s 175ms/step - loss: 0.4641 - acc: 0.7804 - val_loss: 0.4453 - val_acc: 0.7966 Epoch 61/100 100/100 [==============================] - 18s 176ms/step - loss: 0.4356 - acc: 0.7927 - val_loss: 0.4599 - val_acc: 0.8016 Epoch 62/100 100/100 [==============================] - 17s 174ms/step - loss: 0.4552 - acc: 0.7890 - val_loss: 0.4813 - val_acc: 0.7915 Epoch 63/100 100/100 [==============================] - 17s 175ms/step - loss: 0.4442 - acc: 0.7886 - val_loss: 0.4543 - val_acc: 0.7966 Epoch 64/100 100/100 [==============================] - 18s 179ms/step - loss: 0.4464 - acc: 0.7860 - val_loss: 0.5102 - val_acc: 0.7603 Epoch 65/100 100/100 [==============================] - 18s 177ms/step - loss: 0.4315 - acc: 0.8037 - val_loss: 0.4873 - val_acc: 0.7845 Epoch 66/100 100/100 [==============================] - 18s 176ms/step - loss: 0.4402 - acc: 0.7937 - val_loss: 0.4894 - val_acc: 0.7855 Epoch 67/100 100/100 [==============================] - 18s 181ms/step - loss: 0.4387 - acc: 0.7943 - val_loss: 0.4418 - val_acc: 0.8036 Epoch 68/100 100/100 [==============================] - 18s 176ms/step - loss: 0.4264 - acc: 0.7984 - val_loss: 0.6016 - val_acc: 0.7432 Epoch 69/100 100/100 [==============================] - 18s 178ms/step - loss: 0.4313 - acc: 0.8037 - val_loss: 0.4373 - val_acc: 0.8127 Epoch 70/100 100/100 [==============================] - 18s 175ms/step - loss: 0.4128 - acc: 0.8140 - val_loss: 0.4353 - val_acc: 0.8066 Epoch 71/100 100/100 [==============================] - 18s 177ms/step - loss: 0.4292 - acc: 0.7963 - val_loss: 0.4196 - val_acc: 0.8298 Epoch 72/100 100/100 [==============================] - 18s 181ms/step - loss: 0.4369 - acc: 0.7957 - val_loss: 0.4697 - val_acc: 0.7724 Epoch 73/100 100/100 [==============================] - 18s 175ms/step - loss: 0.4162 - acc: 0.8040 - 
val_loss: 0.4838 - val_acc: 0.8056 Epoch 74/100 100/100 [==============================] - 18s 178ms/step - loss: 0.4433 - acc: 0.7927 - val_loss: 0.4586 - val_acc: 0.7875 Epoch 75/100 100/100 [==============================] - 17s 174ms/step - loss: 0.4180 - acc: 0.8160 - val_loss: 0.4418 - val_acc: 0.8167 Epoch 76/100 100/100 [==============================] - 17s 173ms/step - loss: 0.4108 - acc: 0.8203 - val_loss: 0.4585 - val_acc: 0.8077 Epoch 77/100 100/100 [==============================] - 17s 172ms/step - loss: 0.4132 - acc: 0.8023 - val_loss: 0.5029 - val_acc: 0.7835 Epoch 78/100 100/100 [==============================] - 17s 169ms/step - loss: 0.4155 - acc: 0.8147 - val_loss: 0.4134 - val_acc: 0.8187 Epoch 79/100 100/100 [==============================] - 17s 168ms/step - loss: 0.3923 - acc: 0.8337 - val_loss: 0.4650 - val_acc: 0.8077 Epoch 80/100 100/100 [==============================] - 17s 173ms/step - loss: 0.4183 - acc: 0.8010 - val_loss: 0.4633 - val_acc: 0.8157 Epoch 81/100 100/100 [==============================] - 17s 166ms/step - loss: 0.4097 - acc: 0.8077 - val_loss: 0.3891 - val_acc: 0.8359 Epoch 82/100 100/100 [==============================] - 17s 172ms/step - loss: 0.3982 - acc: 0.8167 - val_loss: 0.4860 - val_acc: 0.8006 Epoch 83/100 100/100 [==============================] - 17s 170ms/step - loss: 0.3938 - acc: 0.8213 - val_loss: 0.4282 - val_acc: 0.8278 Epoch 84/100 100/100 [==============================] - 17s 167ms/step - loss: 0.4204 - acc: 0.8123 - val_loss: 0.4275 - val_acc: 0.8218 Epoch 85/100 100/100 [==============================] - 17s 172ms/step - loss: 0.4058 - acc: 0.8180 - val_loss: 0.4317 - val_acc: 0.8097 Epoch 86/100 100/100 [==============================] - 17s 168ms/step - loss: 0.4059 - acc: 0.8140 - val_loss: 0.3900 - val_acc: 0.8379 Epoch 87/100 100/100 [==============================] - 17s 169ms/step - loss: 0.4025 - acc: 0.8190 - val_loss: 0.4313 - val_acc: 0.8167 Epoch 88/100 100/100 
[==============================] - 17s 173ms/step - loss: 0.4202 - acc: 0.8000 - val_loss: 0.4001 - val_acc: 0.8288 Epoch 89/100 100/100 [==============================] - 17s 168ms/step - loss: 0.3995 - acc: 0.8147 - val_loss: 0.3921 - val_acc: 0.8429 Epoch 90/100 100/100 [==============================] - 17s 166ms/step - loss: 0.4037 - acc: 0.8220 - val_loss: 0.4734 - val_acc: 0.8157 Epoch 91/100 100/100 [==============================] - 17s 173ms/step - loss: 0.3812 - acc: 0.8213 - val_loss: 0.4083 - val_acc: 0.8298 Epoch 92/100 100/100 [==============================] - 17s 167ms/step - loss: 0.3973 - acc: 0.8217 - val_loss: 0.4022 - val_acc: 0.8308 Epoch 93/100 100/100 [==============================] - 17s 170ms/step - loss: 0.3892 - acc: 0.8257 - val_loss: 0.4414 - val_acc: 0.8228 Epoch 94/100 100/100 [==============================] - 17s 170ms/step - loss: 0.3926 - acc: 0.8203 - val_loss: 0.4197 - val_acc: 0.8308 Epoch 95/100 100/100 [==============================] - 17s 174ms/step - loss: 0.3957 - acc: 0.8320 - val_loss: 0.4067 - val_acc: 0.8258 Epoch 96/100 100/100 [==============================] - 18s 179ms/step - loss: 0.3876 - acc: 0.8263 - val_loss: 0.6104 - val_acc: 0.7875 Epoch 97/100 100/100 [==============================] - 18s 177ms/step - loss: 0.3987 - acc: 0.8170 - val_loss: 0.4011 - val_acc: 0.8499 Epoch 98/100 100/100 [==============================] - 18s 176ms/step - loss: 0.3852 - acc: 0.8263 - val_loss: 0.4080 - val_acc: 0.8177 Epoch 99/100 100/100 [==============================] - 18s 180ms/step - loss: 0.3923 - acc: 0.8260 - val_loss: 0.4669 - val_acc: 0.8207 Epoch 100/100 100/100 [==============================] - 18s 177ms/step - loss: 0.3814 - acc: 0.8283 - val_loss: 0.4099 - val_acc: 0.8359
[ ]
model.save('cats_dogs_cnn_img_aug.h5')

Model 4 Using a VGG16 pre-trained model, adding new output layers (Transfer Learning)

[ ]
from keras.applications import vgg16
from keras.models import Model
import keras

# Load the VGG16 convolutional base pre-trained on ImageNet.
# include_top=False drops the original 1000-class dense head so we can
# attach our own classifier on top of the bottleneck features.
# NOTE(review): `input_shape` must be defined in an earlier cell — the
# (1, 4, 4, 512) bottleneck shape shown below suggests ~(150, 150, 3);
# confirm against the data-loading cells.
vgg = vgg16.VGG16(include_top=False, weights='imagenet',
                  input_shape=input_shape)

# Flatten the last pooling layer's output so dense layers can follow.
output = vgg.layers[-1].output
output = keras.layers.Flatten()(output)
vgg_model = Model(vgg.input, output)

# Freeze the entire base: during transfer learning the pre-trained weights
# act as a fixed feature extractor. Setting `trainable` on every layer
# individually makes the frozen state explicit in the summary table below.
vgg_model.trainable = False
for layer in vgg_model.layers:
    layer.trainable = False

import pandas as pd
# Show full (untruncated) layer reprs in the DataFrame. `None` is the
# supported "no limit" sentinel; the old `-1` sentinel is deprecated in
# pandas 1.0 and removed in later versions. Fall back to -1 only for
# legacy pandas whose option validator rejects None.
try:
    pd.set_option('display.max_colwidth', None)
except ValueError:
    pd.set_option('display.max_colwidth', -1)
layers = [(layer, layer.name, layer.trainable) for layer in vgg_model.layers]
pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable'])
Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5 58892288/58889256 [==============================] - 2s 0us/step
Layer Type Layer Name Layer Trainable
0 <keras.engine.input_layer.InputLayer object at 0x7f3a8df32f60> input_1 False
1 <keras.layers.convolutional.Conv2D object at 0x7f3a8dde2780> block1_conv1 False
2 <keras.layers.convolutional.Conv2D object at 0x7f3a8df32278> block1_conv2 False
3 <keras.layers.pooling.MaxPooling2D object at 0x7f3aa0092b00> block1_pool False
4 <keras.layers.convolutional.Conv2D object at 0x7f3a8ddce6d8> block2_conv1 False
5 <keras.layers.convolutional.Conv2D object at 0x7f3a88fdecf8> block2_conv2 False
6 <keras.layers.pooling.MaxPooling2D object at 0x7f3a8d02b630> block2_pool False
7 <keras.layers.convolutional.Conv2D object at 0x7f3a8d02b518> block3_conv1 False
8 <keras.layers.convolutional.Conv2D object at 0x7f3a8cfdc2b0> block3_conv2 False
9 <keras.layers.convolutional.Conv2D object at 0x7f3a8cfee908> block3_conv3 False
10 <keras.layers.pooling.MaxPooling2D object at 0x7f3a8cf8ad30> block3_pool False
11 <keras.layers.convolutional.Conv2D object at 0x7f3a8cf8ac88> block4_conv1 False
12 <keras.layers.convolutional.Conv2D object at 0x7f3a8cfb7ba8> block4_conv2 False
13 <keras.layers.convolutional.Conv2D object at 0x7f3a8cf62240> block4_conv3 False
14 <keras.layers.pooling.MaxPooling2D object at 0x7f3a8cf7cbe0> block4_pool False
15 <keras.layers.convolutional.Conv2D object at 0x7f3a8cf7c6a0> block5_conv1 False
16 <keras.layers.convolutional.Conv2D object at 0x7f3a8cf2a400> block5_conv2 False
17 <keras.layers.convolutional.Conv2D object at 0x7f3a8cebfa58> block5_conv3 False
18 <keras.layers.pooling.MaxPooling2D object at 0x7f3a8ceed6d8> block5_pool False
19 <keras.layers.core.Flatten object at 0x7f3a8ce4cd68> flatten_4 False

Testing how VGG16 is working

[ ]
# Sanity check: push a single training image through the frozen VGG16 base
# and inspect the bottleneck features it produces.
# NOTE(review): `train_imgs_scaled` comes from an earlier cell — presumably
# images rescaled to [0, 1]; verify against the preprocessing cells.
bottleneck_feature_example = vgg.predict(train_imgs_scaled[0:1])
# Expected shape (1, 4, 4, 512): one image, 4x4 spatial map, 512 channels
# (matches the output captured below).
print(bottleneck_feature_example.shape)
# Visualize the first channel of the bottleneck feature map as an image.
plt.imshow(bottleneck_feature_example[0][:,:,0])
(1, 4, 4, 512)
<matplotlib.image.AxesImage at 0x7f3a8cd934a8>