%load_ext watermark
%watermark -a "Romell D.Z." -u -d -p numpy,pandas,matplotlib,keras
The watermark extension is already loaded. To reload it, use:
%reload_ext watermark
Romell D.Z.
last updated: 2019-02-22
numpy 1.16.1
pandas 0.23.4
matplotlib 2.2.2
keras 2.2.4
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
# Plotting defaults: wide figures suit the training-curve plots below.
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (18,6)
import os
import glob
import zipfile
import functools  # NOTE(review): not used anywhere in the visible code — candidate for removal
# Upload the API token.
def get_kaggle_credentials():
    """Ensure ~/.kaggle/kaggle.json exists, prompting a Colab upload if absent.

    If the token file cannot be opened, tries `google.colab.files.upload()`;
    outside Colab the original IOError is re-raised.

    Raises:
        ValueError: the uploaded files do not include "kaggle.json".
    """
    token_dir = os.path.join(os.path.expanduser("~"), ".kaggle")
    token_file = os.path.join(token_dir, "kaggle.json")
    if not os.path.isdir(token_dir):
        os.mkdir(token_dir)
    try:
        with open(token_file, 'r'):
            pass  # token already present — nothing to upload
    except IOError as no_file:
        try:
            from google.colab import files
        except ImportError:
            # Not running on Colab: surface the original "no token" error.
            raise no_file
        uploaded = files.upload()
        if "kaggle.json" not in uploaded:
            raise ValueError("You need an API key! see: "
                             "https://github.com/Kaggle/kaggle-api#api-credentials")
        with open(token_file, "wb") as f:
            f.write(uploaded["kaggle.json"])
    # BUG FIX: the original passed decimal 600 (== 0o1130) to chmod; the
    # intended owner-read/write permission bits are octal 0o600.
    os.chmod(token_file, 0o600)
# NOTE(review): the call is placed before `import kaggle`, which appears to
# need the credentials already on disk at import time — TODO confirm.
get_kaggle_credentials()
import kaggle
# Download data from Kaggle and unzip the files of interest.
def load_data_from_zip(competition, file):
    """Extract every member of `<competition>/<file>` into the `competition` dir.

    Args:
        competition: directory holding the downloaded archive; also the
            extraction target.
        file: archive file name inside that directory, e.g. 'train.zip'.
    """
    with zipfile.ZipFile(os.path.join(competition, file), "r") as zip_ref:
        # The original bound zip_ref.namelist()[0] to an unused local; removed.
        zip_ref.extractall(competition)
def load_data_from_7z(competition, file):
    """Extract `<competition>/<file>` (a .7z archive) via the external `7z` binary.

    Requires the 7-Zip CLI on PATH. Extraction happens in the process's
    current working directory (os.system inherits it).
    """
    import shlex  # local import keeps this notebook cell self-contained
    # Quote the path so file names containing spaces or shell metacharacters
    # cannot break — or inject into — the shell command string.
    os.system('7z x %s ' % shlex.quote(os.path.join(competition, file)))
def get_data(competition):
    """Download all files for a Kaggle competition and unzip the training set.

    Files land in a directory named after the competition slug.
    """
    kaggle.api.competition_download_files(competition, competition)
    # BUG FIX: the original called load_data_from_zip(competition, 'train.zip')
    # twice in a row; the duplicate (redundant) extraction is removed.
    load_data_from_zip(competition, 'train.zip')
Saving kaggle.json to kaggle.json
# Kaggle competition slug; the data is downloaded into ./dogs-vs-cats/.
competition = 'dogs-vs-cats'
get_data(competition)
import glob
import numpy as np
import os
import shutil
# Fix the RNG so the train/val/test sampling below is reproducible.
np.random.seed(42)
files = glob.glob('%s/train/*'%competition)
# File names encode the class (e.g. 'cat.123.jpg'), so substring matching splits them.
cat_files = [fn for fn in files if 'cat' in fn]
dog_files = [fn for fn in files if 'dog' in fn]
len(cat_files), len(dog_files)
(25000, 25000)
# Sample disjoint train/val/test subsets per class: 1500/500/500 images each.
cat_train = np.random.choice(cat_files, size=1500, replace=False)
dog_train = np.random.choice(dog_files, size=1500, replace=False)
# Remove already-sampled files before drawing the next split so splits stay disjoint.
cat_files = list(set(cat_files) - set(cat_train))
dog_files = list(set(dog_files) - set(dog_train))
cat_val = np.random.choice(cat_files, size=500, replace=False)
dog_val = np.random.choice(dog_files, size=500, replace=False)
cat_files = list(set(cat_files) - set(cat_val))
dog_files = list(set(dog_files) - set(dog_val))
cat_test = np.random.choice(cat_files, size=500, replace=False)
dog_test = np.random.choice(dog_files, size=500, replace=False)
print('Cat datasets:', cat_train.shape, cat_val.shape, cat_test.shape)
print('Dog datasets:', dog_train.shape, dog_val.shape, dog_test.shape)
Cat datasets: (1500,) (500,) (500,)
Dog datasets: (1500,) (500,) (500,)
# Stage the sampled images into one directory per split, for use by the
# Keras generators and glob-based loaders below.
train_dir = 'training_data'
val_dir = 'validation_data'
test_dir = 'test_data'

train_files = np.concatenate([cat_train, dog_train])
validate_files = np.concatenate([cat_val, dog_val])
test_files = np.concatenate([cat_test, dog_test])

# Idiomatic replacement for the original `os.mkdir(d) if not os.path.isdir(d)
# else None` conditional-expressions-as-statements: makedirs with exist_ok
# is equivalent (idempotent) and clearer.
for target_dir in (train_dir, val_dir, test_dir):
    os.makedirs(target_dir, exist_ok=True)

for fn in train_files:
    shutil.copy(fn, train_dir)
for fn in validate_files:
    shutil.copy(fn, val_dir)
for fn in test_files:
    shutil.copy(fn, test_dir)
import glob
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
%matplotlib inline
Using TensorFlow backend.
import os
# Quick check of the platform path separator, used when parsing labels below.
os.sep
'/'
# All images are resized to a fixed 150x150 so they stack into one 4-D array.
IMG_DIM = (150, 150)
train_files = glob.glob('training_data/*')
train_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in train_files]
train_imgs = np.array(train_imgs)
# Label comes from the path, e.g. 'training_data/cat.123.jpg' -> 'cat'.
# NOTE(review): assumes exactly one directory level and os.sep-separated
# glob results — TODO confirm behavior on Windows.
train_labels = [fn.split(os.sep)[1].split('.')[0].strip() for fn in train_files]
validation_files = glob.glob('validation_data/*')
validation_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in validation_files]
validation_imgs = np.array(validation_imgs)
validation_labels = [fn.split(os.sep)[1].split('.')[0].strip() for fn in validation_files]
print('Train dataset shape:', train_imgs.shape,
'\tValidation dataset shape:', validation_imgs.shape)
Train dataset shape: (2905, 150, 150, 3) Validation dataset shape: (989, 150, 150, 3)
# Scale pixel values from [0, 255] to [0, 1] for training; the raw arrays are
# kept for the augmentation previews below (their datagen rescales itself).
train_imgs_scaled = train_imgs.astype('float32')
validation_imgs_scaled = validation_imgs.astype('float32')
train_imgs_scaled /= 255
validation_imgs_scaled /= 255
print(train_imgs[0].shape)
array_to_img(train_imgs[0])
(150, 150, 3)
# Training hyperparameters shared by the CNN experiments below.
batch_size = 30
num_classes = 2  # NOTE(review): unused — the models use a single sigmoid output
epochs = 30
input_shape = (150, 150, 3)
# encode text category labels
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(train_labels)
# Per the sample printout below: 'cat' -> 0, 'dog' -> 1.
train_labels_enc = le.transform(train_labels)
validation_labels_enc = le.transform(validation_labels)
print(train_labels[1495:1505], train_labels_enc[1495:1505])
['dog', 'cat', 'cat', 'dog', 'dog', 'dog', 'cat', 'cat', 'dog', 'cat'] [1 0 0 1 1 1 0 0 1 0]
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras import optimizers
# Basic CNN: three conv/pool stages feeding a 512-unit dense head.
# Binary cat-vs-dog task, hence one sigmoid unit + binary crossentropy.
model = Sequential()
model.add(Conv2D(16, kernel_size=(3, 3), activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(),
metrics=['accuracy'])
model.summary()
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 148, 148, 16) 448
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 74, 74, 16) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 72, 72, 64) 9280
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 36, 36, 64) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 34, 34, 128) 73856
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 17, 17, 128) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 36992) 0
_________________________________________________________________
dense_1 (Dense) (None, 512) 18940416
_________________________________________________________________
dense_2 (Dense) (None, 1) 513
=================================================================
Total params: 19,024,513
Trainable params: 19,024,513
Non-trainable params: 0
_________________________________________________________________
# Train on the pre-scaled arrays (no augmentation); the validation split
# monitors overfitting — see the diverging val_loss in the log below.
history = model.fit(x=train_imgs_scaled, y=train_labels_enc,
validation_data=(validation_imgs_scaled, validation_labels_enc),
batch_size=batch_size,
epochs=epochs,
verbose=1)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Train on 2916 samples, validate on 993 samples
Epoch 1/30
2916/2916 [==============================] - 11s 4ms/step - loss: 0.8053 - acc: 0.5628 - val_loss: 1.0819 - val_acc: 0.5065
Epoch 2/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.6463 - acc: 0.6488 - val_loss: 0.6257 - val_acc: 0.6526
Epoch 3/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.5672 - acc: 0.7109 - val_loss: 0.7008 - val_acc: 0.5720
Epoch 4/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.5064 - acc: 0.7507 - val_loss: 0.5800 - val_acc: 0.6858
Epoch 5/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.4324 - acc: 0.7946 - val_loss: 0.5795 - val_acc: 0.6818
Epoch 6/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.3468 - acc: 0.8474 - val_loss: 0.6050 - val_acc: 0.7059
Epoch 7/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.2647 - acc: 0.8858 - val_loss: 1.4185 - val_acc: 0.6203
Epoch 8/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.1952 - acc: 0.9194 - val_loss: 0.7608 - val_acc: 0.7432
Epoch 9/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.1479 - acc: 0.9462 - val_loss: 1.0801 - val_acc: 0.6002
Epoch 10/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0964 - acc: 0.9684 - val_loss: 1.2078 - val_acc: 0.7271
Epoch 11/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0785 - acc: 0.9736 - val_loss: 1.3216 - val_acc: 0.7160
Epoch 12/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0802 - acc: 0.9774 - val_loss: 1.3947 - val_acc: 0.7382
Epoch 13/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0657 - acc: 0.9825 - val_loss: 1.4594 - val_acc: 0.7281
Epoch 14/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0732 - acc: 0.9870 - val_loss: 1.6747 - val_acc: 0.7170
Epoch 15/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0465 - acc: 0.9887 - val_loss: 1.5646 - val_acc: 0.6898
Epoch 16/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.1323 - acc: 0.9808 - val_loss: 1.6134 - val_acc: 0.7231
Epoch 17/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0601 - acc: 0.9877 - val_loss: 1.8608 - val_acc: 0.7271
Epoch 18/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0510 - acc: 0.9873 - val_loss: 1.9094 - val_acc: 0.7120
Epoch 19/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0446 - acc: 0.9911 - val_loss: 2.4751 - val_acc: 0.7019
Epoch 20/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0528 - acc: 0.9894 - val_loss: 2.1112 - val_acc: 0.7170
Epoch 21/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0506 - acc: 0.9870 - val_loss: 1.9705 - val_acc: 0.7452
Epoch 22/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0395 - acc: 0.9928 - val_loss: 2.3017 - val_acc: 0.7059
Epoch 23/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0494 - acc: 0.9907 - val_loss: 2.2039 - val_acc: 0.7382
Epoch 24/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0156 - acc: 0.9966 - val_loss: 2.1927 - val_acc: 0.7291
Epoch 25/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0839 - acc: 0.9877 - val_loss: 2.2331 - val_acc: 0.7231
Epoch 26/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0048 - acc: 0.9990 - val_loss: 2.4191 - val_acc: 0.7341
Epoch 27/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0352 - acc: 0.9911 - val_loss: 2.2420 - val_acc: 0.7221
Epoch 28/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0512 - acc: 0.9904 - val_loss: 2.2553 - val_acc: 0.7301
Epoch 29/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0347 - acc: 0.9928 - val_loss: 2.4888 - val_acc: 0.7160
Epoch 30/30
2916/2916 [==============================] - 7s 2ms/step - loss: 0.0089 - acc: 0.9976 - val_loss: 2.5108 - val_acc: 0.7402
# Learning curves for the basic CNN. The epoch axis is derived from the
# recorded history rather than the original hard-coded range(1, 31) /
# np.arange(0, 31, 5), so the plot stays correct if `epochs` changes upstream.
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
t = f.suptitle('Basic CNN Performance', fontsize=12)
f.subplots_adjust(top=0.85, wspace=0.3)

n_epochs = len(history.history['acc'])
epoch_list = list(range(1, n_epochs + 1))

ax1.plot(epoch_list, history.history['acc'], label='Train Accuracy')
ax1.plot(epoch_list, history.history['val_acc'], label='Validation Accuracy')
ax1.set_xticks(np.arange(0, n_epochs + 1, 5))
ax1.set_ylabel('Accuracy Value')
ax1.set_xlabel('Epoch')
ax1.set_title('Accuracy')
l1 = ax1.legend(loc="best")

ax2.plot(epoch_list, history.history['loss'], label='Train Loss')
ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss')
ax2.set_xticks(np.arange(0, n_epochs + 1, 5))
ax2.set_ylabel('Loss Value')
ax2.set_xlabel('Epoch')
ax2.set_title('Loss')
l2 = ax2.legend(loc="best")

# Persist the trained weights for later comparison against the regularized runs.
model.save('cats_dogs_basic_without_dropout.h5')
# Second experiment: same CNN with a fourth conv/pool stage and Dropout after
# each dense layer, to counter the overfitting seen in the first run.
model = Sequential()
model.add(Conv2D(16, kernel_size=(3, 3), activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(),
metrics=['accuracy'])
# Retrain from scratch on the same scaled arrays.
history = model.fit(x=train_imgs_scaled, y=train_labels_enc,
validation_data=(validation_imgs_scaled, validation_labels_enc),
batch_size=batch_size,
epochs=epochs,
verbose=1)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
Train on 2916 samples, validate on 993 samples
Epoch 1/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.7244 - acc: 0.5041 - val_loss: 0.6920 - val_acc: 0.4935
Epoch 2/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.6939 - acc: 0.5487 - val_loss: 2.2433 - val_acc: 0.4935
Epoch 3/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.6711 - acc: 0.6337 - val_loss: 0.6448 - val_acc: 0.6576
Epoch 4/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.5867 - acc: 0.6996 - val_loss: 0.5880 - val_acc: 0.6697
Epoch 5/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.5274 - acc: 0.7548 - val_loss: 0.5787 - val_acc: 0.7412
Epoch 6/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.4785 - acc: 0.7781 - val_loss: 0.5449 - val_acc: 0.7200
Epoch 7/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.4296 - acc: 0.8018 - val_loss: 0.6493 - val_acc: 0.7422
Epoch 8/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.3924 - acc: 0.8278 - val_loss: 1.0365 - val_acc: 0.6425
Epoch 9/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.3200 - acc: 0.8680 - val_loss: 0.5699 - val_acc: 0.7744
Epoch 10/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.2542 - acc: 0.8968 - val_loss: 0.8832 - val_acc: 0.7623
Epoch 11/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.2122 - acc: 0.9170 - val_loss: 0.9782 - val_acc: 0.7714
Epoch 12/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.1736 - acc: 0.9345 - val_loss: 0.8127 - val_acc: 0.7754
Epoch 13/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.1603 - acc: 0.9427 - val_loss: 1.3601 - val_acc: 0.6878
Epoch 14/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.1363 - acc: 0.9510 - val_loss: 0.9988 - val_acc: 0.7835
Epoch 15/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.1130 - acc: 0.9575 - val_loss: 0.7704 - val_acc: 0.7764
Epoch 16/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.1233 - acc: 0.9606 - val_loss: 1.5219 - val_acc: 0.7372
Epoch 17/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0854 - acc: 0.9743 - val_loss: 1.2230 - val_acc: 0.7774
Epoch 18/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0819 - acc: 0.9681 - val_loss: 1.3888 - val_acc: 0.7734
Epoch 19/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0863 - acc: 0.9726 - val_loss: 1.5275 - val_acc: 0.7704
Epoch 20/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0686 - acc: 0.9774 - val_loss: 1.3179 - val_acc: 0.7704
Epoch 21/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0730 - acc: 0.9784 - val_loss: 1.2313 - val_acc: 0.7855
Epoch 22/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0847 - acc: 0.9787 - val_loss: 1.3993 - val_acc: 0.7744
Epoch 23/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0691 - acc: 0.9781 - val_loss: 1.7341 - val_acc: 0.7553
Epoch 24/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0755 - acc: 0.9750 - val_loss: 1.4530 - val_acc: 0.7724
Epoch 25/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0819 - acc: 0.9805 - val_loss: 0.8672 - val_acc: 0.6858
Epoch 26/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0729 - acc: 0.9777 - val_loss: 1.3318 - val_acc: 0.7825
Epoch 27/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0871 - acc: 0.9801 - val_loss: 1.3081 - val_acc: 0.7976
Epoch 28/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0606 - acc: 0.9794 - val_loss: 1.7220 - val_acc: 0.7764
Epoch 29/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0615 - acc: 0.9849 - val_loss: 1.9543 - val_acc: 0.7895
Epoch 30/30
2916/2916 [==============================] - 6s 2ms/step - loss: 0.0861 - acc: 0.9777 - val_loss: 1.1088 - val_acc: 0.7885
model.save('cats_dogs_basic_cnn.h5')
# Augmentation pipeline for training images; validation only gets rescaling
# so the evaluation distribution stays untouched.
train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50,
width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,
horizontal_flip=True, fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
# Preview five augmented variants of a single training image (a dog, per the
# labels printed below).
img_id = 1991
dog_generator = train_datagen.flow(train_imgs[img_id:img_id+1], train_labels[img_id:img_id+1],
batch_size=1)
dog = [next(dog_generator) for i in range(0,5)]
fig, ax = plt.subplots(1,5, figsize=(15, 6))
print('Labels:', [item[1][0] for item in dog])
l = [ax[i].imshow(dog[i][0][0]) for i in range(0,5)]
Labels: ['dog', 'dog', 'dog', 'dog', 'dog']
# Same augmentation preview for a cat image. NOTE(review): near-duplicate of
# the dog-preview cell; a small helper function would remove the copy-paste.
img_id = 2595
cat_generator = train_datagen.flow(train_imgs[img_id:img_id+1], train_labels[img_id:img_id+1],
batch_size=1)
cat = [next(cat_generator) for i in range(0,5)]
fig, ax = plt.subplots(1,5, figsize=(16, 6))
print('Labels:', [item[1][0] for item in cat])
l = [ax[i].imshow(cat[i][0][0]) for i in range(0,5)]
Labels: ['cat', 'cat', 'cat', 'cat', 'cat']
# Generators feed the *unscaled* arrays; rescale=1./255 inside the datagens
# handles normalization on the fly.
train_generator = train_datagen.flow(train_imgs, train_labels_enc, batch_size=30)
val_generator = val_datagen.flow(validation_imgs, validation_labels_enc, batch_size=20)
input_shape = (150, 150, 3)
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras import optimizers
# Same dropout CNN architecture as before, retrained on augmented data.
model = Sequential()
model.add(Conv2D(16, kernel_size=(3, 3), activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
# Explicit lower learning rate (1e-4) than the earlier runs' RMSprop default.
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['accuracy'])
# 100 steps x batch 30 = 3000 (augmented) samples seen per epoch.
history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100,
validation_data=val_generator, validation_steps=50,
verbose=1)
Epoch 1/100
100/100 [==============================] - 20s 196ms/step - loss: 0.6923 - acc: 0.5127 - val_loss: 0.6859 - val_acc: 0.5509
Epoch 2/100
100/100 [==============================] - 18s 177ms/step - loss: 0.6846 - acc: 0.5630 - val_loss: 0.6694 - val_acc: 0.6113
Epoch 3/100
100/100 [==============================] - 18s 182ms/step - loss: 0.6763 - acc: 0.5726 - val_loss: 0.6758 - val_acc: 0.5498
Epoch 4/100
100/100 [==============================] - 18s 178ms/step - loss: 0.6653 - acc: 0.5980 - val_loss: 0.6707 - val_acc: 0.5730
Epoch 5/100
100/100 [==============================] - 18s 178ms/step - loss: 0.6601 - acc: 0.6007 - val_loss: 0.6347 - val_acc: 0.6334
Epoch 6/100
100/100 [==============================] - 18s 184ms/step - loss: 0.6484 - acc: 0.6180 - val_loss: 0.6515 - val_acc: 0.6093
Epoch 7/100
100/100 [==============================] - 18s 177ms/step - loss: 0.6363 - acc: 0.6313 - val_loss: 0.6082 - val_acc: 0.6606
Epoch 8/100
100/100 [==============================] - 18s 179ms/step - loss: 0.6221 - acc: 0.6453 - val_loss: 0.6152 - val_acc: 0.6445
Epoch 9/100
100/100 [==============================] - 18s 176ms/step - loss: 0.6225 - acc: 0.6417 - val_loss: 0.5944 - val_acc: 0.6777
Epoch 10/100
100/100 [==============================] - 18s 177ms/step - loss: 0.6144 - acc: 0.6427 - val_loss: 0.5976 - val_acc: 0.6757
Epoch 11/100
100/100 [==============================] - 18s 183ms/step - loss: 0.5940 - acc: 0.6780 - val_loss: 0.6131 - val_acc: 0.6707
Epoch 12/100
100/100 [==============================] - 18s 176ms/step - loss: 0.6013 - acc: 0.6747 - val_loss: 0.5833 - val_acc: 0.6767
Epoch 13/100
100/100 [==============================] - 17s 174ms/step - loss: 0.5892 - acc: 0.6883 - val_loss: 0.5646 - val_acc: 0.7059
Epoch 14/100
100/100 [==============================] - 18s 176ms/step - loss: 0.5842 - acc: 0.6840 - val_loss: 0.5730 - val_acc: 0.6949
Epoch 15/100
100/100 [==============================] - 17s 168ms/step - loss: 0.5721 - acc: 0.6923 - val_loss: 0.5623 - val_acc: 0.7200
Epoch 16/100
100/100 [==============================] - 17s 171ms/step - loss: 0.5678 - acc: 0.7017 - val_loss: 0.5557 - val_acc: 0.7090
Epoch 17/100
100/100 [==============================] - 17s 167ms/step - loss: 0.5738 - acc: 0.6913 - val_loss: 0.5445 - val_acc: 0.7291
Epoch 18/100
100/100 [==============================] - 17s 172ms/step - loss: 0.5626 - acc: 0.7003 - val_loss: 0.5563 - val_acc: 0.7039
Epoch 19/100
100/100 [==============================] - 18s 180ms/step - loss: 0.5592 - acc: 0.7067 - val_loss: 0.5347 - val_acc: 0.7311
Epoch 20/100
100/100 [==============================] - 18s 177ms/step - loss: 0.5570 - acc: 0.7053 - val_loss: 0.5297 - val_acc: 0.7281
Epoch 21/100
100/100 [==============================] - 18s 178ms/step - loss: 0.5591 - acc: 0.7114 - val_loss: 0.5534 - val_acc: 0.7170
Epoch 22/100
100/100 [==============================] - 18s 185ms/step - loss: 0.5487 - acc: 0.7110 - val_loss: 0.5627 - val_acc: 0.7331
Epoch 23/100
100/100 [==============================] - 18s 178ms/step - loss: 0.5370 - acc: 0.7307 - val_loss: 0.5016 - val_acc: 0.7613
Epoch 24/100
100/100 [==============================] - 18s 180ms/step - loss: 0.5458 - acc: 0.7103 - val_loss: 0.5289 - val_acc: 0.7341
Epoch 25/100
100/100 [==============================] - 18s 178ms/step - loss: 0.5311 - acc: 0.7273 - val_loss: 0.5153 - val_acc: 0.7472
Epoch 26/100
100/100 [==============================] - 18s 179ms/step - loss: 0.5365 - acc: 0.7277 - val_loss: 0.5237 - val_acc: 0.7392
Epoch 27/100
100/100 [==============================] - 18s 181ms/step - loss: 0.5315 - acc: 0.7357 - val_loss: 0.4984 - val_acc: 0.7513
Epoch 28/100
100/100 [==============================] - 17s 173ms/step - loss: 0.5200 - acc: 0.7370 - val_loss: 0.5322 - val_acc: 0.7301
Epoch 29/100
100/100 [==============================] - 18s 179ms/step - loss: 0.5137 - acc: 0.7493 - val_loss: 0.5275 - val_acc: 0.7321
Epoch 30/100
100/100 [==============================] - 17s 172ms/step - loss: 0.5140 - acc: 0.7407 - val_loss: 0.5193 - val_acc: 0.7563
Epoch 31/100
100/100 [==============================] - 17s 174ms/step - loss: 0.5278 - acc: 0.7377 - val_loss: 0.4939 - val_acc: 0.7583
Epoch 32/100
100/100 [==============================] - 18s 182ms/step - loss: 0.5080 - acc: 0.7420 - val_loss: 0.5204 - val_acc: 0.7452
Epoch 33/100
100/100 [==============================] - 18s 175ms/step - loss: 0.4832 - acc: 0.7633 - val_loss: 0.4919 - val_acc: 0.7543
Epoch 34/100
100/100 [==============================] - 17s 173ms/step - loss: 0.5161 - acc: 0.7417 - val_loss: 0.4981 - val_acc: 0.7553
Epoch 35/100
100/100 [==============================] - 18s 180ms/step - loss: 0.4996 - acc: 0.7547 - val_loss: 0.5086 - val_acc: 0.7613
Epoch 36/100
100/100 [==============================] - 17s 174ms/step - loss: 0.5128 - acc: 0.7467 - val_loss: 0.4941 - val_acc: 0.7654
Epoch 37/100
100/100 [==============================] - 18s 177ms/step - loss: 0.4828 - acc: 0.7657 - val_loss: 0.4676 - val_acc: 0.7754
Epoch 38/100
100/100 [==============================] - 17s 172ms/step - loss: 0.4890 - acc: 0.7633 - val_loss: 0.5727 - val_acc: 0.7503
Epoch 39/100
100/100 [==============================] - 18s 175ms/step - loss: 0.5085 - acc: 0.7490 - val_loss: 0.4880 - val_acc: 0.7664
Epoch 40/100
100/100 [==============================] - 18s 178ms/step - loss: 0.4924 - acc: 0.7576 - val_loss: 0.4923 - val_acc: 0.7825
Epoch 41/100
100/100 [==============================] - 17s 171ms/step - loss: 0.4982 - acc: 0.7584 - val_loss: 0.4768 - val_acc: 0.7654
Epoch 42/100
100/100 [==============================] - 17s 168ms/step - loss: 0.4935 - acc: 0.7557 - val_loss: 0.4820 - val_acc: 0.7714
Epoch 43/100
100/100 [==============================] - 17s 174ms/step - loss: 0.4796 - acc: 0.7680 - val_loss: 0.5440 - val_acc: 0.7513
Epoch 44/100
100/100 [==============================] - 17s 168ms/step - loss: 0.4809 - acc: 0.7610 - val_loss: 0.4793 - val_acc: 0.7774
Epoch 45/100
100/100 [==============================] - 17s 170ms/step - loss: 0.4855 - acc: 0.7643 - val_loss: 0.4844 - val_acc: 0.7654
Epoch 46/100
100/100 [==============================] - 17s 167ms/step - loss: 0.4816 - acc: 0.7620 - val_loss: 0.4696 - val_acc: 0.7795
Epoch 47/100
100/100 [==============================] - 18s 175ms/step - loss: 0.4725 - acc: 0.7780 - val_loss: 0.5151 - val_acc: 0.7482
Epoch 48/100
100/100 [==============================] - 17s 171ms/step - loss: 0.4729 - acc: 0.7700 - val_loss: 0.4790 - val_acc: 0.7875
Epoch 49/100
100/100 [==============================] - 17s 166ms/step - loss: 0.4676 - acc: 0.7727 - val_loss: 0.5276 - val_acc: 0.7462
Epoch 50/100
100/100 [==============================] - 17s 169ms/step - loss: 0.4595 - acc: 0.7857 - val_loss: 0.4976 - val_acc: 0.7633
Epoch 51/100
100/100 [==============================] - 17s 174ms/step - loss: 0.4613 - acc: 0.7797 - val_loss: 0.5057 - val_acc: 0.7664
Epoch 52/100
100/100 [==============================] - 17s 168ms/step - loss: 0.4600 - acc: 0.7737 - val_loss: 0.4777 - val_acc: 0.7895
Epoch 53/100
100/100 [==============================] - 17s 171ms/step - loss: 0.4610 - acc: 0.7810 - val_loss: 0.4700 - val_acc: 0.7825
Epoch 54/100
100/100 [==============================] - 17s 168ms/step - loss: 0.4667 - acc: 0.7807 - val_loss: 0.4948 - val_acc: 0.7513
Epoch 55/100
100/100 [==============================] - 17s 170ms/step - loss: 0.4599 - acc: 0.7833 - val_loss: 0.4625 - val_acc: 0.8026
Epoch 56/100
100/100 [==============================] - 17s 173ms/step - loss: 0.4520 - acc: 0.7823 - val_loss: 0.4653 - val_acc: 0.7845
Epoch 57/100
100/100 [==============================] - 17s 169ms/step - loss: 0.4549 - acc: 0.7790 - val_loss: 0.4282 - val_acc: 0.8107
Epoch 58/100
100/100 [==============================] - 17s 171ms/step - loss: 0.4467 - acc: 0.7860 - val_loss: 0.4469 - val_acc: 0.7855
Epoch 59/100
100/100 [==============================] - 18s 181ms/step - loss: 0.4495 - acc: 0.7903 - val_loss: 0.4630 - val_acc: 0.7996
Epoch 60/100
100/100 [==============================] - 17s 175ms/step - loss: 0.4641 - acc: 0.7804 - val_loss: 0.4453 - val_acc: 0.7966
Epoch 61/100
100/100 [==============================] - 18s 176ms/step - loss: 0.4356 - acc: 0.7927 - val_loss: 0.4599 - val_acc: 0.8016
Epoch 62/100
100/100 [==============================] - 17s 174ms/step - loss: 0.4552 - acc: 0.7890 - val_loss: 0.4813 - val_acc: 0.7915
Epoch 63/100
100/100 [==============================] - 17s 175ms/step - loss: 0.4442 - acc: 0.7886 - val_loss: 0.4543 - val_acc: 0.7966
Epoch 64/100
100/100 [==============================] - 18s 179ms/step - loss: 0.4464 - acc: 0.7860 - val_loss: 0.5102 - val_acc: 0.7603
Epoch 65/100
100/100 [==============================] - 18s 177ms/step - loss: 0.4315 - acc: 0.8037 - val_loss: 0.4873 - val_acc: 0.7845
Epoch 66/100
100/100 [==============================] - 18s 176ms/step - loss: 0.4402 - acc: 0.7937 - val_loss: 0.4894 - val_acc: 0.7855
Epoch 67/100
100/100 [==============================] - 18s 181ms/step - loss: 0.4387 - acc: 0.7943 - val_loss: 0.4418 - val_acc: 0.8036
Epoch 68/100
100/100 [==============================] - 18s 176ms/step - loss: 0.4264 - acc: 0.7984 - val_loss: 0.6016 - val_acc: 0.7432
Epoch 69/100
100/100 [==============================] - 18s 178ms/step - loss: 0.4313 - acc: 0.8037 - val_loss: 0.4373 - val_acc: 0.8127
Epoch 70/100
100/100 [==============================] - 18s 175ms/step - loss: 0.4128 - acc: 0.8140 - val_loss: 0.4353 - val_acc: 0.8066
Epoch 71/100
100/100 [==============================] - 18s 177ms/step - loss: 0.4292 - acc: 0.7963 - val_loss: 0.4196 - val_acc: 0.8298
Epoch 72/100
100/100 [==============================] - 18s 181ms/step - loss: 0.4369 - acc: 0.7957 - val_loss: 0.4697 - val_acc: 0.7724
Epoch 73/100
100/100 [==============================] - 18s 175ms/step - loss: 0.4162 - acc: 0.8040 - val_loss: 0.4838 - val_acc: 0.8056
Epoch 74/100
100/100 [==============================] - 18s 178ms/step - loss: 0.4433 - acc: 0.7927 - val_loss: 0.4586 - val_acc: 0.7875
Epoch 75/100
100/100 [==============================] - 17s 174ms/step - loss: 0.4180 - acc: 0.8160 - val_loss: 0.4418 - val_acc: 0.8167
Epoch 76/100
100/100 [==============================] - 17s 173ms/step - loss: 0.4108 - acc: 0.8203 - val_loss: 0.4585 - val_acc: 0.8077
Epoch 77/100
100/100 [==============================] - 17s 172ms/step - loss: 0.4132 - acc: 0.8023 - val_loss: 0.5029 - val_acc: 0.7835
Epoch 78/100
100/100 [==============================] - 17s 169ms/step - loss: 0.4155 - acc: 0.8147 - val_loss: 0.4134 - val_acc: 0.8187
Epoch 79/100
100/100 [==============================] - 17s 168ms/step - loss: 0.3923 - acc: 0.8337 - val_loss: 0.4650 - val_acc: 0.8077
Epoch 80/100
100/100 [==============================] - 17s 173ms/step - loss: 0.4183 - acc: 0.8010 - val_loss: 0.4633 - val_acc: 0.8157
Epoch 81/100
100/100 [==============================] - 17s 166ms/step - loss: 0.4097 - acc: 0.8077 - val_loss: 0.3891 - val_acc: 0.8359
Epoch 82/100
100/100 [==============================] - 17s 172ms/step - loss: 0.3982 - acc: 0.8167 - val_loss: 0.4860 - val_acc: 0.8006
Epoch 83/100
100/100 [==============================] - 17s 170ms/step - loss: 0.3938 - acc: 0.8213 - val_loss: 0.4282 - val_acc: 0.8278
Epoch 84/100
100/100 [==============================] - 17s 167ms/step - loss: 0.4204 - acc: 0.8123 - val_loss: 0.4275 - val_acc: 0.8218
Epoch 85/100
100/100 [==============================] - 17s 172ms/step - loss: 0.4058 - acc: 0.8180 - val_loss: 0.4317 - val_acc: 0.8097
Epoch 86/100
100/100 [==============================] - 17s 168ms/step - loss: 0.4059 - acc: 0.8140 - val_loss: 0.3900 - val_acc: 0.8379
Epoch 87/100
100/100 [==============================] - 17s 169ms/step - loss: 0.4025 - acc: 0.8190 - val_loss: 0.4313 - val_acc: 0.8167
Epoch 88/100
100/100 [==============================] - 17s 173ms/step - loss: 0.4202 - acc: 0.8000 - val_loss: 0.4001 - val_acc: 0.8288
Epoch 89/100
100/100 [==============================] - 17s 168ms/step - loss: 0.3995 - acc: 0.8147 - val_loss: 0.3921 - val_acc: 0.8429
Epoch 90/100
100/100 [==============================] - 17s 166ms/step - loss: 0.4037 - acc: 0.8220 - val_loss: 0.4734 - val_acc: 0.8157
Epoch 91/100
100/100 [==============================] - 17s 173ms/step - loss: 0.3812 - acc: 0.8213 - val_loss: 0.4083 - val_acc: 0.8298
Epoch 92/100
100/100 [==============================] - 17s 167ms/step - loss: 0.3973 - acc: 0.8217 - val_loss: 0.4022 - val_acc: 0.8308
Epoch 93/100
100/100 [==============================] - 17s 170ms/step - loss: 0.3892 - acc: 0.8257 - val_loss: 0.4414 - val_acc: 0.8228
Epoch 94/100
100/100 [==============================] - 17s 170ms/step - loss: 0.3926 - acc: 0.8203 - val_loss: 0.4197 - val_acc: 0.8308
Epoch 95/100
100/100 [==============================] - 17s 174ms/step - loss: 0.3957 - acc: 0.8320 - val_loss: 0.4067 - val_acc: 0.8258
Epoch 96/100
100/100 [==============================] - 18s 179ms/step - loss: 0.3876 - acc: 0.8263 - val_loss: 0.6104 - val_acc: 0.7875
Epoch 97/100
100/100 [==============================] - 18s 177ms/step - loss: 0.3987 - acc: 0.8170 - val_loss: 0.4011 - val_acc: 0.8499
Epoch 98/100
100/100 [==============================] - 18s 176ms/step - loss: 0.3852 - acc: 0.8263 - val_loss: 0.4080 - val_acc: 0.8177
Epoch 99/100
100/100 [==============================] - 18s 180ms/step - loss: 0.3923 - acc: 0.8260 - val_loss: 0.4669 - val_acc: 0.8207
Epoch 100/100
100/100 [==============================] - 18s 177ms/step - loss: 0.3814 - acc: 0.8283 - val_loss: 0.4099 - val_acc: 0.8359
model.save('cats_dogs_cnn_img_aug.h5')
from keras.applications import vgg16
from keras.models import Model
import keras

# Load VGG16 pre-trained on ImageNet without its fully-connected top, so the
# convolutional base acts as a fixed feature extractor for our own classifier.
# NOTE(review): `input_shape` comes from an earlier cell — presumably
# (150, 150, 3) given the (4, 4, 512) bottleneck shape seen below; confirm.
vgg = vgg16.VGG16(include_top=False, weights='imagenet',
                  input_shape=input_shape)

# Flatten the final pooling output so the extractor emits one feature vector
# per image (4*4*512 = 8192 values).
output = vgg.layers[-1].output
output = keras.layers.Flatten()(output)
vgg_model = Model(vgg.input, output)

# Freeze every layer: only the new classifier head will be trained.
vgg_model.trainable = False
for layer in vgg_model.layers:
    layer.trainable = False

import pandas as pd
# Show each layer's full repr without truncation. The original used
# pd.set_option('max_colwidth', -1): the abbreviated option name is ambiguous
# and the -1 sentinel is deprecated (removed in pandas 2.0) — None is the
# supported "no limit" value.
pd.set_option('display.max_colwidth', None)
layers = [(layer, layer.name, layer.trainable) for layer in vgg_model.layers]
pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable'])
Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5
58892288/58889256 [==============================] - 2s 0us/step
Layer Type | Layer Name | Layer Trainable | |
---|---|---|---|
0 | <keras.engine.input_layer.InputLayer object at 0x7f3a8df32f60> | input_1 | False |
1 | <keras.layers.convolutional.Conv2D object at 0x7f3a8dde2780> | block1_conv1 | False |
2 | <keras.layers.convolutional.Conv2D object at 0x7f3a8df32278> | block1_conv2 | False |
3 | <keras.layers.pooling.MaxPooling2D object at 0x7f3aa0092b00> | block1_pool | False |
4 | <keras.layers.convolutional.Conv2D object at 0x7f3a8ddce6d8> | block2_conv1 | False |
5 | <keras.layers.convolutional.Conv2D object at 0x7f3a88fdecf8> | block2_conv2 | False |
6 | <keras.layers.pooling.MaxPooling2D object at 0x7f3a8d02b630> | block2_pool | False |
7 | <keras.layers.convolutional.Conv2D object at 0x7f3a8d02b518> | block3_conv1 | False |
8 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cfdc2b0> | block3_conv2 | False |
9 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cfee908> | block3_conv3 | False |
10 | <keras.layers.pooling.MaxPooling2D object at 0x7f3a8cf8ad30> | block3_pool | False |
11 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cf8ac88> | block4_conv1 | False |
12 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cfb7ba8> | block4_conv2 | False |
13 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cf62240> | block4_conv3 | False |
14 | <keras.layers.pooling.MaxPooling2D object at 0x7f3a8cf7cbe0> | block4_pool | False |
15 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cf7c6a0> | block5_conv1 | False |
16 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cf2a400> | block5_conv2 | False |
17 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cebfa58> | block5_conv3 | False |
18 | <keras.layers.pooling.MaxPooling2D object at 0x7f3a8ceed6d8> | block5_pool | False |
19 | <keras.layers.core.Flatten object at 0x7f3a8ce4cd68> | flatten_4 | False |
# Sanity-check: run one scaled training image through the convolutional base
# (`vgg`, not the flattened `vgg_model`) to inspect the raw bottleneck maps.
bottleneck_feature_example = vgg.predict(train_imgs_scaled[0:1])
print(bottleneck_feature_example.shape)
# Visualize the first feature channel; the printed shape below shows the
# output is (1, 4, 4, 512).
plt.imshow(bottleneck_feature_example[0][:,:,0])
(1, 4, 4, 512)
<matplotlib.image.AxesImage at 0x7f3a8cd934a8>
def get_bottleneck_features(model, input_imgs):
    """Run `model` forward on `input_imgs` and return the predicted features.

    Thin wrapper around `model.predict` with progress output suppressed
    (verbose=0); used to pre-extract VGG bottleneck vectors.
    """
    return model.predict(input_imgs, verbose=0)
# Pre-compute bottleneck features once for train/validation; the dense head
# can then be trained on these fixed 8192-dim vectors (fast, no augmentation).
train_features_vgg = get_bottleneck_features(vgg_model, train_imgs_scaled)
validation_features_vgg = get_bottleneck_features(vgg_model, validation_imgs_scaled)
print('Train Bottleneck Features:', train_features_vgg.shape,
'\tValidation Bottleneck Features:', validation_features_vgg.shape)
Train Bottleneck Features: (2916, 8192) Validation Bottleneck Features: (993, 8192)
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer
from keras.models import Sequential
from keras import optimizers

# Width of the flattened VGG bottleneck vector (4*4*512 = 8192), per the
# shapes printed by the extraction cell above.
input_shape = vgg_model.output_shape[1]

# Small fully-connected head trained on the pre-extracted bottleneck
# features; sigmoid output for the binary cat-vs-dog label.
model = Sequential()
model.add(InputLayer(input_shape=(input_shape,)))
# The original also passed input_dim=input_shape here; Keras ignores an input
# spec on any layer after the first, so the dead argument is dropped.
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['accuracy'])
model.summary()
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_9 (Dense) (None, 512) 4194816
_________________________________________________________________
dropout_5 (Dropout) (None, 512) 0
_________________________________________________________________
dense_10 (Dense) (None, 512) 262656
_________________________________________________________________
dropout_6 (Dropout) (None, 512) 0
_________________________________________________________________
dense_11 (Dense) (None, 1) 513
=================================================================
Total params: 4,457,985
Trainable params: 4,457,985
Non-trainable params: 0
_________________________________________________________________
# Train the dense head on the fixed bottleneck features.
# NOTE(review): batch_size and epochs are defined in an earlier cell — the
# log below shows 30 epochs; confirm the values against that cell.
history = model.fit(x=train_features_vgg, y=train_labels_enc,
validation_data=(validation_features_vgg, validation_labels_enc),
batch_size=batch_size,
epochs=epochs,
verbose=1)
Train on 2916 samples, validate on 993 samples
Epoch 1/30
2916/2916 [==============================] - 2s 539us/step - loss: 0.4275 - acc: 0.7966 - val_loss: 0.3040 - val_acc: 0.8550
Epoch 2/30
2916/2916 [==============================] - 1s 316us/step - loss: 0.2872 - acc: 0.8759 - val_loss: 0.2484 - val_acc: 0.8902
Epoch 3/30
2916/2916 [==============================] - 1s 308us/step - loss: 0.2483 - acc: 0.8930 - val_loss: 0.2259 - val_acc: 0.9013
Epoch 4/30
2916/2916 [==============================] - 1s 308us/step - loss: 0.2095 - acc: 0.9122 - val_loss: 0.2150 - val_acc: 0.9114
Epoch 5/30
2916/2916 [==============================] - 1s 312us/step - loss: 0.1696 - acc: 0.9270 - val_loss: 0.2263 - val_acc: 0.9094
Epoch 6/30
2916/2916 [==============================] - 1s 308us/step - loss: 0.1481 - acc: 0.9400 - val_loss: 0.2607 - val_acc: 0.8983
Epoch 7/30
2916/2916 [==============================] - 1s 308us/step - loss: 0.1180 - acc: 0.9540 - val_loss: 0.2187 - val_acc: 0.9164
Epoch 8/30
2916/2916 [==============================] - 1s 308us/step - loss: 0.0959 - acc: 0.9612 - val_loss: 0.3365 - val_acc: 0.8802
Epoch 9/30
2916/2916 [==============================] - 1s 305us/step - loss: 0.0786 - acc: 0.9691 - val_loss: 0.2733 - val_acc: 0.9094
Epoch 10/30
2916/2916 [==============================] - 1s 309us/step - loss: 0.0623 - acc: 0.9753 - val_loss: 0.5602 - val_acc: 0.8580
Epoch 11/30
2916/2916 [==============================] - 1s 310us/step - loss: 0.0491 - acc: 0.9829 - val_loss: 0.3511 - val_acc: 0.8963
Epoch 12/30
2916/2916 [==============================] - 1s 308us/step - loss: 0.0375 - acc: 0.9842 - val_loss: 0.3783 - val_acc: 0.8973
Epoch 13/30
2916/2916 [==============================] - 1s 312us/step - loss: 0.0326 - acc: 0.9890 - val_loss: 0.3156 - val_acc: 0.9114
Epoch 14/30
2916/2916 [==============================] - 1s 308us/step - loss: 0.0244 - acc: 0.9931 - val_loss: 0.4533 - val_acc: 0.8953
Epoch 15/30
2916/2916 [==============================] - 1s 310us/step - loss: 0.0292 - acc: 0.9914 - val_loss: 0.3982 - val_acc: 0.9074
Epoch 16/30
2916/2916 [==============================] - 1s 308us/step - loss: 0.0188 - acc: 0.9945 - val_loss: 0.4182 - val_acc: 0.9074
Epoch 17/30
2916/2916 [==============================] - 1s 306us/step - loss: 0.0182 - acc: 0.9914 - val_loss: 0.4084 - val_acc: 0.9114
Epoch 18/30
2916/2916 [==============================] - 1s 307us/step - loss: 0.0104 - acc: 0.9966 - val_loss: 1.6013 - val_acc: 0.7946
Epoch 19/30
2916/2916 [==============================] - 1s 310us/step - loss: 0.0120 - acc: 0.9973 - val_loss: 0.4396 - val_acc: 0.9053
Epoch 20/30
2916/2916 [==============================] - 1s 308us/step - loss: 0.0176 - acc: 0.9949 - val_loss: 0.4491 - val_acc: 0.9114
Epoch 21/30
2916/2916 [==============================] - 1s 310us/step - loss: 0.0059 - acc: 0.9986 - val_loss: 0.4922 - val_acc: 0.9144
Epoch 22/30
2916/2916 [==============================] - 1s 310us/step - loss: 0.0172 - acc: 0.9955 - val_loss: 0.4816 - val_acc: 0.9154
Epoch 23/30
2916/2916 [==============================] - 1s 311us/step - loss: 0.0053 - acc: 0.9976 - val_loss: 0.4900 - val_acc: 0.9194
Epoch 24/30
2916/2916 [==============================] - 1s 305us/step - loss: 0.0045 - acc: 0.9986 - val_loss: 0.5613 - val_acc: 0.9053
Epoch 25/30
2916/2916 [==============================] - 1s 305us/step - loss: 0.0148 - acc: 0.9959 - val_loss: 0.5541 - val_acc: 0.9104
Epoch 26/30
2916/2916 [==============================] - 1s 309us/step - loss: 0.0070 - acc: 0.9983 - val_loss: 0.5563 - val_acc: 0.9104
Epoch 27/30
2916/2916 [==============================] - 1s 307us/step - loss: 0.0029 - acc: 0.9993 - val_loss: 0.5653 - val_acc: 0.9063
Epoch 28/30
2916/2916 [==============================] - 1s 310us/step - loss: 0.0032 - acc: 0.9990 - val_loss: 0.6226 - val_acc: 0.9104
Epoch 29/30
2916/2916 [==============================] - 1s 309us/step - loss: 0.0011 - acc: 0.9997 - val_loss: 0.6483 - val_acc: 0.9084
Epoch 30/30
2916/2916 [==============================] - 1s 309us/step - loss: 0.0017 - acc: 0.9990 - val_loss: 0.6809 - val_acc: 0.9124
# Persist the basic transfer-learning model (trained on bottleneck features).
model.save('cats_dogs_tlearn_basic_cnn.h5')
# Augment only the training stream (zoom/rotate/shift/shear/flip); the
# validation stream is just rescaled so the evaluation data stays untouched.
train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50,
width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,
horizontal_flip=True, fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow(train_imgs, train_labels_enc, batch_size=30)
val_generator = val_datagen.flow(validation_imgs, validation_labels_enc, batch_size=20)
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer
from keras.models import Sequential
from keras import optimizers

# Frozen VGG16 extractor + trainable dense head, fed end-to-end by the
# augmenting generators so each batch sees freshly augmented images.
model = Sequential()
model.add(vgg_model)
# The original passed input_dim=input_shape here; Keras ignores an input spec
# on any layer after the first (vgg_model fixes the shape), so it is dropped.
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))

# Low learning rate: only the head trains, on top of fixed VGG features.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['accuracy'])

history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100,
                              validation_data=val_generator, validation_steps=50,
                              verbose=1)
Epoch 1/100
100/100 [==============================] - 30s 300ms/step - loss: 0.6549 - acc: 0.6000 - val_loss: 0.4954 - val_acc: 0.8207
Epoch 2/100
100/100 [==============================] - 25s 255ms/step - loss: 0.5680 - acc: 0.7017 - val_loss: 0.3932 - val_acc: 0.8560
Epoch 3/100
100/100 [==============================] - 25s 252ms/step - loss: 0.5345 - acc: 0.7200 - val_loss: 0.3489 - val_acc: 0.8681
Epoch 4/100
100/100 [==============================] - 26s 258ms/step - loss: 0.4851 - acc: 0.7600 - val_loss: 0.3066 - val_acc: 0.8852
Epoch 5/100
100/100 [==============================] - 26s 261ms/step - loss: 0.4564 - acc: 0.7810 - val_loss: 0.2904 - val_acc: 0.8862
Epoch 6/100
100/100 [==============================] - 27s 267ms/step - loss: 0.4411 - acc: 0.7840 - val_loss: 0.2830 - val_acc: 0.8812
Epoch 7/100
100/100 [==============================] - 26s 261ms/step - loss: 0.4128 - acc: 0.8047 - val_loss: 0.2633 - val_acc: 0.8973
Epoch 8/100
100/100 [==============================] - 26s 258ms/step - loss: 0.4227 - acc: 0.8057 - val_loss: 0.2555 - val_acc: 0.8963
Epoch 9/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3949 - acc: 0.8103 - val_loss: 0.2491 - val_acc: 0.8943
Epoch 10/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3971 - acc: 0.8170 - val_loss: 0.2798 - val_acc: 0.8771
Epoch 11/100
100/100 [==============================] - 27s 266ms/step - loss: 0.4083 - acc: 0.8067 - val_loss: 0.2431 - val_acc: 0.9003
Epoch 12/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3866 - acc: 0.8230 - val_loss: 0.2569 - val_acc: 0.8993
Epoch 13/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3893 - acc: 0.8144 - val_loss: 0.2323 - val_acc: 0.9074
Epoch 14/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3871 - acc: 0.8180 - val_loss: 0.2339 - val_acc: 0.9043
Epoch 15/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3876 - acc: 0.8253 - val_loss: 0.2414 - val_acc: 0.8953
Epoch 16/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3751 - acc: 0.8287 - val_loss: 0.2248 - val_acc: 0.9124
Epoch 17/100
100/100 [==============================] - 26s 256ms/step - loss: 0.3623 - acc: 0.8270 - val_loss: 0.2225 - val_acc: 0.9094
Epoch 18/100
100/100 [==============================] - 26s 263ms/step - loss: 0.3671 - acc: 0.8270 - val_loss: 0.2214 - val_acc: 0.9104
Epoch 19/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3612 - acc: 0.8437 - val_loss: 0.2396 - val_acc: 0.9023
Epoch 20/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3651 - acc: 0.8287 - val_loss: 0.2275 - val_acc: 0.9114
Epoch 21/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3798 - acc: 0.8230 - val_loss: 0.2251 - val_acc: 0.9114
Epoch 22/100
100/100 [==============================] - 26s 257ms/step - loss: 0.3355 - acc: 0.8463 - val_loss: 0.2133 - val_acc: 0.9124
Epoch 23/100
100/100 [==============================] - 26s 265ms/step - loss: 0.3667 - acc: 0.8277 - val_loss: 0.2297 - val_acc: 0.9053
Epoch 24/100
100/100 [==============================] - 26s 256ms/step - loss: 0.3449 - acc: 0.8420 - val_loss: 0.2100 - val_acc: 0.9174
Epoch 25/100
100/100 [==============================] - 26s 256ms/step - loss: 0.3506 - acc: 0.8433 - val_loss: 0.2096 - val_acc: 0.9184
Epoch 26/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3460 - acc: 0.8490 - val_loss: 0.2090 - val_acc: 0.9184
Epoch 27/100
100/100 [==============================] - 26s 263ms/step - loss: 0.3458 - acc: 0.8407 - val_loss: 0.2093 - val_acc: 0.9215
Epoch 28/100
100/100 [==============================] - 26s 264ms/step - loss: 0.3396 - acc: 0.8470 - val_loss: 0.2275 - val_acc: 0.9084
Epoch 29/100
100/100 [==============================] - 26s 263ms/step - loss: 0.3524 - acc: 0.8483 - val_loss: 0.2070 - val_acc: 0.9194
Epoch 30/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3411 - acc: 0.8410 - val_loss: 0.2055 - val_acc: 0.9225
Epoch 31/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3437 - acc: 0.8500 - val_loss: 0.2079 - val_acc: 0.9265
Epoch 32/100
100/100 [==============================] - 26s 258ms/step - loss: 0.3298 - acc: 0.8543 - val_loss: 0.2134 - val_acc: 0.9174
Epoch 33/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3462 - acc: 0.8447 - val_loss: 0.2256 - val_acc: 0.9134
Epoch 34/100
100/100 [==============================] - 26s 258ms/step - loss: 0.3183 - acc: 0.8597 - val_loss: 0.2074 - val_acc: 0.9215
Epoch 35/100
100/100 [==============================] - 26s 264ms/step - loss: 0.3352 - acc: 0.8500 - val_loss: 0.2148 - val_acc: 0.9204
Epoch 36/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3195 - acc: 0.8543 - val_loss: 0.2033 - val_acc: 0.9265
Epoch 37/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3314 - acc: 0.8510 - val_loss: 0.2046 - val_acc: 0.9194
Epoch 38/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3325 - acc: 0.8497 - val_loss: 0.2160 - val_acc: 0.9174
Epoch 39/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3430 - acc: 0.8470 - val_loss: 0.2257 - val_acc: 0.9074
Epoch 40/100
100/100 [==============================] - 26s 258ms/step - loss: 0.3294 - acc: 0.8503 - val_loss: 0.2195 - val_acc: 0.9144
Epoch 41/100
100/100 [==============================] - 26s 262ms/step - loss: 0.3359 - acc: 0.8473 - val_loss: 0.2095 - val_acc: 0.9235
Epoch 42/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3335 - acc: 0.8480 - val_loss: 0.2087 - val_acc: 0.9235
Epoch 43/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3197 - acc: 0.8600 - val_loss: 0.2078 - val_acc: 0.9215
Epoch 44/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3117 - acc: 0.8530 - val_loss: 0.2193 - val_acc: 0.9164
Epoch 45/100
100/100 [==============================] - 26s 260ms/step - loss: 0.3115 - acc: 0.8590 - val_loss: 0.2028 - val_acc: 0.9235
Epoch 46/100
100/100 [==============================] - 26s 258ms/step - loss: 0.3356 - acc: 0.8507 - val_loss: 0.2074 - val_acc: 0.9225
Epoch 47/100
100/100 [==============================] - 26s 265ms/step - loss: 0.3286 - acc: 0.8503 - val_loss: 0.1995 - val_acc: 0.9245
Epoch 48/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3124 - acc: 0.8600 - val_loss: 0.2280 - val_acc: 0.9063
Epoch 49/100
100/100 [==============================] - 26s 256ms/step - loss: 0.3026 - acc: 0.8693 - val_loss: 0.2054 - val_acc: 0.9265
Epoch 50/100
100/100 [==============================] - 26s 256ms/step - loss: 0.3224 - acc: 0.8553 - val_loss: 0.2164 - val_acc: 0.9164
Epoch 51/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3241 - acc: 0.8553 - val_loss: 0.1999 - val_acc: 0.9285
Epoch 52/100
100/100 [==============================] - 26s 258ms/step - loss: 0.3202 - acc: 0.8597 - val_loss: 0.2101 - val_acc: 0.9245
Epoch 53/100
100/100 [==============================] - 26s 262ms/step - loss: 0.3250 - acc: 0.8563 - val_loss: 0.2292 - val_acc: 0.9084
Epoch 54/100
100/100 [==============================] - 26s 258ms/step - loss: 0.3142 - acc: 0.8573 - val_loss: 0.2005 - val_acc: 0.9285
Epoch 55/100
100/100 [==============================] - 26s 258ms/step - loss: 0.3120 - acc: 0.8567 - val_loss: 0.2027 - val_acc: 0.9275
Epoch 56/100
100/100 [==============================] - 26s 257ms/step - loss: 0.3416 - acc: 0.8530 - val_loss: 0.2124 - val_acc: 0.9174
Epoch 57/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3094 - acc: 0.8600 - val_loss: 0.2025 - val_acc: 0.9285
Epoch 58/100
100/100 [==============================] - 26s 259ms/step - loss: 0.3000 - acc: 0.8667 - val_loss: 0.2128 - val_acc: 0.9225
Epoch 59/100
100/100 [==============================] - 26s 262ms/step - loss: 0.3085 - acc: 0.8663 - val_loss: 0.2109 - val_acc: 0.9194
Epoch 60/100
100/100 [==============================] - 25s 250ms/step - loss: 0.3012 - acc: 0.8710 - val_loss: 0.2086 - val_acc: 0.9265
Epoch 61/100
100/100 [==============================] - 25s 249ms/step - loss: 0.3066 - acc: 0.8583 - val_loss: 0.2112 - val_acc: 0.9194
Epoch 62/100
100/100 [==============================] - 25s 249ms/step - loss: 0.3063 - acc: 0.8627 - val_loss: 0.2072 - val_acc: 0.9255
Epoch 63/100
100/100 [==============================] - 25s 250ms/step - loss: 0.3196 - acc: 0.8630 - val_loss: 0.2164 - val_acc: 0.9174
Epoch 64/100
100/100 [==============================] - 25s 252ms/step - loss: 0.2919 - acc: 0.8760 - val_loss: 0.2025 - val_acc: 0.9265
Epoch 65/100
100/100 [==============================] - 25s 254ms/step - loss: 0.3087 - acc: 0.8677 - val_loss: 0.2104 - val_acc: 0.9204
Epoch 66/100
100/100 [==============================] - 25s 248ms/step - loss: 0.2962 - acc: 0.8773 - val_loss: 0.2094 - val_acc: 0.9295
Epoch 67/100
100/100 [==============================] - 25s 248ms/step - loss: 0.3073 - acc: 0.8627 - val_loss: 0.2082 - val_acc: 0.9265
Epoch 68/100
100/100 [==============================] - 25s 249ms/step - loss: 0.3032 - acc: 0.8650 - val_loss: 0.2000 - val_acc: 0.9305
Epoch 69/100
100/100 [==============================] - 25s 248ms/step - loss: 0.2962 - acc: 0.8683 - val_loss: 0.2018 - val_acc: 0.9285
Epoch 70/100
100/100 [==============================] - 25s 250ms/step - loss: 0.3092 - acc: 0.8597 - val_loss: 0.2018 - val_acc: 0.9315
Epoch 71/100
100/100 [==============================] - 26s 257ms/step - loss: 0.2826 - acc: 0.8740 - val_loss: 0.2003 - val_acc: 0.9285
Epoch 72/100
100/100 [==============================] - 25s 250ms/step - loss: 0.2883 - acc: 0.8757 - val_loss: 0.2037 - val_acc: 0.9335
Epoch 73/100
100/100 [==============================] - 25s 248ms/step - loss: 0.3179 - acc: 0.8550 - val_loss: 0.2168 - val_acc: 0.9184
Epoch 74/100
100/100 [==============================] - 25s 253ms/step - loss: 0.3176 - acc: 0.8597 - val_loss: 0.2014 - val_acc: 0.9295
Epoch 75/100
100/100 [==============================] - 26s 256ms/step - loss: 0.2895 - acc: 0.8713 - val_loss: 0.2221 - val_acc: 0.9184
Epoch 76/100
100/100 [==============================] - 25s 254ms/step - loss: 0.2851 - acc: 0.8770 - val_loss: 0.2016 - val_acc: 0.9255
Epoch 77/100
100/100 [==============================] - 26s 259ms/step - loss: 0.2956 - acc: 0.8700 - val_loss: 0.2058 - val_acc: 0.9245
Epoch 78/100
100/100 [==============================] - 25s 255ms/step - loss: 0.2926 - acc: 0.8723 - val_loss: 0.2049 - val_acc: 0.9275
Epoch 79/100
100/100 [==============================] - 26s 256ms/step - loss: 0.2975 - acc: 0.8657 - val_loss: 0.2058 - val_acc: 0.9275
Epoch 80/100
100/100 [==============================] - 25s 255ms/step - loss: 0.2960 - acc: 0.8720 - val_loss: 0.2138 - val_acc: 0.9265
Epoch 81/100
100/100 [==============================] - 25s 255ms/step - loss: 0.2955 - acc: 0.8767 - val_loss: 0.2098 - val_acc: 0.9265
Epoch 82/100
100/100 [==============================] - 26s 255ms/step - loss: 0.2866 - acc: 0.8707 - val_loss: 0.2202 - val_acc: 0.9174
Epoch 83/100
100/100 [==============================] - 26s 259ms/step - loss: 0.2773 - acc: 0.8817 - val_loss: 0.2091 - val_acc: 0.9265
Epoch 84/100
100/100 [==============================] - 26s 256ms/step - loss: 0.2917 - acc: 0.8727 - val_loss: 0.2002 - val_acc: 0.9245
Epoch 85/100
100/100 [==============================] - 26s 256ms/step - loss: 0.2936 - acc: 0.8677 - val_loss: 0.2106 - val_acc: 0.9265
Epoch 86/100
100/100 [==============================] - 26s 255ms/step - loss: 0.3082 - acc: 0.8617 - val_loss: 0.1996 - val_acc: 0.9275
Epoch 87/100
100/100 [==============================] - 26s 256ms/step - loss: 0.2828 - acc: 0.8743 - val_loss: 0.2031 - val_acc: 0.9235
Epoch 88/100
100/100 [==============================] - 26s 258ms/step - loss: 0.2799 - acc: 0.8773 - val_loss: 0.1999 - val_acc: 0.9325
Epoch 89/100
100/100 [==============================] - 26s 262ms/step - loss: 0.2803 - acc: 0.8773 - val_loss: 0.2098 - val_acc: 0.9285
Epoch 90/100
100/100 [==============================] - 26s 257ms/step - loss: 0.3028 - acc: 0.8687 - val_loss: 0.2029 - val_acc: 0.9215
Epoch 91/100
100/100 [==============================] - 26s 256ms/step - loss: 0.2837 - acc: 0.8857 - val_loss: 0.2022 - val_acc: 0.9235
Epoch 92/100
100/100 [==============================] - 26s 257ms/step - loss: 0.2839 - acc: 0.8753 - val_loss: 0.2082 - val_acc: 0.9345
Epoch 93/100
100/100 [==============================] - 26s 257ms/step - loss: 0.2715 - acc: 0.8830 - val_loss: 0.2050 - val_acc: 0.9255
Epoch 94/100
100/100 [==============================] - 26s 257ms/step - loss: 0.2956 - acc: 0.8723 - val_loss: 0.2302 - val_acc: 0.9104
Epoch 95/100
100/100 [==============================] - 26s 259ms/step - loss: 0.2745 - acc: 0.8803 - val_loss: 0.2098 - val_acc: 0.9275
Epoch 96/100
100/100 [==============================] - 26s 261ms/step - loss: 0.2764 - acc: 0.8764 - val_loss: 0.2124 - val_acc: 0.9265
Epoch 97/100
100/100 [==============================] - 26s 257ms/step - loss: 0.2828 - acc: 0.8790 - val_loss: 0.2156 - val_acc: 0.9255
Epoch 98/100
100/100 [==============================] - 26s 256ms/step - loss: 0.2903 - acc: 0.8753 - val_loss: 0.2224 - val_acc: 0.9204
Epoch 99/100
100/100 [==============================] - 26s 255ms/step - loss: 0.2771 - acc: 0.8823 - val_loss: 0.2046 - val_acc: 0.9275
Epoch 100/100
100/100 [==============================] - 26s 257ms/step - loss: 0.2711 - acc: 0.8874 - val_loss: 0.2150 - val_acc: 0.9255
# Persist the transfer-learning model trained with image augmentation.
model.save('cats_dogs_tlearn_img_aug_cnn.h5')
# Prepare VGG16 for fine-tuning: unfreeze everything from block4_conv1
# onward, keep the earlier (more generic) convolutional blocks frozen.
vgg_model.trainable = True
set_trainable = False
for layer in vgg_model.layers:
    # Once the first layer to unfreeze is reached, the flag latches True
    # and every subsequent layer becomes trainable.
    set_trainable = set_trainable or layer.name in ('block5_conv1', 'block4_conv1')
    layer.trainable = set_trainable

# Display the resulting per-layer trainability as a table.
layers = [(layer, layer.name, layer.trainable) for layer in vgg_model.layers]
pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable'])
Layer Type | Layer Name | Layer Trainable | |
---|---|---|---|
0 | <keras.engine.input_layer.InputLayer object at 0x7f3a8df32f60> | input_1 | False |
1 | <keras.layers.convolutional.Conv2D object at 0x7f3a8dde2780> | block1_conv1 | False |
2 | <keras.layers.convolutional.Conv2D object at 0x7f3a8df32278> | block1_conv2 | False |
3 | <keras.layers.pooling.MaxPooling2D object at 0x7f3aa0092b00> | block1_pool | False |
4 | <keras.layers.convolutional.Conv2D object at 0x7f3a8ddce6d8> | block2_conv1 | False |
5 | <keras.layers.convolutional.Conv2D object at 0x7f3a88fdecf8> | block2_conv2 | False |
6 | <keras.layers.pooling.MaxPooling2D object at 0x7f3a8d02b630> | block2_pool | False |
7 | <keras.layers.convolutional.Conv2D object at 0x7f3a8d02b518> | block3_conv1 | False |
8 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cfdc2b0> | block3_conv2 | False |
9 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cfee908> | block3_conv3 | False |
10 | <keras.layers.pooling.MaxPooling2D object at 0x7f3a8cf8ad30> | block3_pool | False |
11 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cf8ac88> | block4_conv1 | True |
12 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cfb7ba8> | block4_conv2 | True |
13 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cf62240> | block4_conv3 | True |
14 | <keras.layers.pooling.MaxPooling2D object at 0x7f3a8cf7cbe0> | block4_pool | True |
15 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cf7c6a0> | block5_conv1 | True |
16 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cf2a400> | block5_conv2 | True |
17 | <keras.layers.convolutional.Conv2D object at 0x7f3a8cebfa58> | block5_conv3 | True |
18 | <keras.layers.pooling.MaxPooling2D object at 0x7f3a8ceed6d8> | block5_pool | True |
19 | <keras.layers.core.Flatten object at 0x7f3a8ce4cd68> | flatten_4 | True |
# Rebuild the generators for the fine-tuning run: augmentation on the
# training stream only, plain rescaling for validation.
train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50,
width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,
horizontal_flip=True, fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow(train_imgs, train_labels_enc, batch_size=30)
val_generator = val_datagen.flow(validation_imgs, validation_labels_enc, batch_size=20)
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer
from keras.models import Sequential
from keras import optimizers

# Fine-tuning model: VGG16 with blocks 4-5 unfrozen plus a dense head,
# trained end-to-end on the augmented generators.
model = Sequential()
model.add(vgg_model)
# The original passed input_dim=input_shape here; Keras ignores an input spec
# on any layer after the first (vgg_model fixes the shape), so it is dropped.
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))

# Even lower learning rate than the frozen run: pre-trained conv weights are
# being updated, so large steps would destroy them.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-5),
              metrics=['accuracy'])

history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100,
                              validation_data=val_generator, validation_steps=50,
                              verbose=1)
Epoch 1/100
100/100 [==============================] - 34s 341ms/step - loss: 0.5933 - acc: 0.6693 - val_loss: 0.3330 - val_acc: 0.8550
Epoch 2/100
100/100 [==============================] - 32s 316ms/step - loss: 0.4064 - acc: 0.8137 - val_loss: 0.2034 - val_acc: 0.9114
Epoch 3/100
100/100 [==============================] - 32s 317ms/step - loss: 0.3370 - acc: 0.8497 - val_loss: 0.1853 - val_acc: 0.9215
Epoch 4/100
100/100 [==============================] - 32s 317ms/step - loss: 0.2852 - acc: 0.8687 - val_loss: 0.1426 - val_acc: 0.9376
Epoch 5/100
100/100 [==============================] - 32s 316ms/step - loss: 0.2730 - acc: 0.8843 - val_loss: 0.1340 - val_acc: 0.9416
Epoch 6/100
100/100 [==============================] - 32s 317ms/step - loss: 0.2422 - acc: 0.8943 - val_loss: 0.1400 - val_acc: 0.9396
Epoch 7/100
100/100 [==============================] - 32s 316ms/step - loss: 0.2297 - acc: 0.9077 - val_loss: 0.1432 - val_acc: 0.9406
Epoch 8/100
100/100 [==============================] - 32s 316ms/step - loss: 0.2116 - acc: 0.9107 - val_loss: 0.1378 - val_acc: 0.9386
Epoch 9/100
100/100 [==============================] - 32s 317ms/step - loss: 0.1939 - acc: 0.9190 - val_loss: 0.1113 - val_acc: 0.9537
Epoch 10/100
100/100 [==============================] - 32s 316ms/step - loss: 0.1704 - acc: 0.9337 - val_loss: 0.1130 - val_acc: 0.9577
Epoch 11/100
100/100 [==============================] - 32s 317ms/step - loss: 0.1783 - acc: 0.9243 - val_loss: 0.1208 - val_acc: 0.9406
Epoch 12/100
100/100 [==============================] - 32s 318ms/step - loss: 0.1462 - acc: 0.9377 - val_loss: 0.1165 - val_acc: 0.9557
Epoch 13/100
100/100 [==============================] - 32s 316ms/step - loss: 0.1626 - acc: 0.9350 - val_loss: 0.0973 - val_acc: 0.9637
Epoch 14/100
100/100 [==============================] - 32s 316ms/step - loss: 0.1523 - acc: 0.9400 - val_loss: 0.1293 - val_acc: 0.9476
Epoch 15/100
100/100 [==============================] - 32s 316ms/step - loss: 0.1411 - acc: 0.9400 - val_loss: 0.0948 - val_acc: 0.9668
Epoch 16/100
100/100 [==============================] - 32s 317ms/step - loss: 0.1328 - acc: 0.9470 - val_loss: 0.1188 - val_acc: 0.9507
Epoch 17/100
100/100 [==============================] - 32s 317ms/step - loss: 0.1241 - acc: 0.9520 - val_loss: 0.0931 - val_acc: 0.9668
Epoch 18/100
100/100 [==============================] - 32s 316ms/step - loss: 0.1181 - acc: 0.9483 - val_loss: 0.0862 - val_acc: 0.9688
Epoch 19/100
100/100 [==============================] - 32s 316ms/step - loss: 0.1173 - acc: 0.9540 - val_loss: 0.0967 - val_acc: 0.9658
Epoch 20/100
100/100 [==============================] - 32s 316ms/step - loss: 0.1176 - acc: 0.9537 - val_loss: 0.0933 - val_acc: 0.9718
Epoch 21/100
100/100 [==============================] - 32s 317ms/step - loss: 0.1039 - acc: 0.9573 - val_loss: 0.1188 - val_acc: 0.9627
Epoch 22/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0990 - acc: 0.9580 - val_loss: 0.1191 - val_acc: 0.9587
Epoch 23/100
100/100 [==============================] - 32s 316ms/step - loss: 0.1020 - acc: 0.9627 - val_loss: 0.1326 - val_acc: 0.9567
Epoch 24/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0838 - acc: 0.9690 - val_loss: 0.0937 - val_acc: 0.9738
Epoch 25/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0852 - acc: 0.9627 - val_loss: 0.1325 - val_acc: 0.9607
Epoch 26/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0854 - acc: 0.9667 - val_loss: 0.1981 - val_acc: 0.9456
Epoch 27/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0737 - acc: 0.9687 - val_loss: 0.1150 - val_acc: 0.9688
Epoch 28/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0750 - acc: 0.9737 - val_loss: 0.1081 - val_acc: 0.9708
Epoch 29/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0804 - acc: 0.9697 - val_loss: 0.0981 - val_acc: 0.9698
Epoch 30/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0866 - acc: 0.9660 - val_loss: 0.1052 - val_acc: 0.9718
Epoch 31/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0709 - acc: 0.9723 - val_loss: 0.1118 - val_acc: 0.9698
Epoch 32/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0662 - acc: 0.9723 - val_loss: 0.1295 - val_acc: 0.9658
Epoch 33/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0710 - acc: 0.9720 - val_loss: 0.1157 - val_acc: 0.9688
Epoch 34/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0624 - acc: 0.9733 - val_loss: 0.1366 - val_acc: 0.9637
Epoch 35/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0664 - acc: 0.9707 - val_loss: 0.1182 - val_acc: 0.9637
Epoch 36/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0619 - acc: 0.9810 - val_loss: 0.1106 - val_acc: 0.9708
Epoch 37/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0475 - acc: 0.9833 - val_loss: 0.1228 - val_acc: 0.9718
Epoch 38/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0642 - acc: 0.9780 - val_loss: 0.1519 - val_acc: 0.9577
Epoch 39/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0604 - acc: 0.9753 - val_loss: 0.1180 - val_acc: 0.9718
Epoch 40/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0603 - acc: 0.9820 - val_loss: 0.1325 - val_acc: 0.9698
Epoch 41/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0608 - acc: 0.9773 - val_loss: 0.1073 - val_acc: 0.9738
Epoch 42/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0426 - acc: 0.9863 - val_loss: 0.1553 - val_acc: 0.9658
Epoch 43/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0492 - acc: 0.9810 - val_loss: 0.1288 - val_acc: 0.9708
Epoch 44/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0454 - acc: 0.9807 - val_loss: 0.2012 - val_acc: 0.9577
Epoch 45/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0590 - acc: 0.9807 - val_loss: 0.1550 - val_acc: 0.9648
Epoch 46/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0455 - acc: 0.9840 - val_loss: 0.1295 - val_acc: 0.9728
Epoch 47/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0474 - acc: 0.9843 - val_loss: 0.1577 - val_acc: 0.9627
Epoch 48/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0416 - acc: 0.9840 - val_loss: 0.1411 - val_acc: 0.9678
Epoch 49/100
100/100 [==============================] - 31s 314ms/step - loss: 0.0344 - acc: 0.9870 - val_loss: 0.1308 - val_acc: 0.9748
Epoch 50/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0423 - acc: 0.9847 - val_loss: 0.1662 - val_acc: 0.9668
Epoch 51/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0402 - acc: 0.9850 - val_loss: 0.1562 - val_acc: 0.9708
Epoch 52/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0434 - acc: 0.9847 - val_loss: 0.1887 - val_acc: 0.9567
Epoch 53/100
100/100 [==============================] - 32s 317ms/step - loss: 0.0456 - acc: 0.9840 - val_loss: 0.1759 - val_acc: 0.9637
Epoch 54/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0345 - acc: 0.9887 - val_loss: 0.2266 - val_acc: 0.9496
Epoch 55/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0323 - acc: 0.9877 - val_loss: 0.1827 - val_acc: 0.9688
Epoch 56/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0386 - acc: 0.9867 - val_loss: 0.1480 - val_acc: 0.9708
Epoch 57/100
100/100 [==============================] - 31s 315ms/step - loss: 0.0389 - acc: 0.9860 - val_loss: 0.1858 - val_acc: 0.9658
Epoch 58/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0371 - acc: 0.9880 - val_loss: 0.1928 - val_acc: 0.9648
Epoch 59/100
100/100 [==============================] - 31s 315ms/step - loss: 0.0344 - acc: 0.9867 - val_loss: 0.1438 - val_acc: 0.9718
Epoch 60/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0388 - acc: 0.9847 - val_loss: 0.1735 - val_acc: 0.9688
Epoch 61/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0355 - acc: 0.9890 - val_loss: 0.1567 - val_acc: 0.9748
Epoch 62/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0376 - acc: 0.9853 - val_loss: 0.1692 - val_acc: 0.9648
Epoch 63/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0292 - acc: 0.9897 - val_loss: 0.1776 - val_acc: 0.9658
Epoch 64/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0354 - acc: 0.9887 - val_loss: 0.1807 - val_acc: 0.9658
Epoch 65/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0323 - acc: 0.9870 - val_loss: 0.2022 - val_acc: 0.9658
Epoch 66/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0391 - acc: 0.9850 - val_loss: 0.1842 - val_acc: 0.9648
Epoch 67/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0290 - acc: 0.9900 - val_loss: 0.1705 - val_acc: 0.9708
Epoch 68/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0340 - acc: 0.9877 - val_loss: 0.1627 - val_acc: 0.9708
Epoch 69/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0265 - acc: 0.9890 - val_loss: 0.1717 - val_acc: 0.9708
Epoch 70/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0311 - acc: 0.9900 - val_loss: 0.4104 - val_acc: 0.9366
Epoch 71/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0310 - acc: 0.9900 - val_loss: 0.2189 - val_acc: 0.9627
Epoch 72/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0348 - acc: 0.9897 - val_loss: 0.1395 - val_acc: 0.9698
Epoch 73/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0296 - acc: 0.9897 - val_loss: 0.1766 - val_acc: 0.9658
Epoch 74/100
100/100 [==============================] - 31s 315ms/step - loss: 0.0265 - acc: 0.9903 - val_loss: 0.2309 - val_acc: 0.9577
Epoch 75/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0349 - acc: 0.9863 - val_loss: 0.1571 - val_acc: 0.9688
Epoch 76/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0227 - acc: 0.9917 - val_loss: 0.2125 - val_acc: 0.9648
Epoch 77/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0311 - acc: 0.9893 - val_loss: 0.2300 - val_acc: 0.9658
Epoch 78/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0431 - acc: 0.9883 - val_loss: 0.1675 - val_acc: 0.9708
Epoch 79/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0294 - acc: 0.9920 - val_loss: 0.1909 - val_acc: 0.9668
Epoch 80/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0258 - acc: 0.9927 - val_loss: 0.1996 - val_acc: 0.9668
Epoch 81/100
100/100 [==============================] - 31s 315ms/step - loss: 0.0280 - acc: 0.9937 - val_loss: 0.1779 - val_acc: 0.9728
Epoch 82/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0264 - acc: 0.9907 - val_loss: 0.1750 - val_acc: 0.9658
Epoch 83/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0332 - acc: 0.9897 - val_loss: 0.1785 - val_acc: 0.9678
Epoch 84/100
100/100 [==============================] - 31s 315ms/step - loss: 0.0211 - acc: 0.9947 - val_loss: 0.1914 - val_acc: 0.9698
Epoch 85/100
100/100 [==============================] - 32s 316ms/step - loss: 0.0320 - acc: 0.9893 - val_loss: 0.1750 - val_acc: 0.9718
Epoch 86/100
100/100 [==============================] - 31s 315ms/step - loss: 0.0245 - acc: 0.9907 - val_loss: 0.1977 - val_acc: 0.9668
Epoch 87/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0238 - acc: 0.9923 - val_loss: 0.1635 - val_acc: 0.9728
Epoch 88/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0159 - acc: 0.9937 - val_loss: 0.1901 - val_acc: 0.9688
Epoch 89/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0232 - acc: 0.9923 - val_loss: 0.1985 - val_acc: 0.9658
Epoch 90/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0172 - acc: 0.9947 - val_loss: 0.2372 - val_acc: 0.9678
Epoch 91/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0340 - acc: 0.9893 - val_loss: 0.1622 - val_acc: 0.9698
Epoch 92/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0228 - acc: 0.9917 - val_loss: 0.2475 - val_acc: 0.9688
Epoch 93/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0325 - acc: 0.9923 - val_loss: 0.1918 - val_acc: 0.9678
Epoch 94/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0329 - acc: 0.9893 - val_loss: 0.2077 - val_acc: 0.9637
Epoch 95/100
100/100 [==============================] - 31s 315ms/step - loss: 0.0210 - acc: 0.9927 - val_loss: 0.3841 - val_acc: 0.9547
Epoch 96/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0216 - acc: 0.9933 - val_loss: 0.2069 - val_acc: 0.9738
Epoch 97/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0349 - acc: 0.9920 - val_loss: 0.2394 - val_acc: 0.9617
Epoch 98/100
100/100 [==============================] - 31s 314ms/step - loss: 0.0207 - acc: 0.9950 - val_loss: 0.2262 - val_acc: 0.9658
Epoch 99/100
100/100 [==============================] - 32s 315ms/step - loss: 0.0236 - acc: 0.9910 - val_loss: 0.2090 - val_acc: 0.9688
Epoch 100/100
100/100 [==============================] - 32s 322ms/step - loss: 0.0158 - acc: 0.9950 - val_loss: 0.3115 - val_acc: 0.9637
# Persist the fine-tuned transfer-learning model; reloaded below for evaluation.
model.save('cats_dogs_tlearn_finetune_img_aug_cnn.h5')
# load dependencies
import glob
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import load_img, img_to_array, array_to_img
from keras.models import load_model
import model_evaluation_utils as meu
%matplotlib inline
# load saved models
# Reload all five trained models from disk so each can be evaluated on the
# same held-out test set. Requires the .h5 files produced by earlier cells
# to exist in the working directory.
basic_cnn = load_model('cats_dogs_basic_cnn.h5')
img_aug_cnn = load_model('cats_dogs_cnn_img_aug.h5')
tl_cnn = load_model('cats_dogs_tlearn_basic_cnn.h5')
tl_img_aug_cnn = load_model('cats_dogs_tlearn_img_aug_cnn.h5')
tl_img_aug_finetune_cnn = load_model('cats_dogs_tlearn_finetune_img_aug_cnn.h5')
# load other configurations
IMG_DIM = (150, 150)
input_shape = (150, 150, 3)

# PEP 8 (E731): named callables should be `def`s, not assigned lambdas —
# same names and behavior as before, now with docstrings and tracebacks
# that identify the function.
def num2class_label_transformer(labels):
    """Map numeric labels to class names: 0 -> 'cat', anything else -> 'dog'."""
    return ['cat' if x == 0 else 'dog' for x in labels]

def class2num_label_transformer(labels):
    """Map class-name labels to numbers: 'cat' -> 0, anything else -> 1."""
    return [0 if x == 'cat' else 1 for x in labels]
# load VGG model for bottleneck features
from keras.applications import vgg16
from keras.models import Model
import keras
# VGG16 convolutional base only (include_top=False), ImageNet weights,
# fixed 150x150x3 input to match the scaled test images.
vgg = vgg16.VGG16(include_top=False, weights='imagenet',
input_shape=input_shape)
# Take the last layer's output and flatten it so the extracted features are
# 1-D vectors suitable for the dense classifiers trained earlier.
output = vgg.layers[-1].output
output = keras.layers.Flatten()(output)
vgg_model = Model(vgg.input, output)
# Freeze everything: here VGG16 is used purely as a fixed feature extractor.
vgg_model.trainable = False
def get_bottleneck_features(model, input_imgs):
    """Run `input_imgs` through `model` and return the predicted feature array.

    Thin wrapper around model.predict with progress output suppressed
    (verbose=0); used to extract VGG16 bottleneck features for the
    transfer-learning classifiers.
    """
    return model.predict(input_imgs, verbose=0)
IMG_DIM = (150, 150)
# Load every test image, resized to 150x150, into one (N, 150, 150, 3) array.
test_files = glob.glob('test_data/*')
test_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in test_files]
test_imgs = np.array(test_imgs)
# Labels are encoded in the filename ('cat.123.jpg' / 'dog.456.jpg').
# FIX: use os.path.basename instead of fn.split('/')[1] — the hard-coded '/'
# separator breaks on Windows paths and on any nesting other than exactly
# one directory level.
test_labels = [os.path.basename(fn).split('.')[0].strip() for fn in test_files]
# Scale pixel values to [0, 1], matching the generators used at train time.
test_imgs_scaled = test_imgs.astype('float32')
test_imgs_scaled /= 255
test_labels_enc = class2num_label_transformer(test_labels)
print('Test dataset shape:', test_imgs.shape)
print(test_labels[0:5], test_labels_enc[0:5])
Test dataset shape: (990, 150, 150, 3)
['dog', 'dog', 'cat', 'dog', 'dog'] [1, 1, 0, 1, 1]
# Model 1: basic CNN (no augmentation, no transfer learning).
# predict_classes returns 0/1; map back to 'cat'/'dog' names for the report.
# NOTE(review): list(set(...)) has no guaranteed order, so the class order in
# the report may vary between runs — consider a fixed ['cat', 'dog'] list.
# This evaluation cell is copy-pasted five times below; a helper function
# would remove the duplication.
predictions = basic_cnn.predict_classes(test_imgs_scaled, verbose=0)
predictions = num2class_label_transformer(predictions)
meu.display_model_performance_metrics(true_labels=test_labels, predicted_labels=predictions,
classes=list(set(test_labels)))
Model Performance metrics:
------------------------------
Accuracy: 0.7826
Precision: 0.7832
Recall: 0.7826
F1 Score: 0.7826
Model Classification report:
------------------------------
precision recall f1-score support
dog 0.80 0.77 0.78 502
cat 0.77 0.80 0.78 487
micro avg 0.78 0.78 0.78 989
macro avg 0.78 0.78 0.78 989
weighted avg 0.78 0.78 0.78 989
Prediction Confusion Matrix:
------------------------------
Predicted:
dog cat
Actual: dog 385 117
cat 98 389
# Model 2: CNN trained with image augmentation (no transfer learning).
# NOTE(review): list(set(...)) gives a non-deterministic class order.
predictions = img_aug_cnn.predict_classes(test_imgs_scaled, verbose=0)
predictions = num2class_label_transformer(predictions)
meu.display_model_performance_metrics(true_labels=test_labels, predicted_labels=predictions,
classes=list(set(test_labels)))
Model Performance metrics:
------------------------------
Accuracy: 0.8342
Precision: 0.8343
Recall: 0.8342
F1 Score: 0.8342
Model Classification report:
------------------------------
precision recall f1-score support
dog 0.84 0.83 0.84 502
cat 0.83 0.84 0.83 487
micro avg 0.83 0.83 0.83 989
macro avg 0.83 0.83 0.83 989
weighted avg 0.83 0.83 0.83 989
Prediction Confusion Matrix:
------------------------------
Predicted:
dog cat
Actual: dog 416 86
cat 78 409
# Model 3: transfer learning on frozen VGG16 bottleneck features (no
# augmentation). Unlike the other models, this one was trained on extracted
# features, so the test images must first pass through vgg_model.
test_bottleneck_features = get_bottleneck_features(vgg_model, test_imgs_scaled)
predictions = tl_cnn.predict_classes(test_bottleneck_features, verbose=0)
predictions = num2class_label_transformer(predictions)
meu.display_model_performance_metrics(true_labels=test_labels, predicted_labels=predictions,
classes=list(set(test_labels)))
Model Performance metrics:
------------------------------
Accuracy: 0.9181
Precision: 0.9181
Recall: 0.9181
F1 Score: 0.9181
Model Classification report:
------------------------------
precision recall f1-score support
dog 0.92 0.92 0.92 502
cat 0.92 0.92 0.92 487
micro avg 0.92 0.92 0.92 989
macro avg 0.92 0.92 0.92 989
weighted avg 0.92 0.92 0.92 989
Prediction Confusion Matrix:
------------------------------
Predicted:
dog cat
Actual: dog 462 40
cat 41 446
# Model 4: transfer learning (frozen VGG16 base) + image augmentation.
# This model embeds the VGG base, so it consumes raw scaled images directly.
predictions = tl_img_aug_cnn.predict_classes(test_imgs_scaled, verbose=0)
predictions = num2class_label_transformer(predictions)
meu.display_model_performance_metrics(true_labels=test_labels, predicted_labels=predictions,
classes=list(set(test_labels)))
Model Performance metrics:
------------------------------
Accuracy: 0.9141
Precision: 0.9156
Recall: 0.9141
F1 Score: 0.9139
Model Classification report:
------------------------------
precision recall f1-score support
dog 0.89 0.95 0.92 502
cat 0.94 0.88 0.91 487
micro avg 0.91 0.91 0.91 989
macro avg 0.92 0.91 0.91 989
weighted avg 0.92 0.91 0.91 989
Prediction Confusion Matrix:
------------------------------
Predicted:
dog cat
Actual: dog 475 27
cat 58 429
# Model 5: transfer learning with fine-tuned block4/block5 + augmentation —
# the best performer on this test set (per the metrics printed below).
predictions = tl_img_aug_finetune_cnn.predict_classes(test_imgs_scaled, verbose=0)
predictions = num2class_label_transformer(predictions)
meu.display_model_performance_metrics(true_labels=test_labels, predicted_labels=predictions,
classes=list(set(test_labels)))
Model Performance metrics:
------------------------------
Accuracy: 0.9838
Precision: 0.9838
Recall: 0.9838
F1 Score: 0.9838
Model Classification report:
------------------------------
precision recall f1-score support
cat 0.98 0.99 0.98 496
dog 0.99 0.98 0.98 494
micro avg 0.98 0.98 0.98 990
macro avg 0.98 0.98 0.98 990
weighted avg 0.98 0.98 0.98 990
Prediction Confusion Matrix:
------------------------------
Predicted:
cat dog
Actual: cat 489 7
dog 9 485
# ROC curves: weakest model (basic CNN) vs. best model (fine-tuned transfer
# learning with augmentation), both against the numeric test labels.
meu.plot_model_roc_curve(basic_cnn, test_imgs_scaled,
true_labels=test_labels_enc,
class_names=[0, 1])
# best model - transfer learning with fine-tuning & image augmentation
meu.plot_model_roc_curve(tl_img_aug_finetune_cnn, test_imgs_scaled,
true_labels=test_labels_enc,
class_names=[0, 1])