## Create a CNN Model and Optimize It Using Keras Tuner
# !mkdir ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# !chmod 600 ~/.kaggle/kaggle.json
# !kaggle datasets download -d slothkong/10-monkey-species
# !unzip 10-monkey-species.zip
from google.colab import files
from google.colab import drive
drive.mount('/content/drive',)
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
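The files import above suggests kaggle.json was uploaded into the runtime by hand before the Kaggle CLI commands at the top were run. A minimal sketch of that step, assuming a Colab runtime and a Kaggle API token downloaded from the account page (the filename kaggle.json is Kaggle's default):

# Upload kaggle.json from the local machine into the Colab runtime;
# the shell commands at the top then move it into ~/.kaggle.
from google.colab import files
uploaded = files.upload()        # opens a browser file picker
print(list(uploaded.keys()))     # expected: ['kaggle.json']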
### Optimizing with Keras Tuner
# !pip install keras-tuner
import tensorflow as tf
from tensorflow import keras
import numpy as np
print(tf.__version__)
2.5.0
import pandas as pd
import numpy as np
import itertools
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,Flatten,Dropout,Dense
from tensorflow.keras.optimizers import Adam
from tensorflow import keras
from tensorflow.keras import layers
from kerastuner.tuners import RandomSearch
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/client/session.py:1761: UserWarning: An interactive session is already active. This can ...
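The UserWarning above simply means an InteractiveSession from a previous run of this cell is still open. One way to make the cell safely re-runnable is to close the old session first; this guard is an illustrative addition, not something the original notebook does:

# Close a session left over from an earlier run of this cell, if any,
# before creating a fresh one with the same GPU options.
try:
    session.close()
except NameError:
    pass
session = InteractiveSession(config=config)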
# Convolutional Neural Network
# Importing the libraries
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Default dimensions we found online (not actually used below; the generators resize images to 64x64)
img_width, img_height = 224, 224
# Create a bottleneck file
top_model_weights_path = 'optimize_fc_model.h5'
# loading up our datasets
train_data_dir = '/content/training/training'
validation_data_dir = '/content/validation/validation'
# test_data_dir = '/content/test'
# number of epochs to train top model
epochs = 7  # this has been changed after multiple model runs
# batch size used by flow_from_directory and predict_generator
batch_size = 50
# loading training data
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
'/content/training/training',
target_size=(64, 64),
batch_size=32,
class_mode='categorical')
Found 1098 images belonging to 11 classes
# loading testing data
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
'/content/validation/validation',
target_size=(64, 64),
batch_size=32,
class_mode='categorical')
Found 272 images belonging to 11 classes
# initialising sequential model and adding layers to it
cnn = tf.keras.models.Sequential()
cnn.add(tf.keras.layers.Conv2D(filters=48, kernel_size=3, activation='relu', input_shape=[64, 64, 3]))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(tf.keras.layers.Conv2D(filters=48, kernel_size=3, activation='relu'))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(tf.keras.layers.Flatten())
cnn.add(tf.keras.layers.Dense(128, activation='relu'))
cnn.add(tf.keras.layers.Dense(64, activation='relu'))
cnn.add(tf.keras.layers.Dense(11, activation='softmax'))
cnn.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
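Before fitting, it can be worth printing the layer shapes and parameter counts that this stack produces:

# One row per layer: output shape and trainable parameter count.
cnn.summary()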
# the effective batch size comes from the generators (32), giving 35 steps per epoch
history = cnn.fit(x=train_generator, validation_data=test_generator, epochs=25, batch_size=50)
Epoch 1/25
35/35 [==============================] - 36s 1s/step - loss: 2.3320 - accuracy: 0.1485 - val_loss: ...
Epoch 2/25
35/35 [==============================] - 34s 998ms/step - loss: 2.0866 - accuracy: 0.2468 - val_loss: ...
Epoch 3/25
35/35 [==============================] - 34s 957ms/step - loss: 1.8086 - accuracy: 0.3679 - val_loss: ...
Epoch 4/25
35/35 [==============================] - 34s 973ms/step - loss: 1.6248 - accuracy: 0.4308 - val_loss: ...
Epoch 5/25
35/35 [==============================] - 35s 995ms/step - loss: 1.4666 - accuracy: 0.4836 - val_loss: ...
Epoch 6/25
35/35 [==============================] - 35s 1s/step - loss: 1.3259 - accuracy: 0.5410 - val_loss: ...
Epoch 7/25
35/35 [==============================] - 35s 985ms/step - loss: 1.2201 - accuracy: 0.5893 - val_loss: ...
Epoch 8/25
35/35 [==============================] - 36s 1s/step - loss: 1.2016 - accuracy: 0.5783 - val_loss: ...
Epoch 9/25
35/35 [==============================] - 35s 1s/step - loss: 1.1860 - accuracy: 0.5801 - val_loss: ...
Epoch 10/25
35/35 [==============================] - 35s 1s/step - loss: 1.0415 - accuracy: 0.6403 - val_loss: ...
Epoch 11/25
35/35 [==============================] - 35s 991ms/step - loss: 0.9906 - accuracy: 0.6494 - val_loss: ...
Epoch 12/25
35/35 [==============================] - 35s 983ms/step - loss: 1.0043 - accuracy: 0.6384 - val_loss: ...
Epoch 13/25
35/35 [==============================] - 34s 990ms/step - loss: 0.8997 - accuracy: 0.6885 - val_loss: ...
Epoch 14/25
35/35 [==============================] - 35s 1s/step - loss: 0.8433 - accuracy: 0.7022 - val_loss: ...
Epoch 15/25
35/35 [==============================] - 36s 1s/step - loss: 0.8521 - accuracy: 0.7067 - val_loss: ...
Epoch 16/25
35/35 [==============================] - 37s 1s/step - loss: 0.7924 - accuracy: 0.7177 - val_loss: ...
Epoch 17/25
35/35 [==============================] - 36s 1s/step - loss: 0.7527 - accuracy: 0.7432 - val_loss: ...
Epoch 18/25
35/35 [==============================] - 37s 1s/step - loss: 0.7653 - accuracy: 0.7195 - val_loss: ...
Epoch 19/25
35/35 [==============================] - 37s 1s/step - loss: 0.6716 - accuracy: 0.7687 - val_loss: ...
Epoch 20/25
35/35 [==============================] - 36s 1s/step - loss: 0.7026 - accuracy: 0.7623 - val_loss: ...
Epoch 21/25
35/35 [==============================] - 36s 1s/step - loss: 0.6732 - accuracy: 0.7659 - val_loss: ...
Epoch 22/25
35/35 [==============================] - 35s 1s/step - loss: 0.6195 - accuracy: 0.7805 - val_loss: ...
Epoch 23/25
35/35 [==============================] - 34s 965ms/step - loss: 0.5811 - accuracy: 0.7951 - val_loss: ...
Epoch 24/25
35/35 [==============================] - 36s 1s/step - loss: 0.5338 - accuracy: 0.8224 - val_loss: ...
Epoch 25/25
35/35 [==============================] - 35s 1s/step - loss: 0.4787 - accuracy: 0.8279 - val_loss: ...
# summarize history for accuracy
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# Evaluating model on validation data
evaluate = cnn.evaluate(test_generator)
print(evaluate)
9/9 [==============================] - 8s 875ms/step - loss: 0.9517 - accuracy: 0.7096
[0.9517170190811157, 0.7095588445663452]
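This baseline lands at roughly 71% validation accuracy. Although keras-tuner is installed and RandomSearch is imported above, no search is actually run in this notebook. Below is a minimal sketch of how the same architecture could be handed to the tuner, reusing the train_generator and test_generator defined earlier; the hyperparameter names (conv_1_filters, conv_2_filters, dense_units, learning_rate), the search budget, and the directory name my_tuner_dir are illustrative choices, not part of the original notebook.

import tensorflow as tf
from kerastuner.tuners import RandomSearch

def build_model(hp):
    # Same stack as the baseline CNN, with a few sizes exposed as hyperparameters.
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Conv2D(
        filters=hp.Int('conv_1_filters', min_value=32, max_value=96, step=16),
        kernel_size=3, activation='relu', input_shape=[64, 64, 3]))
    model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
    model.add(tf.keras.layers.Conv2D(
        filters=hp.Int('conv_2_filters', min_value=32, max_value=96, step=16),
        kernel_size=3, activation='relu'))
    model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(
        hp.Int('dense_units', min_value=64, max_value=256, step=64),
        activation='relu'))
    model.add(tf.keras.layers.Dense(11, activation='softmax'))
    model.compile(
        optimizer=tf.keras.optimizers.Adam(
            hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    return model

tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=5,                  # illustrative budget
    directory='my_tuner_dir',      # illustrative path
    project_name='monkey_species')

tuner.search(train_generator, validation_data=test_generator, epochs=7)
best_model = tuner.get_best_models(num_models=1)[0]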
def give_accuracy():
    # NOTE: test_generator.classes only lines up with the predictions when the
    # generator is created with shuffle=False; with the default shuffling, this
    # confusion matrix is effectively random (see the ~10% accuracy below).
    p = cnn.predict(test_generator)
    cm = confusion_matrix(y_true=test_generator.classes, y_pred=np.argmax(p, axis=-1))
    acc = cm.trace() / cm.sum()
    print('The Classification Report \n', cm)
    print(f'Accuracy: {acc*100}')

give_accuracy()
The Classification Report
[[2 2 1 6 1 1 3 4 2 4]
[2 1 2 3 0 6 4 4 4 2]
[3 5 3 1 2 2 3 5 3 0]
[3 4 3 1 3 6 3 3 0 4]
[2 4 1 1 3 4 3 5 1 2]
[2 1 2 4 2 3 1 3 3 7]
[1 1 3 3 4 1 3 6 3 1]
[3 2 5 5 2 1 1 6 1 2]
[4 2 6 3 1 3 2 3 3 0]
[4 2 3 5 1 2 4 2 0 3]]
Accuracy: 10.294117647058822
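The accuracy printed here sits at chance level, which is the alignment problem flagged in the comment above: flow_from_directory shuffles by default, so test_generator.classes is in directory order while the predictions are in shuffled order. A sketch of computing the matrix against a non-shuffled iterator instead; eval_generator is a new name introduced here for illustration, reusing the test_datagen and paths from earlier cells:

import numpy as np
from sklearn.metrics import confusion_matrix

# Rebuild the validation iterator without shuffling so the order of
# eval_generator.classes matches the order of the predictions.
eval_generator = test_datagen.flow_from_directory(
    '/content/validation/validation',
    target_size=(64, 64),
    batch_size=32,
    class_mode='categorical',
    shuffle=False)

p = cnn.predict(eval_generator)
cm = confusion_matrix(y_true=eval_generator.classes, y_pred=np.argmax(p, axis=-1))
print(cm)
print('Accuracy:', cm.trace() / cm.sum())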
import numpy as np
from tensorflow.keras.preprocessing import image
test_image = image.load_img('/content/validation/validation/n1/n100.jpg', target_size = (64,64))
test_image = image.img_to_array(test_image)
test_image=test_image/255
test_image = np.expand_dims(test_image, axis = 0)
result = cnn.predict(test_image)
test = np.array(test_image)
# making predictions
#prediction = np.argmax(cnn.predict(test_image), axis=-1)
prediction = np.argmax(cnn.predict(test_image))
prediction
8
output = {0:'mantled_howler', 1:'patas_monkey', 2:'bald_uakari', 3:'japanese_macaque', 4:'pygmy_marmoset',
          5:'white_headed_capuchin', 6:'silvery_marmoset', 7:'common_squirrel_monkey',
          8:'black_headed_night_monkey', 9:'nilgiri_langur'}
print("The prediction Of the Image is : ", output[prediction])
The prediction Of the Image is : black_headed_night_monkey
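The hard-coded output dictionary has to be kept in sync with whatever ordering flow_from_directory assigned to the class folders. The mapping can also be read off the generator itself; a short sketch, assuming the train_generator defined above (this yields the folder names n0..n9 rather than the hand-written common names):

# Invert the generator's folder-name -> index mapping, e.g. {'n0': 0, 'n1': 1, ...},
# so a predicted index can be looked up directly.
index_to_class = {v: k for k, v in train_generator.class_indices.items()}
print(index_to_class[int(prediction)])   # e.g. 'n8'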
# show the image
import matplotlib.pyplot as plt
test_image = image.load_img('/content/validation/validation/n1/n100.jpg', target_size = (64,64))
plt.axis('off')
plt.imshow(test_image)
plt.show()