Learn A-Z Deep Learning in 15 Days
What is Data Augmentation?
Data augmentation is the process of increasing the amount and diversity of training data. Instead of collecting new data, we transform the data we already have. Techniques such as rotation, shearing, zooming, cropping, flipping, and brightness adjustment are commonly used when training large neural networks.
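As a quick illustration (a minimal sketch, not part of the main pipeline; "sample.jpg" is a placeholder for any local image), the snippet below applies a few of these transformations with Keras' ImageDataGenerator and displays four random augmentations of the same image:

# Minimal sketch: visualize random augmentations of a single image.
# "sample.jpg" is a placeholder -- point it at any image you have.
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array

image = img_to_array(load_img('sample.jpg'))   # shape (H, W, 3)
image = image.reshape((1,) + image.shape)      # add a batch axis

datagen = ImageDataGenerator(rotation_range=20, zoom_range=0.2,
                             shear_range=0.2, horizontal_flip=True)

fig, axs = plt.subplots(1, 4, figsize=(8, 2))
for ax, batch in zip(axs, datagen.flow(image, batch_size=1)):
    ax.imshow(batch[0].astype('uint8'))        # one random transformation
    ax.axis('off')
plt.show()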
Important: use a Kaggle kernel to run this code. Download the Cats vs. Dogs dataset from the link below.
import cv2
import os
import numpy as np
Unzip the Dataset
!unzip ../input/train.zip
!unzip ../input/test1.zip
Import the Training Data
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_CHANNELS = 1
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)

directory = "/kaggle/working/train"
data = []
label = []
for filename in os.listdir(directory):
    image = cv2.imread(directory + '/' + filename, 0)  # 0 -> read as grayscale
    if image is None:
        continue  # skip unreadable files
    image = cv2.resize(image, IMAGE_SIZE)
    # Filenames look like "dog.123.jpg" / "cat.456.jpg"
    category = filename.split('.')[0]
    if category == 'dog':
        label.append(1)
    else:
        label.append(0)
    data.append(image / 255)  # scale pixel values to [0, 1]
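A quick sanity check never hurts: the full Kaggle training set contains 25,000 images split evenly between cats and dogs, so the counts below should come out balanced.

print(len(data), 'images loaded')
print('dogs:', sum(label), '| cats:', len(label) - sum(label))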
List to Array Conversion
data = np.array(data)
# Add the channel axis: (N, H, W) -> (N, H, W, 1)
data = data.reshape(data.shape[0], data.shape[1], data.shape[2], 1)
label = np.array(label)
print(data.shape)
print(label.shape)
Train Test Split
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(data, label, test_size=0.3, random_state=42)
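The split above is purely random. The Cats vs. Dogs classes are balanced, so this works fine here, but for skewed classes a stratified split (a small variant, not what this notebook uses) keeps the class ratio identical in both subsets:

x_train, x_val, y_train, y_val = train_test_split(
    data, label, test_size=0.3, random_state=42,
    stratify=label)  # preserve the cat/dog ratio in both splits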
from keras.utils import np_utils
y_train = np_utils.to_categorical(y_train,num_classes=2)
y_val = np_utils.to_categorical(y_val,num_classes=2)
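to_categorical one-hot encodes the integer labels, which is what the two-unit softmax output layer below expects. For example:

print(np_utils.to_categorical([0, 1], num_classes=2))
# [[1. 0.]   <- cat (label 0)
#  [0. 1.]]  <- dog (label 1)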
Data Augmentation
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
    rotation_range=20,            # random rotation up to +/- 20 degrees
    width_shift_range=0.2,        # horizontal shift up to 20% of width
    height_shift_range=0.2,       # vertical shift up to 20% of height
    brightness_range=(0.3, 0.7),  # random brightness scaling
    zoom_range=[0.5, 1.5],        # random zoom between 50% and 150%
    horizontal_flip=True          # random left-right flips
)
# No augmentation for validation data -- we only evaluate on it.
validation_datagen = ImageDataGenerator()

train_generator = train_datagen.flow(x_train, y_train, batch_size=64)
validation_generator = validation_datagen.flow(x_val, y_val, batch_size=64)
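To see what the generator actually feeds the network, you can pull a single batch and plot a few samples (a quick visual check, not part of the training pipeline):

import matplotlib.pyplot as plt

batch_x, batch_y = next(train_generator)  # one augmented batch of 64
fig, axs = plt.subplots(1, 4, figsize=(8, 2))
for k, ax in enumerate(axs):
    ax.imshow(batch_x[k].squeeze(), cmap='gray')  # drop the channel axis
    ax.set_title('dog' if batch_y[k].argmax() == 1 else 'cat')
    ax.axis('off')
plt.show()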
CNN Architecture
from keras.models import Model
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, Input, BatchNormalization
from keras import optimizers

img_input = Input(shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS))

# Block 1
x = Conv2D(32, (3, 3), activation='relu', padding='same')(img_input)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = Dropout(0.25)(x)

# Block 2 (takes x, not img_input -- otherwise the earlier layers are silently bypassed)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = Dropout(0.25)(x)

# Block 3
x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = Dropout(0.25)(x)

# Classifier head
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(2, activation='softmax', name='predictions')(x)

model = Model(img_input, x)
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=1e-6),
              metrics=['acc'])
from keras.callbacks import ModelCheckpoint, EarlyStopping

# Save a checkpoint whenever validation loss improves.
filepath = "./cp-{epoch:02d}.h5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             mode='min',
                             save_best_only=True,
                             verbose=1)

# Stop training if validation loss has not improved for 4 epochs.
earlystop = EarlyStopping(monitor='val_loss',
                          min_delta=0,
                          patience=4,
                          verbose=1,
                          restore_best_weights=True)

# Put our callbacks into a callback list.
callbacks = [earlystop, checkpoint]
Start the Training
history = model.fit_generator(train_generator,
                              steps_per_epoch=train_generator.n // train_generator.batch_size,
                              epochs=50,
                              validation_data=validation_generator,
                              validation_steps=validation_generator.n // validation_generator.batch_size,
                              verbose=1,
                              callbacks=callbacks)
Graph Epoch vs. Accuracy and Epoch vs. Loss
import matplotlib.pyplot as plt

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'blue', label='Training acc')
plt.plot(epochs, val_acc, 'red', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()
plt.plot(epochs, loss, 'blue', label='Training loss')
plt.plot(epochs, val_loss, 'red', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Import the Test Data
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_CHANNELS = 1
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)

directory = "/kaggle/working/test1"
data = []
filenames = []  # keep names in loading order so they stay aligned with predictions
for filename in os.listdir(directory):
    image = cv2.imread(directory + '/' + filename, 0)  # grayscale
    if image is None:
        continue  # skip unreadable files
    image = cv2.resize(image, IMAGE_SIZE)
    data.append(image / 255)
    filenames.append(filename)
data = np.array(data)
# Add the channel axis so the shape matches the model input: (N, 128, 128, 1)
test_image = data.reshape(data.shape[0], data.shape[1], data.shape[2], 1)
predictions = model.predict(test_image)
results = np.argmax(predictions, axis=1)  # index of the most probable class
key = {0: 'cat', 1: 'dog'}
label_prediction = [key[r] for r in results]
import matplotlib.pyplot as plt

# Show a 3x3 grid of test images with their predicted labels.
nb_rows = 3
nb_cols = 3
fig, axs = plt.subplots(nb_rows, nb_cols, figsize=(6, 6), dpi=100)
n = 0
for i in range(nb_rows):
    for j in range(nb_cols):
        axs[i, j].set_title(label_prediction[n])
        axs[i, j].imshow(data[n], cmap='gray')
        n += 1
plt.tight_layout()
plt.show()
import pandas as pd
# Use the filenames collected during loading so names and predictions stay aligned.
df = pd.DataFrame(data={'imagename': filenames, 'predicted_labels': label_prediction})
df.head()
df.to_csv('submission_new_model.csv', index=False, header=True)
In the next blog, we will start Deep Learning Classification with Transfer Learning.
https://sngurukuls247.blogspot.com/2020/05/deep-learning-4-classificationwithtrans.html
Feel free to contact me at:
Email - sn.gurukul24.7uk@gmail.com