from datetime import datetime
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization
import numpy as np
def alexnet(input_data_shape=(224, 224, 3), number_of_classes=10):
    model = Sequential()
    # 1st Convolutional Layer (AlexNet uses a large 11x11 kernel with stride 4)
    model.add(Conv2D(filters=96, input_shape=input_data_shape, kernel_size=(11, 11), strides=(4, 4), padding='same', activation='relu'))
    # Batch Normalization
    model.add(BatchNormalization())
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    # 2nd Convolutional Layer
    model.add(Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1), padding='same', activation='relu'))
    # Batch Normalization
    model.add(BatchNormalization())
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    # 3rd Convolutional Layer
    model.add(Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'))
    # 4th Convolutional Layer
    model.add(Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'))
    # 5th Convolutional Layer
    model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'))
    # Batch Normalization
    model.add(BatchNormalization())
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    # Flatten the feature maps to pass them to the Fully Connected Layers
    model.add(Flatten())
    # Fully Connected Layers
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(number_of_classes, activation='softmax'))
    model.summary()
    return model
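The builder returns an uncompiled Keras model, so an optimizer and loss still have to be attached before training. A minimal sketch of how alexnet might be used; the Adam optimizer and categorical cross-entropy loss are assumptions for illustration, not choices made above:
# Build the network and attach an (assumed) optimizer and loss
alexnet_model = alexnet(input_data_shape=(224, 224, 3), number_of_classes=10)
alexnet_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])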
def vgg_16(input_data_shape=(224, 224, 3), number_of_classes=10):
    model = Sequential()
    # Block 1 (VGG-16 uses 3x3 kernels throughout; only the first layer needs input_shape)
    model.add(Conv2D(filters=64, input_shape=input_data_shape, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    # Block 2
    model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    # Block 3
    model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    # Block 4
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    # Block 5
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    # Flatten the feature maps to pass them to the Fully Connected Layers
    model.add(Flatten())
    # Fully Connected Layers
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(number_of_classes, activation='softmax'))
    model.summary()
    return model
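Instantiating vgg_16 prints the layer-by-layer summary via model.summary(), and the overall size can also be read programmatically. A small sketch using Keras' count_params():
# Build the VGG-16-style network and report its total parameter count
vgg_model = vgg_16(input_data_shape=(224, 224, 3), number_of_classes=10)
print('Total parameters:', vgg_model.count_params())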
Set the training hyperparameters:
batch_size = 128
num_classes = 10
num_of_training_iteration = 100
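With the hyperparameters in place, either builder can be trained with model.fit. A sketch assuming CIFAR-10 as the dataset and treating num_of_training_iteration as the number of epochs (both are assumptions, since the section does not name a dataset or training schedule):
from keras.datasets import cifar10
from keras.utils import to_categorical

# Load and normalize the data, then one-hot encode the labels (CIFAR-10 is assumed here)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)

# CIFAR-10 images are 32x32x3, so the input shape is taken from the data rather than the 224x224 default
model = vgg_16(input_data_shape=x_train.shape[1:], number_of_classes=num_classes)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=num_of_training_iteration,
          validation_data=(x_test, y_test))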