from datetime import datetime
import keras
from keras import backend as K
K.set_image_data_format('channels_last')  # input shapes below are (height, width, channels)
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
import numpy as np


def alexnet(input_data_shape=(224, 224, 3), number_of_classes=10):
    model = Sequential()

    # 1st Convolutional Layer (AlexNet uses 11x11 kernels with stride 4 here)
    model.add(Conv2D(filters=96, input_shape=input_data_shape, kernel_size=(11, 11), strides=(4, 4), padding='same', activation='relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

    # 2nd Convolutional Layer
    model.add(Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1), padding='same', activation='relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

    # 3rd Convolutional Layer
    model.add(Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'))
    # 4th Convolutional Layer
    model.add(Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'))
    # 5th Convolutional Layer
    model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'))
    # Max Pooling (AlexNet only pools after the 1st, 2nd and 5th convolutional layers)
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

    # Flatten the feature maps to pass them to Fully Connected Layers
    model.add(Flatten())

    # Fully Connected Layers (dropout of 0.5, as in the original AlexNet)
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(number_of_classes, activation='softmax'))

    model.summary()
    return model
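As a quick check, the model builder can be called and compiled directly. A minimal sketch, where the Adam optimizer and categorical cross-entropy loss are illustrative assumptions rather than choices made later in this tutorial:

alexnet_model = alexnet(input_data_shape=(224, 224, 3), number_of_classes=10)
# Optimizer and loss are placeholder choices for this smoke test
alexnet_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
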

def vgg_16(input_data_shape=(224, 224, 3), number_of_classes=10):
    model = Sequential()

    # Block 1 (VGG-16 uses 3x3 kernels throughout; input_shape is only needed on the first layer)
    model.add(Conv2D(filters=64, input_shape=input_data_shape, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

    # Block 2
    model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

    # Block 3
    model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

    # Block 4
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

    # Block 5
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

    # Flatten the feature maps to pass them to Fully Connected Layers
    model.add(Flatten())

    # Fully Connected Layers (dropout of 0.5, as in the original VGG-16)
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(number_of_classes, activation='softmax'))

    model.summary()
    return model
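
To see how much heavier VGG-16 is than the AlexNet model built above, both can be instantiated and their weight counts compared with count_params(). A minimal sketch using the default 224x224x3 input:

vgg_model = vgg_16(input_data_shape=(224, 224, 3), number_of_classes=10)
# The 4096-unit dense layers after Flatten account for most of the weights in both models
print('AlexNet parameters:', alexnet_model.count_params())
print('VGG-16 parameters :', vgg_model.count_params())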

Set the training hyper-parameters:

batch_size = 128
num_classes = 10
num_of_training_iteration = 100
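
These values are wired into model.fit later on. The sketch below shows that wiring on randomly generated stand-in data, since the real dataset has not been loaded yet; x_dummy and y_dummy are hypothetical placeholders, and using num_of_training_iteration as the epochs argument is an assumption.

# Random stand-in batch, only to illustrate how the hyper-parameters are passed to fit()
x_dummy = np.random.rand(batch_size, 224, 224, 3).astype('float32')
y_dummy = keras.utils.to_categorical(np.random.randint(num_classes, size=batch_size), num_classes)
alexnet_model.fit(x_dummy, y_dummy, batch_size=batch_size, epochs=num_of_training_iteration)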

Set the input image shape/size:
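
For example, sticking with the channels-last 224x224 RGB default used by the model builders above (the variable names and exact values here are placeholders; the real shape depends on the dataset chosen next):

# Hypothetical values matching the default input_data_shape of alexnet() and vgg_16()
img_rows, img_cols, img_channels = 224, 224, 3
input_shape = (img_rows, img_cols, img_channels)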