In [2]:
!pip install jovian  -q
Building wheel for jovian (setup.py) ... done
Building wheel for uuid (setup.py) ... done
In [2]:
import jovian
In [4]:
from datetime import datetime
import keras
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
import numpy as np


def alexnet(input_data_shape=(224, 224, 3, ), number_of_classes=10):
    model = Sequential()

    # 1st Convolutional Layer
    model.add(Conv2D(filters=96, input_shape=input_data_shape, kernel_size=(11, 11), strides=(4, 4), padding='valid', activation='relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # 2nd Convolutional Layer
    model.add(Conv2D(filters=256, kernel_size=(11, 11), strides=(1, 1), padding='valid', activation='relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # 3rd Convolutional Layer
    model.add(Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='relu'))

    # 4th Convolutional Layer
    model.add(Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='relu'))

    # 5th Convolutional Layer
    model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # Flatten the feature maps to pass them to Fully Connected Layers
    model.add(Flatten())

    # Fully Connected Layers
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(number_of_classes, activation='softmax'))

    model.summary()
    return model
Using TensorFlow backend.
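All convolutions and poolings in alexnet use padding='valid', so each layer shrinks the spatial size to floor((input - kernel) / stride) + 1; the shapes reported by model.summary() further down follow directly from that formula. A small illustrative cell (the helper name and the (kernel, stride) list are written out here for clarity and are not part of the original notebook):
In [ ]:
def valid_output_size(size, kernel, stride):
    # spatial size after a 'valid' convolution or pooling layer
    return (size - kernel) // stride + 1

size = 224
# (kernel, stride) of each layer in alexnet, in order: conv1, pool, conv2, pool, conv3-5, pool
for kernel, stride in [(11, 4), (2, 2), (11, 1), (2, 2), (3, 1), (3, 1), (3, 1), (2, 2)]:
    size = valid_output_size(size, kernel, stride)
    print(size)  # 54, 27, 17, 8, 6, 4, 2, 1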
In [ ]:
def vgg_16(input_data_shape=(224, 224, 3,), number_of_classes=10):
    model = Sequential()
    # Block 1
    model.add(Conv2D(filters=64, input_shape=input_data_shape, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 2
    model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 3
    model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 4
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 5
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Flatten the feature maps to pass them to Fully Connected Layers
    model.add(Flatten())

    # Fully Connected Layers
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(number_of_classes, activation='softmax'))

    # Print the model architecture
    model.summary()

    return model
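vgg_16 above follows the VGG-16 layer layout but omits the dropout the original places between the dense layers; since dropout adds no parameters, the count is unaffected. As a cross-check, Keras ships a reference VGG-16 under keras.applications; a minimal sketch, assuming the Keras version used here exposes the usual VGG16(weights, include_top, input_shape, classes) signature:
In [ ]:
# Reference VGG-16 from keras.applications, for comparison only (not timed below)
from keras.applications import VGG16

reference_vgg = VGG16(weights=None, include_top=True, input_shape=(224, 224, 3), classes=10)
reference_vgg.summary()  # expected to report the same 134,301,514 parameters as vgg_16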
In [ ]:
def Cnn(input_data_shape=(224, 224, 3,), number_of_classes=10):
    model = Sequential()
    # Block 1
    model.add(Conv2D(filters=64, input_shape=input_data_shape, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 2
    model.add(Conv2D(filters=128, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(Conv2D(filters=128, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 3
    model.add(Conv2D(filters=256, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 4
    model.add(Conv2D(filters=512, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 5
    model.add(Conv2D(filters=512, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(5, 5), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Flatten the feature maps to pass them to Fully Connected Layers
    model.add(Flatten())

    # Fully Connected Layers
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(number_of_classes, activation='softmax'))

    # Print the model architecture
    model.summary()

    return model
In [ ]:
batch_size = 128
num_classes = 10
num_of_training_iteration = 100  # number of timed prediction runs per model (no training is performed in this notebook)
In [ ]:
input_data_shape = (224, 224, 3)
In [10]:
alexnet_model = alexnet(input_data_shape=input_data_shape)
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d_1 (Conv2D)            (None, 54, 54, 96)        34944
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 27, 27, 96)        0
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 17, 17, 256)       2973952
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 8, 8, 256)         0
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 6, 6, 384)         885120
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 4, 4, 384)         1327488
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 2, 2, 256)         884992
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 1, 1, 256)         0
_________________________________________________________________
flatten_1 (Flatten)          (None, 256)               0
_________________________________________________________________
dense_1 (Dense)              (None, 4096)              1052672
_________________________________________________________________
dense_2 (Dense)              (None, 4096)              16781312
_________________________________________________________________
dense_3 (Dense)              (None, 10)                40970
=================================================================
Total params: 23,981,450
Trainable params: 23,981,450
Non-trainable params: 0
_________________________________________________________________
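Every Conv2D row above contributes (kernel_height * kernel_width * input_channels + 1) * filters parameters, and every Dense row (inputs + 1) * units. A quick check of a few rows (an illustrative cell, not part of the original run):
In [ ]:
# Sanity check of parameter counts from the summary above
print((11 * 11 * 3 + 1) * 96)     # conv2d_1 -> 34944
print((11 * 11 * 96 + 1) * 256)   # conv2d_2 -> 2973952
print((3 * 3 * 256 + 1) * 384)    # conv2d_3 -> 885120
print((256 + 1) * 4096)           # dense_1  -> 1052672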
In [11]:
vgg16_model = vgg_16(input_data_shape=input_data_shape)
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d_6 (Conv2D)            (None, 224, 224, 64)      1792
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 224, 224, 64)      36928
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 112, 112, 64)      0
_________________________________________________________________
conv2d_8 (Conv2D)            (None, 112, 112, 128)     73856
_________________________________________________________________
conv2d_9 (Conv2D)            (None, 112, 112, 128)     147584
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 56, 56, 128)       0
_________________________________________________________________
conv2d_10 (Conv2D)           (None, 56, 56, 256)       295168
_________________________________________________________________
conv2d_11 (Conv2D)           (None, 56, 56, 256)       590080
_________________________________________________________________
conv2d_12 (Conv2D)           (None, 56, 56, 256)       590080
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 28, 28, 256)       0
_________________________________________________________________
conv2d_13 (Conv2D)           (None, 28, 28, 512)       1180160
_________________________________________________________________
conv2d_14 (Conv2D)           (None, 28, 28, 512)       2359808
_________________________________________________________________
conv2d_15 (Conv2D)           (None, 28, 28, 512)       2359808
_________________________________________________________________
max_pooling2d_7 (MaxPooling2 (None, 14, 14, 512)       0
_________________________________________________________________
conv2d_16 (Conv2D)           (None, 14, 14, 512)       2359808
_________________________________________________________________
conv2d_17 (Conv2D)           (None, 14, 14, 512)       2359808
_________________________________________________________________
conv2d_18 (Conv2D)           (None, 14, 14, 512)       2359808
_________________________________________________________________
max_pooling2d_8 (MaxPooling2 (None, 7, 7, 512)         0
_________________________________________________________________
flatten_2 (Flatten)          (None, 25088)             0
_________________________________________________________________
dense_4 (Dense)              (None, 4096)              102764544
_________________________________________________________________
dense_5 (Dense)              (None, 4096)              16781312
_________________________________________________________________
dense_6 (Dense)              (None, 10)                40970
=================================================================
Total params: 134,301,514
Trainable params: 134,301,514
Non-trainable params: 0
_________________________________________________________________
In [12]:
Cnn_model = Cnn(input_data_shape=input_data_shape)
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d_19 (Conv2D)           (None, 224, 224, 64)      4864
_________________________________________________________________
conv2d_20 (Conv2D)           (None, 224, 224, 64)      102464
_________________________________________________________________
max_pooling2d_9 (MaxPooling2 (None, 112, 112, 64)      0
_________________________________________________________________
conv2d_21 (Conv2D)           (None, 112, 112, 128)     204928
_________________________________________________________________
conv2d_22 (Conv2D)           (None, 112, 112, 128)     409728
_________________________________________________________________
max_pooling2d_10 (MaxPooling (None, 56, 56, 128)       0
_________________________________________________________________
conv2d_23 (Conv2D)           (None, 56, 56, 256)       819456
_________________________________________________________________
conv2d_24 (Conv2D)           (None, 56, 56, 256)       1638656
_________________________________________________________________
conv2d_25 (Conv2D)           (None, 56, 56, 256)       1638656
_________________________________________________________________
max_pooling2d_11 (MaxPooling (None, 28, 28, 256)       0
_________________________________________________________________
conv2d_26 (Conv2D)           (None, 28, 28, 512)       3277312
_________________________________________________________________
conv2d_27 (Conv2D)           (None, 28, 28, 512)       6554112
_________________________________________________________________
conv2d_28 (Conv2D)           (None, 28, 28, 512)       6554112
_________________________________________________________________
max_pooling2d_12 (MaxPooling (None, 14, 14, 512)       0
_________________________________________________________________
conv2d_29 (Conv2D)           (None, 14, 14, 512)       6554112
_________________________________________________________________
conv2d_30 (Conv2D)           (None, 14, 14, 512)       6554112
_________________________________________________________________
conv2d_31 (Conv2D)           (None, 14, 14, 512)       6554112
_________________________________________________________________
max_pooling2d_13 (MaxPooling (None, 7, 7, 512)         0
_________________________________________________________________
flatten_3 (Flatten)          (None, 25088)             0
_________________________________________________________________
dense_7 (Dense)              (None, 4096)              102764544
_________________________________________________________________
dense_8 (Dense)              (None, 4096)              16781312
_________________________________________________________________
dense_9 (Dense)              (None, 10)                40970
=================================================================
Total params: 160,453,450
Trainable params: 160,453,450
Non-trainable params: 0
_________________________________________________________________
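The only difference from vgg_16 is the 5x5 kernels, which give every convolutional layer here close to three times the parameters of its VGG-16 counterpart; for the first layer of each (an illustrative cell, not from the original run):
In [ ]:
# First-layer parameter counts: 5x5 kernels (Cnn) vs 3x3 kernels (vgg_16)
print((5 * 5 * 3 + 1) * 64)  # conv2d_19 -> 4864
print((3 * 3 * 3 + 1) * 64)  # conv2d_6  -> 1792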
In [13]:
alexnet_model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=["accuracy"])

alexnet_inference_time = []

# dummy tensor to check the inference time of each network
x_test = np.random.rand(batch_size, input_data_shape[0], input_data_shape[1], input_data_shape[2])

for _ in range(num_of_training_iteration):
    alexnet_inference_start = datetime.now()
    alexnet_inference = alexnet_model.predict_classes(x_test)
    alexnet_inference_finish = datetime.now()
    alexnet_inference_time.append(alexnet_inference_finish - alexnet_inference_start)
print("Average Inference time for AlexNet: {}".format(np.mean(alexnet_inference_time)))

Average Inference time for AlexNet: 0:00:00.248749
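The same loop is repeated for VGG-16 and the custom CNN below. A small helper would avoid the duplication and could also add a warm-up call, since the first predict on a freshly compiled model also pays for one-time graph and session setup; a minimal sketch (hypothetical, not used in the runs that follow):
In [ ]:
import time

def average_inference_seconds(model, x, iterations):
    # Hypothetical helper: one warm-up prediction so graph/session setup is
    # not counted, then the mean wall-clock time of `iterations` predictions.
    model.predict(x)
    durations = []
    for _ in range(iterations):
        start = time.perf_counter()
        model.predict(x)
        durations.append(time.perf_counter() - start)
    return np.mean(durations)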
In [14]:
vgg16_model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=["accuracy"])
vgg16_inference_time = []
for _ in range(num_of_training_iteration):
    vgg16_inference_start = datetime.now()
    vgg16_inference = vgg16_model.predict_classes(x_test)
    vgg16_inference_finish = datetime.now()
    vgg16_inference_time.append(vgg16_inference_finish - vgg16_inference_start)

print("Average Inference time for VGG-16: {}".format(np.mean(vgg16_inference_time)))
Average Inference time for VGG-16: 0:00:01.233920
In [15]:
Cnn_model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=["accuracy"])

Cnn_inference_time = []

# dummy tensor to check the inference time of each network
x_test = np.random.rand(batch_size, input_data_shape[0], input_data_shape[1], input_data_shape[2])

for _ in range(num_of_training_iteration):
    Cnn_inference_start = datetime.now()
    Cnn_inference = Cnn_model.predict_classes(x_test)
    Cnn_inference_finish = datetime.now()
    Cnn_inference_time.append(Cnn_inference_finish - Cnn_inference_start)
print("Average Inference time for Cnn: {}".format(np.mean(Cnn_inference_time)))

Average Inference time for Cnn: 0:00:01.886415
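Summarizing the three runs before logging them to Jovian (parameter counts from the model summaries above; times are the averages printed above, each over 100 predictions on a random batch of 128 images of 224x224x3):

Model     Parameters     Avg. inference time (batch of 128)
AlexNet    23,981,450    0:00:00.248749
VGG-16    134,301,514    0:00:01.233920
Cnn       160,453,450    0:00:01.886415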
In [4]:
jovian.log_metrics({
    'AlexNet_parameters': 23981450,
    'AlexNet_Inference_Time': '0:00:00.248749',
    'vgg_parameters': 134301514,
    'vgg_inference_Time': '0:00:01.233920',
    'Cnn_parameters': 160453450,
    'Cnn_Inference_Time': '0:00:01.886415'
})
[jovian] Metrics logged.
In [ ]:
jovian.commit()
[jovian] Saving notebook..
[jovian] Updating notebook "bf0276c8638d4b0b89733ad89d32d795" on https://jvn.io
[jovian] Uploading notebook..