Jovian
⭐️
Sign In

Camera Model Identification using Convolutional Neural Nets

This was done as part of a research project at MIT Manipal.

In [72]:
from __future__ import absolute_import, division, print_function, unicode_literals

import tensorflow as tf
from tensorflow.keras import datasets, layers, models, callbacks

print(tf.version.VERSION)
print(tf.keras.__version__)
1.14.0 2.2.4-tf
In [142]:
# All the non TF imports
import re
import os
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
from PIL import Image
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sn
from IPython.display import display, HTML

CMI (Camera Model Identification) using Deep Convolutional Neural Networks

In [74]:
# A slightly "wider", but still very simple high-pass filter.
# The 5x5 kernel's coefficients sum to zero (each row sums to 0), so flat
# image regions map to ~0 and only high-frequency content — the sensor
# noise residual used to fingerprint the camera — survives the convolution.
kernel = np.array([[-1, 2, -2, 2, -1],
                   [2,  -6,  8,  -6, 2],
                   [-2,  8,  -12,  8, -2],
                   [2,  -6,  8,  -6, 2],
                   [-1, 2, -2, 2, -1]])
def get_denoised(channel):
    """Suppress the high-frequency content of a single image channel.

    Convolves the channel with the module-level 5x5 high-pass `kernel`
    (scipy default boundary mode, 'reflect'), subtracts that residual from
    the original pixels, and returns the result resized to 100x100.

    Args:
        channel: a single-band PIL Image (one of the R/G/B planes).

    Returns:
        np.ndarray of shape (100, 100) holding the denoised channel.
    """
    orig_channel = channel
    channel = np.array(channel, dtype=float)
    highpass_5x5 = ndimage.convolve(channel, kernel)
    # ndarray.__rsub__ coerces the PIL image, yielding a float array.
    proc_image = Image.fromarray(orig_channel - highpass_5x5)
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # identical resampling filter (ANTIALIAS was an alias for it).
    return np.array(proc_image.resize((100, 100), Image.LANCZOS))

def denoise_image(image):
    """Denoise each band of an RGB PIL image and stack the results.

    Returns:
        np.ndarray of shape (100, 100, 3) — the per-channel output of
        get_denoised(), stacked along the last axis.
    """
    red, green, blue = image.split()
    denoised_red = get_denoised(red)
    denoised_green = get_denoised(green)
    denoised_blue = get_denoised(blue)
    return np.dstack((denoised_red, denoised_green, denoised_blue))
In [76]:
## Part 1: Load the Dresden training images and collect camera-model labels
# NOTE(review): hardcoded absolute local path — parameterize (e.g. a DATA_DIR
# constant) so the notebook runs on other machines.
folder = '/Users/rohitsanjay/research_work/dresden-images-3'
os.chdir(folder)
os.listdir()  # NOTE(review): return value is discarded; the loop below calls it again
images = []         # per-image pixel arrays
camera_models = []  # parallel list of camera-model label strings
def get_camera_model(image_name):
    """Extract the '<maker>_<model>' prefix from a Dresden image filename.

    e.g. 'Nikon_D200_1_18350.JPG' -> 'Nikon_D200'.
    """
    maker_model_pattern = re.compile(r'^[^_]*_[^_]*')
    prefixes = maker_model_pattern.findall(image_name)
    return prefixes[0]
# Training data: read every .JPG in the folder, record its camera-model
# label, and store its 100x100 pixel array.
# NOTE(review): only uppercase '.JPG' is matched; lowercase '.jpg' files
# would be silently skipped.
for image in os.listdir():
    if image.endswith('.JPG'):  
        print('Processing {}'.format(image))
        # Appending current camera model to the camera_models list
        camera_model = get_camera_model(image)
        camera_models.append(camera_model)
        
        # NOTE(review): `image` is rebound from a filename (str) to a PIL
        # Image here — two different kinds of thing under one name.
        image = Image.open(image).resize((100,100), Image.ANTIALIAS)
        image_array = np.array(image, dtype=float)
        # NOTE(review): denoised_array is computed but NEVER used — the line
        # below appends the raw image_array. If the high-pass noise-residual
        # features were intended as training input (as the cells above
        # suggest), this should append denoised_array instead; changing it
        # would alter all downstream results, so confirm intent first.
        denoised_array = image_array - denoise_image(image)
        # Appending image array to images array
        images.append(image_array)
Processing Panasonic_DMC-FZ50_0_27103.JPG Processing Canon_PowerShotA640_0_4708.JPG Processing Nikon_D200_1_18350.JPG Processing Nikon_D200_1_18344.JPG Processing Canon_PowerShotA640_0_4697.JPG Processing Canon_PowerShotA640_0_4683.JPG Processing Nikon_D200_1_18378.JPG Processing Nikon_D200_0_16097.JPG Processing Canon_PowerShotA640_0_4668.JPG Processing Panasonic_DMC-FZ50_0_27088.JPG Processing Panasonic_DMC-FZ50_0_27089.JPG Processing Nikon_D200_1_18386.JPG Processing Canon_PowerShotA640_0_4669.JPG Processing Canon_PowerShotA640_0_4682.JPG Processing Canon_PowerShotA640_0_4696.JPG Processing Canon_PowerShotA640_0_4709.JPG Processing Nikon_D200_0_16109.JPG Processing Nikon_D200_0_16121.JPG Processing Panasonic_DMC-FZ50_0_27102.JPG Processing Panasonic_DMC-FZ50_0_27100.JPG Processing Nikon_D200_0_16123.JPG Processing Canon_PowerShotA640_0_4680.JPG Processing Canon_PowerShotA640_0_4694.JPG Processing Nikon_D200_1_18384.JPG Processing Nikon_D200_0_16095.JPG Processing Canon_PowerShotA640_0_4695.JPG Processing Canon_PowerShotA640_0_4681.JPG Processing Nikon_D200_1_18352.JPG Processing Nikon_D200_1_18346.JPG Processing Panasonic_DMC-FZ50_0_27115.JPG Processing Panasonic_DMC-FZ50_0_27101.JPG Processing Panasonic_DMC-FZ50_0_27105.JPG Processing Panasonic_DMC-FZ50_0_27111.JPG Processing Canon_PowerShotA640_0_4685.JPG Processing Canon_PowerShotA640_0_4691.JPG Processing Nikon_D200_1_18342.JPG Processing Nikon_D200_1_18356.JPG Processing Nikon_D200_0_16091.JPG Processing Nikon_D200_1_18380.JPG Processing Canon_PowerShotA640_0_4690.JPG Processing Canon_PowerShotA640_0_4684.JPG Processing Nikon_D200_0_16127.JPG Processing Panasonic_DMC-FZ50_0_27104.JPG Processing Nikon_D200_0_16119.JPG Processing Nikon_D200_0_16125.JPG Processing Nikon_D200_0_16131.JPG Processing Canon_PowerShotA640_0_4692.JPG Processing Canon_PowerShotA640_0_4686.JPG Processing Nikon_D200_1_18382.JPG Processing Canon_PowerShotA640_0_4679.JPG Processing Panasonic_DMC-FZ50_0_27099.JPG Processing 
Panasonic_DMC-FZ50_0_27098.JPG Processing Nikon_D200_0_16087.JPG Processing Canon_PowerShotA640_0_4678.JPG Processing Nikon_D200_0_16093.JPG Processing Nikon_D200_1_18340.JPG Processing Nikon_D200_1_18354.JPG Processing Canon_PowerShotA640_0_4687.JPG Processing Canon_PowerShotA640_0_4693.JPG Processing Nikon_D200_1_18368.JPG Processing Panasonic_DMC-FZ50_0_27107.JPG Processing Panasonic_DMC-FZ50_0_27113.JPG Processing Agfa_Sensor530s_0_2258.JPG Processing Agfa_Sensor530s_0_2270.JPG Processing Agfa_Sensor530s_0_2264.JPG Processing Agfa_Sensor530s_0_2265.JPG Processing Agfa_Sensor530s_0_2271.JPG Processing Agfa_Sensor530s_0_2259.JPG Processing Agfa_Sensor530s_0_2267.JPG Processing Agfa_Sensor530s_0_2273.JPG Processing Agfa_Sensor530s_0_2272.JPG Processing Agfa_Sensor530s_0_2266.JPG Processing Nikon_D200_0_16009.JPG Processing Agfa_Sensor530s_0_2289.JPG Processing Agfa_Sensor530s_0_2262.JPG Processing Agfa_Sensor530s_0_2276.JPG Processing Agfa_Sensor530s_0_2277.JPG Processing Agfa_Sensor530s_0_2263.JPG Processing Agfa_Sensor530s_0_2288.JPG Processing Agfa_Sensor530s_0_2275.JPG Processing Agfa_Sensor530s_0_2261.JPG Processing Agfa_Sensor530s_0_2249.JPG Processing Agfa_Sensor530s_0_2248.JPG Processing Agfa_Sensor530s_0_2260.JPG Processing Agfa_Sensor530s_0_2274.JPG Processing Agfa_Sensor530s_0_2292.JPG Processing Agfa_Sensor530s_0_2286.JPG Processing Agfa_Sensor530s_0_2279.JPG Processing Agfa_Sensor530s_0_2251.JPG Processing Agfa_Sensor530s_0_2245.JPG Processing Agfa_Sensor530s_0_2244.JPG Processing Agfa_Sensor530s_0_2250.JPG Processing Agfa_Sensor530s_0_2278.JPG Processing Agfa_Sensor530s_0_2287.JPG Processing Agfa_Sensor530s_0_2293.JPG Processing Agfa_Sensor530s_0_2285.JPG Processing Agfa_Sensor530s_0_2291.JPG Processing Agfa_Sensor530s_0_2246.JPG Processing Agfa_Sensor530s_0_2252.JPG Processing Agfa_Sensor530s_0_2253.JPG Processing Agfa_Sensor530s_0_2247.JPG Processing Agfa_Sensor530s_0_2290.JPG Processing Agfa_Sensor530s_0_2284.JPG Processing 
Agfa_Sensor530s_0_2280.JPG Processing Agfa_Sensor530s_0_2257.JPG Processing Agfa_Sensor530s_0_2256.JPG Processing Nikon_D200_0_16011.JPG Processing Agfa_Sensor530s_0_2281.JPG Processing Agfa_Sensor530s_0_2283.JPG Processing Agfa_Sensor530s_0_2254.JPG Processing Agfa_Sensor530s_0_2268.JPG Processing Agfa_Sensor530s_0_2269.JPG Processing Agfa_Sensor530s_0_2255.JPG Processing Agfa_Sensor530s_0_2282.JPG Processing Nikon_D200_0_16101.JPG Processing Nikon_D200_0_16115.JPG Processing Nikon_D200_0_16129.JPG Processing Canon_PowerShotA640_0_4701.JPG Processing Canon_PowerShotA640_0_4675.JPG Processing Panasonic_DMC-FZ50_0_27081.JPG Processing Panasonic_DMC-FZ50_0_27095.JPG Processing Panasonic_DMC-FZ50_0_27094.JPG Processing Panasonic_DMC-FZ50_0_27080.JPG Processing Canon_PowerShotA640_0_4674.JPG Processing Nikon_D200_1_18358.JPG Processing Nikon_D200_1_18364.JPG Processing Nikon_D200_1_18370.JPG Processing Canon_PowerShotA640_0_4700.JPG Processing Panasonic_DMC-FZ50_0_27109.JPG Processing Canon_PowerShotA640_0_4702.JPG Processing Nikon_D200_1_18366.JPG Processing Canon_PowerShotA640_0_4689.JPG Processing Nikon_D200_1_18372.JPG Processing Canon_PowerShotA640_0_4676.JPG Processing Nikon_D200_0_16089.JPG Processing Panasonic_DMC-FZ50_0_27096.JPG Processing Panasonic_DMC-FZ50_0_27082.JPG Processing Panasonic_DMC-FZ50_0_27083.JPG Processing Panasonic_DMC-FZ50_0_27097.JPG Processing Canon_PowerShotA640_0_4677.JPG Processing Canon_PowerShotA640_0_4663.JPG Processing Canon_PowerShotA640_0_4688.JPG Processing Canon_PowerShotA640_0_4703.JPG Processing Nikon_D200_0_16103.JPG Processing Nikon_D200_0_16117.JPG Processing Nikon_D200_0_16113.JPG Processing Nikon_D200_0_16107.JPG Processing Canon_PowerShotA640_0_4707.JPG Processing Canon_PowerShotA640_0_4698.JPG Processing Canon_PowerShotA640_0_4667.JPG Processing Canon_PowerShotA640_0_4673.JPG Processing Panasonic_DMC-FZ50_0_27093.JPG Processing Panasonic_DMC-FZ50_0_27087.JPG Processing Panasonic_DMC-FZ50_0_27086.JPG Processing 
Panasonic_DMC-FZ50_0_27092.JPG Processing Canon_PowerShotA640_0_4672.JPG Processing Canon_PowerShotA640_0_4666.JPG Processing Nikon_D200_0_16099.JPG Processing Nikon_D200_1_18376.JPG Processing Canon_PowerShotA640_0_4699.JPG Processing Nikon_D200_1_18362.JPG Processing Canon_PowerShotA640_0_4706.JPG Processing Canon_PowerShotA640_0_4712.JPG Processing Canon_PowerShotA640_0_4704.JPG Processing Canon_PowerShotA640_0_4710.JPG Processing Nikon_D200_1_18348.JPG Processing Nikon_D200_1_18374.JPG Processing Nikon_D200_1_18360.JPG Processing Canon_PowerShotA640_0_4670.JPG Processing Canon_PowerShotA640_0_4664.JPG Processing Panasonic_DMC-FZ50_0_27084.JPG Processing Panasonic_DMC-FZ50_0_27090.JPG Processing Nikon_D200_1_18176.JPG Processing Panasonic_DMC-FZ50_0_27091.JPG Processing Panasonic_DMC-FZ50_0_27085.JPG Processing Canon_PowerShotA640_0_4665.JPG Processing Canon_PowerShotA640_0_4671.JPG Processing Canon_PowerShotA640_0_4711.JPG Processing Canon_PowerShotA640_0_4705.JPG Processing Nikon_D200_0_16111.JPG Processing Nikon_D200_0_16105.JPG
In [83]:
# One-hot encode the camera-model names into label vectors.
encoder = LabelBinarizer()
labels = np.array(encoder.fit_transform(camera_models))

num_labels = len(set(camera_models))
print("There are {} classes".format(num_labels))

# Quick class-balance check: images per camera model.
df = pd.DataFrame(camera_models)
class_counts = df[0].value_counts()
print(class_counts)

# 70/30 train/test split, seeded for reproducibility.
split = train_test_split(images, labels, test_size=0.30, random_state=42)
data_train, data_test, labels_train, labels_test = split

print('The training data is {0} long and the testing data is {1} long'.format(len(data_train), len(data_test)))
There are 4 classes Agfa_Sensor530s 50 Canon_PowerShotA640 50 Nikon_D200 50 Panasonic_DMC-FZ50 31 Name: 0, dtype: int64 The training data is 126 long and the testing data is 55 long
In [78]:
# ConvNet
def get_model():
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3), data_format="channels_last"))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((5, 5)))
    model.add(layers.Flatten())
    model.add(layers.Dense(30, activation='relu'))
    model.add(layers.Dense(num_labels, activation='softmax'))
    return model

# Instantiate the network and show its layer/parameter summary.
model = get_model()
model.summary()

# categorical_crossentropy matches the one-hot labels produced by
# LabelBinarizer; 'adam' with default learning rate.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_4 (Conv2D) (None, 98, 98, 32) 896 _________________________________________________________________ conv2d_5 (Conv2D) (None, 96, 96, 64) 18496 _________________________________________________________________ conv2d_6 (Conv2D) (None, 94, 94, 64) 36928 _________________________________________________________________ conv2d_7 (Conv2D) (None, 92, 92, 64) 36928 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 18, 18, 64) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 20736) 0 _________________________________________________________________ dense_2 (Dense) (None, 30) 622110 _________________________________________________________________ dense_3 (Dense) (None, 4) 124 ================================================================= Total params: 715,482 Trainable params: 715,482 Non-trainable params: 0 _________________________________________________________________
In [79]:
# Train for 10 epochs on the full training split.
# NOTE(review): no validation_data/validation_split is passed, so the
# epoch metrics above are training accuracy only.
history = model.fit(np.array(data_train), 
          labels_train, 
          epochs=10)
# NOTE(review): an accuracy/loss plot from `history` was apparently
# planned here but never added.
Epoch 1/10 126/126 [==============================] - 11s 86ms/sample - loss: 1.9983 - acc: 0.1746 Epoch 2/10 126/126 [==============================] - 9s 72ms/sample - loss: 3.2544 - acc: 0.3889 Epoch 3/10 126/126 [==============================] - 9s 73ms/sample - loss: 1.9427 - acc: 0.1429 Epoch 4/10 126/126 [==============================] - 12s 95ms/sample - loss: 1.3262 - acc: 0.5714 Epoch 5/10 126/126 [==============================] - 10s 76ms/sample - loss: 0.7421 - acc: 0.8730 Epoch 6/10 126/126 [==============================] - 9s 75ms/sample - loss: 0.4648 - acc: 1.0000 Epoch 7/10 126/126 [==============================] - 10s 76ms/sample - loss: 1.7271 - acc: 0.9921 Epoch 8/10 126/126 [==============================] - 9s 73ms/sample - loss: 0.2227 - acc: 0.9921 Epoch 9/10 126/126 [==============================] - 9s 74ms/sample - loss: 0.8810 - acc: 0.9444 Epoch 10/10 126/126 [==============================] - 9s 75ms/sample - loss: 0.7922 - acc: 0.9444
In [90]:
# Evaluate the model on the test data using `evaluate`
print('\n# Evaluate on test data')
results = model.evaluate(np.array(data_test), labels_test)
print('test loss, test acc:', results)

# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print('\n# Generate predictions for samples')
predictions = model.predict(np.array(data_test))
# Fix: keep the raw softmax probabilities. The previous
# `predictions = np.around(predictions)` rounded each probability to 0/1
# before the later np.argmax — any sample whose top probability was below
# 0.5 became an all-zero vector, which argmax silently maps to class 0,
# corrupting the confusion matrix. argmax on raw probabilities is correct.
# Evaluate on test data 55/55 [==============================] - 1s 18ms/sample - loss: 0.1623 - acc: 0.9455 test loss, test acc: [0.16225660985166376, 0.94545454] # Generate predictions for samples
In [81]:
# Sanity-check the model on a single unseen image.
# NOTE(review): Nikon_D70 is NOT one of the 4 training classes, so every
# prediction here is necessarily wrong-by-construction — the softmax can
# only pick among the trained camera models.
# NOTE(review): hardcoded absolute local path — parameterize for portability.
testing_image = '/Users/rohitsanjay/research_work/dresden-images/Nikon_D70_0_19593.JPG'
# Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same filter.
image = Image.open(testing_image).resize((100,100), Image.LANCZOS)
image_array = np.array(image, dtype=float)
# Wrap the single image in a length-1 batch for predict().
sample = []
sample.append(image_array)
model.predict(np.array(sample))
Out[81]:
array([[0., 0., 0., 1.]], dtype=float32)
In [143]:
# Confusion matrix over the test split, labeled with camera-model names.
true_classes = np.argmax(labels_test, axis=1)
predicted_classes = np.argmax(predictions, axis=1)
cm = confusion_matrix(true_classes, predicted_classes)
df_cm = pd.DataFrame(cm, index=encoder.classes_, columns=encoder.classes_)
display(HTML(df_cm.to_html()))
In [ ]:
# Save this notebook to Jovian (network I/O; requires authentication).
# NOTE(review): hardcoded absolute local path, and a mid-notebook import —
# `import jovian` belongs in the imports cell at the top.
os.chdir('/Users/rohitsanjay/cmi-cnn')
import jovian
jovian.commit()
[jovian] Saving notebook..
In [ ]: