In [4]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import jovian
In [2]:
train = pd.read_csv('C:\\Users\\vsneh\\Udemy-notebooks\\sign-language-kaggle\\sign_mnist_train.csv')
test = pd.read_csv('C:\\Users\\vsneh\\Udemy-notebooks\\sign-language-kaggle\\sign_mnist_test.csv')
In [3]:
train_labels = train['label'].values
In [4]:
test_labels = test['label'].values
In [5]:
train.drop('label',axis = 1,inplace=True)
In [6]:
test.drop('label',axis = 1,inplace=True)
In [7]:
type(test)
Out[7]:
pandas.core.frame.DataFrame
In [8]:
# rows are already flat 784-pixel vectors, so reshape(28,28).flatten() is a no-op
train = np.array([np.reshape(i,(28,28)).flatten() for i in train.values])
In [9]:
test = np.array([np.reshape(i,(28,28)).flatten() for i in test.values])  # same flattening for the test set
In [10]:
plt.imshow(train[6].reshape(28,28))
Out[10]:
<matplotlib.image.AxesImage at 0x180d5b27548>
Notebook Image
In [11]:
from sklearn.preprocessing import LabelBinarizer
l_b = LabelBinarizer()
train_labels = l_b.fit_transform(train_labels)
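LabelBinarizer one-hot encodes the 24 letter classes (the dataset's labels skip J, and Z is absent entirely, because those signs involve motion). As an illustrative round trip, not part of the original run, the fitted binarizer can map in both directions:

one_hot = l_b.transform([3])              # one-hot row for class label 3
label = l_b.inverse_transform(one_hot)    # array([3]), back to the integer label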
In [12]:
train_labels
Out[12]:
array([[0, 0, 0, ..., 0, 0, 0],
       [0, 0, 0, ..., 0, 0, 0],
       [0, 0, 1, ..., 0, 0, 0],
       ...,
       [0, 0, 0, ..., 0, 0, 0],
       [0, 0, 0, ..., 0, 0, 0],
       [0, 0, 0, ..., 0, 1, 0]])
In [13]:
from sklearn.model_selection import train_test_split
In [14]:
x_train, x_test, y_train, y_test = train_test_split(train, train_labels, test_size = 0.3, random_state = 101)
In [15]:
import keras
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
Using TensorFlow backend.
In [16]:
batch_size = 128
num_classes = 24
epochs = 25
In [17]:
x_train = x_train / 255
x_test = x_test / 255
In [18]:
# reuse the binarizer already fitted on the training labels
test_labels = l_b.transform(test_labels)
In [19]:
x_train.shape
Out[19]:
(19218, 784)
In [20]:
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
In [21]:
x_train.shape
Out[21]:
(19218, 28, 28, 1)
In [22]:
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
In [23]:
plt.imshow(x_train[0].reshape(28,28))
Out[23]:
<matplotlib.image.AxesImage at 0x1809024ee08>
Notebook Image
In [28]:
model = Sequential()
model.add(Conv2D(64, kernel_size=(3,3), activation = 'relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size = (2, 2)))

model.add(Conv2D(64, kernel_size = (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))

model.add(Conv2D(64, kernel_size = (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))

model.add(Flatten())
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.20))
model.add(Dense(num_classes, activation = 'softmax'))
In [29]:
model.compile(loss = keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
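Before training, a quick sanity check of the architecture prints each layer's output shape and parameter count:

model.summary()  # layer-by-layer output shapes and parameter counts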
In [30]:
history = model.fit(x_train, y_train, validation_data = (x_test, y_test), epochs=epochs, batch_size=batch_size)
Train on 19218 samples, validate on 8237 samples
Epoch 1/25 - 14s 705us/step - loss: 2.6482 - accuracy: 0.1953 - val_loss: 1.6937 - val_accuracy: 0.4777
Epoch 2/25 - 13s 692us/step - loss: 1.2711 - accuracy: 0.5802 - val_loss: 0.8214 - val_accuracy: 0.7352
Epoch 3/25 - 14s 704us/step - loss: 0.7344 - accuracy: 0.7487 - val_loss: 0.4553 - val_accuracy: 0.8604
Epoch 4/25 - 14s 708us/step - loss: 0.4834 - accuracy: 0.8347 - val_loss: 0.2903 - val_accuracy: 0.9196
Epoch 5/25 - 14s 708us/step - loss: 0.3232 - accuracy: 0.8935 - val_loss: 0.2183 - val_accuracy: 0.9371
Epoch 6/25 - 13s 702us/step - loss: 0.2189 - accuracy: 0.9309 - val_loss: 0.1410 - val_accuracy: 0.9602
Epoch 7/25 - 13s 700us/step - loss: 0.1505 - accuracy: 0.9544 - val_loss: 0.0872 - val_accuracy: 0.9836
Epoch 8/25 - 13s 700us/step - loss: 0.1115 - accuracy: 0.9666 - val_loss: 0.0663 - val_accuracy: 0.9819
Epoch 9/25 - 13s 701us/step - loss: 0.0926 - accuracy: 0.9707 - val_loss: 0.0344 - val_accuracy: 0.9945
Epoch 10/25 - 13s 702us/step - loss: 0.0608 - accuracy: 0.9829 - val_loss: 0.0353 - val_accuracy: 0.9941
Epoch 11/25 - 14s 709us/step - loss: 0.0470 - accuracy: 0.9871 - val_loss: 0.0171 - val_accuracy: 0.9978
Epoch 12/25 - 14s 711us/step - loss: 0.0395 - accuracy: 0.9898 - val_loss: 0.0152 - val_accuracy: 0.9981
Epoch 13/25 - 14s 704us/step - loss: 0.0298 - accuracy: 0.9921 - val_loss: 0.0104 - val_accuracy: 0.9987
Epoch 14/25 - 13s 702us/step - loss: 0.0338 - accuracy: 0.9908 - val_loss: 0.0101 - val_accuracy: 0.9989
Epoch 15/25 - 14s 706us/step - loss: 0.0311 - accuracy: 0.9902 - val_loss: 0.0053 - val_accuracy: 0.9994
Epoch 16/25 - 14s 704us/step - loss: 0.0189 - accuracy: 0.9953 - val_loss: 0.0086 - val_accuracy: 0.9990
Epoch 17/25 - 14s 705us/step - loss: 0.0281 - accuracy: 0.9915 - val_loss: 0.0035 - val_accuracy: 0.9999
Epoch 18/25 - 14s 707us/step - loss: 0.0167 - accuracy: 0.9959 - val_loss: 0.0037 - val_accuracy: 0.9995
Epoch 19/25 - 14s 705us/step - loss: 0.0129 - accuracy: 0.9967 - val_loss: 0.0017 - val_accuracy: 1.0000
Epoch 20/25 - 14s 707us/step - loss: 0.0141 - accuracy: 0.9961 - val_loss: 0.0034 - val_accuracy: 0.9999
Epoch 21/25 - 14s 706us/step - loss: 0.0129 - accuracy: 0.9963 - val_loss: 0.0014 - val_accuracy: 0.9999
Epoch 22/25 - 14s 708us/step - loss: 0.0094 - accuracy: 0.9979 - val_loss: 0.0018 - val_accuracy: 0.9999
Epoch 23/25 - 288s 15ms/step - loss: 0.0092 - accuracy: 0.9978 - val_loss: 0.0013 - val_accuracy: 0.9999
Epoch 24/25 - 16s 856us/step - loss: 0.0155 - accuracy: 0.9956 - val_loss: 0.0014 - val_accuracy: 0.9999
Epoch 25/25 - 21s 1ms/step - loss: 0.0088 - accuracy: 0.9978 - val_loss: 0.0102 - val_accuracy: 0.9965
In [31]:
model.evaluate(x_test,y_test)
8237/8237 [==============================] - 2s 230us/step
Out[31]:
[0.010224563727316786, 0.9964792728424072]
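Since fit returned a `history` object, the learning curves can be plotted directly from its per-epoch records. A minimal sketch (the key names match the `accuracy` metric configured above):

plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()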
In [32]:
from sklearn.metrics import classification_report

predictions = model.predict_classes(x_test)
In [33]:
predictions
Out[33]:
array([ 5, 11,  4, ...,  2, 19, 14], dtype=int64)
In [34]:
predictions = keras.utils.to_categorical(predictions)
In [35]:
print(classification_report(y_test,predictions))
              precision    recall  f1-score   support

           0       1.00      1.00      1.00       336
           1       1.00      1.00      1.00       302
           2       1.00      1.00      1.00       353
           3       1.00      1.00      1.00       350
           4       1.00      1.00      1.00       280
           5       1.00      1.00      1.00       369
           6       1.00      1.00      1.00       314
           7       1.00      1.00      1.00       326
           8       1.00      1.00      1.00       344
           9       1.00      1.00      1.00       334
          10       1.00      1.00      1.00       344
          11       0.93      1.00      0.96       338
          12       1.00      0.99      0.99       353
          13       1.00      1.00      1.00       351
          14       1.00      1.00      1.00       302
          15       1.00      1.00      1.00       399
          16       1.00      1.00      1.00       402
          17       1.00      0.94      0.97       367
          18       1.00      1.00      1.00       361
          19       1.00      1.00      1.00       359
          20       1.00      1.00      1.00       316
          21       1.00      1.00      1.00       383
          22       1.00      1.00      1.00       331
          23       1.00      1.00      1.00       323

   micro avg       1.00      1.00      1.00      8237
   macro avg       1.00      1.00      1.00      8237
weighted avg       1.00      1.00      1.00      8237
 samples avg       1.00      1.00      1.00      8237
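Only classes 11 and 17 fall below perfect precision or recall; a confusion matrix would show which letters they are confused with. A minimal sketch, converting the one-hot arrays back to integer labels first:

from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test.argmax(axis=1), predictions.argmax(axis=1))
plt.imshow(cm)          # rows: true class, columns: predicted class
plt.xlabel('predicted')
plt.ylabel('true')
plt.colorbar()
plt.show()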
In [36]:
from sklearn.metrics import accuracy_score
In [37]:
accuracy_score(y_test, predictions)
Out[37]:
0.9964793007162802
In [38]:
test[0].shape

Out[38]:
(784,)

Single Image Prediction

In [39]:
model.save('projectx.h5')
In [2]:
from keras.models import load_model
model = load_model('projectx.h5')
Using TensorFlow backend.
In [4]:
img = test[1000]
pre =model.predict_classes(img.reshape(1,28,28,1))

---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-4-4fa13dfbfbb4> in <module>
----> 1 img = test[1000]
      2 pre =model.predict_classes(img.reshape(1,28,28,1))

NameError: name 'test' is not defined
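The error comes from the kernel restart before In [2] above: the model was re-loaded, but the `test` array from the first session no longer exists. Re-running the earlier loading cells restores it; a minimal sketch using the same CSV path as before:

import numpy as np
import pandas as pd

test = pd.read_csv('C:\\Users\\vsneh\\Udemy-notebooks\\sign-language-kaggle\\sign_mnist_test.csv')
test.drop('label', axis=1, inplace=True)          # drop labels, keep the 784 pixel columns
test = np.array([i.flatten() for i in test.values])  # flat 784-pixel vectors

After that, `test[1000]` resolves and the prediction outputs below (In [53], In [55]) follow.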
In [53]:
pre[0]
Out[53]:
3
In [55]:
img.shape
Out[55]:
(784,)
In [3]:
import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
import glob
import time
In [13]:
from keras.preprocessing import image
testing = image.load_img('testing_hand.jpg',target_size=(28,28))
In [14]:
plt.imshow(testing)
Out[14]:
<matplotlib.image.AxesImage at 0x23bba9a9488>
Notebook Image
In [15]:
testing = image.img_to_array(testing)
In [16]:
testing = cv2.cvtColor(testing,cv2.COLOR_RGB2GRAY)  # image.load_img returns RGB, not BGR
In [17]:
testing.shape
Out[17]:
(28, 28)
In [18]:
k = model.predict_classes(testing.reshape(1,28,28,1) / 255.0)  # scale to [0,1] as in training
In [19]:
print(k[0])
2
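The integer class index maps to a letter via the same 24-letter list used in Part 2 below ('j' and 'z' are absent because those signs involve motion):

letters = ['a','b','c','d','e','f','g','h','i','k','l','m',
           'n','o','p','q','r','s','t','u','v','w','x','y']
print(letters[k[0]])  # class index 2 -> 'c'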

Part 2 - Image extraction from video

In [11]:
def clean(paths):
    for k in paths:
        os.unlink(k)
In [12]:

hand_cascade = cv2.CascadeClassifier('C:/Users/vsneh/Udemy-notebooks/DATA/haarcascades/custom-hand-cascade.xml')
count = 0
cap = cv2.VideoCapture(0)
b = ['a','b','c','d','e','f','g','h','i','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y']  # 24 letters; 'j' and 'z' involve motion and are excluded
Ls = []  # running record of per-buffer letter predictions

while True:
    ret,frame = cap.read()
    frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    
#     hand_location = hand_cascade.detectMultiScale(frame,scaleFactor = 1.3,minNeighbors = 1)
#     for x,y,w,h in hand_location:
#         count = count+1
#         if(count >10):
#             break
#         cv2.rectangle(frame,(x,y),(x+w,y+h),[0,255,0],10)
#         #testing = image.img_to_array(testing)
#         #ls.append(model.predict_classes(frame.reshape(1,28,28,1)))
#         cv2.imwrite("C:/Users/vsneh/Udemy-notebooks/X_DATA/%d.jpg"%count,frame[y:y+h,x:x+w])

    count = count+1
    cv2.rectangle(frame,(300,300),(100,100),(255,255,0),3)
    frame_cut = frame[100:300,100:300]
    #cv2.imshow('frame',frame)
    frame_cut = cv2.flip(frame_cut,1)  # flipCode > 0 mirrors horizontally
    cv2.imwrite('C:\\Users\\vsneh\\Udemy-notebooks\\X_DATA\\%d.jpg'%count,frame_cut)
    
    #time.sleep(1)
    #if(count>10):
    #    break
    
    lst = []
    paths =glob.glob("C:/Users/vsneh/Udemy-notebooks/X_DATA/*")
    #print(paths) 
    for i in paths:
        #print(i)
        testing = image.load_img(i,target_size=(28,28))
        testing = image.img_to_array(testing)
        testing = cv2.cvtColor(testing,cv2.COLOR_RGB2GRAY)  # load_img returns RGB, not BGR
        k = model.predict_classes(testing.reshape(1,28,28,1) / 255.0)  # scale to [0,1] as in training
        lst.append(int(k[0]))  # plain ints so lst.count() compares reliably
        #print(testing)
    counter = 0
    num = 0 
      
    for i in lst: 
        fre = lst.count(i) 
        if(fre > counter): 
            counter = fre
            num = i 
    pred = num

    val = b[int(pred)]
    Ls.append(val)
    
    cv2.putText(frame,val,(150,180),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),5)
    cv2.imshow('result',frame)
    if(count>10):
        clean(paths)
        count = 0
    if cv2.waitKey(1) == ord('q'):
        break

cap.release() 
cv2.destroyAllWindows()
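The frequency loop above implements a majority vote over the buffered frame predictions; collections.Counter expresses the same idea more directly. A minimal equivalent sketch (majority_vote is a hypothetical helper, not part of the original notebook):

from collections import Counter

def majority_vote(preds):
    # return the most frequent class index among the buffered predictions
    counts = Counter(int(p) for p in preds)
    return counts.most_common(1)[0][0]

# e.g. pred = majority_vote(lst) would replace the counter/num loop above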


In [20]:
Ls
Out[20]:
# 596 entries, condensed as run-length (letter × consecutive count):
['p'×165, 'x'×15, 'l'×7, 'h'×22, 'g'×2, 'f'×20, 'i'×2, 'x'×3, 'i'×5,
 't'×23, 'f'×22, 'l'×11, 'f'×3, 'p'×74, 'x'×7, 'c'×26, 'x'×4, 't'×29,
 'p'×88, 'x'×22, 'p'×8, 'x'×24, 'p'×14]

In [14]:
def pred():
    lst = []
    paths = "C:/Users/vsneh/Udemy-notebooks/X_DATA"
    for i in os.listdir(paths):
        # os.listdir returns bare filenames, so join them back onto the folder
        testing = image.load_img(os.path.join(paths, i), target_size=(28,28))
        testing = image.img_to_array(testing)
        testing = cv2.cvtColor(testing, cv2.COLOR_RGB2GRAY)  # load_img returns RGB
        k = model.predict_classes(testing.reshape(1,28,28,1) / 255.0)  # scale as in training
        lst.append(int(k[0]))

    # majority vote: return the most frequent predicted class index
    count = 0
    num = lst[0]
    for i in lst:
        fre = lst.count(i)
        if fre > count:
            count = fre
            num = i
    return num

def clean():
    paths = "C:/Users/vsneh/Udemy-notebooks/X_DATA"
    for i in os.listdir(paths):
        os.unlink(os.path.join(paths, i))  # full path needed to delete

def val(l):
    b = ['a','b','c','d','e','f','g','h','i','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y']
    return b[int(l)]  # index with the predicted class, not a hard-coded 1
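Tying the three helpers together, one buffer cycle would look like this hypothetical usage sketch: predict over the saved frames, map the winning class index to its letter, then clear the folder for the next batch.

num = pred()       # majority class index over the buffered frames
letter = val(num)  # map the class index to its letter
clean()            # empty X_DATA for the next buffer
print(letter)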
In [36]:
import glob
paths = "C:/Users/vsneh/Udemy-notebooks/X_DATA/"
#print(os.listdir(paths)) 
for i in glob.glob("C:/Users/vsneh/Udemy-notebooks/X_DATA/*"):
    print(i)
#for i in os.listdir(paths):
    #print("v")
    #testing = image.load_img(i,target_size=(28,28))
    #print(testing)
C:/Users/vsneh/Udemy-notebooks/X_DATA\1.jpg
C:/Users/vsneh/Udemy-notebooks/X_DATA\testing_hand.jpg
C:/Users/vsneh/Udemy-notebooks/X_DATA\testing_hand2.jpg
In [41]:
paths = glob.glob("C:/Users/vsneh/Udemy-notebooks/X_DATA/*")
for i in paths:
    print(i)
C:/Users/vsneh/Udemy-notebooks/X_DATA\1.jpg
C:/Users/vsneh/Udemy-notebooks/X_DATA\testing_hand.jpg
C:/Users/vsneh/Udemy-notebooks/X_DATA\testing_hand2.jpg
In [ ]:
jovian.commit()
[jovian] Saving notebook..