import jovian
jovian.commit(environment='none')
import time

import cv2
import numpy as np


class YOLO:

    def __init__(self, config, model, labels, size=416, confidence=0.5, threshold=0.3):
        self.confidence = confidence
        self.threshold = threshold
        self.size = size
        self.labels = labels
        self.net = cv2.dnn.readNetFromDarknet(config, model)

    def inference(self, image):
        ih, iw = image.shape[:2]

        # determine the output layer names; getUnconnectedOutLayers()
        # returns a flat array in OpenCV >= 4.5.4 and an Nx1 array in
        # older builds, so flatten() handles both
        ln = self.net.getLayerNames()
        ln = [ln[i - 1] for i in self.net.getUnconnectedOutLayers().flatten()]

        blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (self.size, self.size), swapRB=True, crop=False)
        self.net.setInput(blob)
        start = time.time()
        layerOutputs = self.net.forward(ln)
        end = time.time()
        inference_time = end - start

        boxes = []
        confidences = []
        classIDs = []

        for output in layerOutputs:
            # loop over each of the detections
            for detection in output:
                # extract the class ID and confidence (i.e., probability) of
                # the current object detection
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]
                # filter out weak predictions by ensuring the detected
                # probability is greater than the minimum probability
                if confidence > self.confidence:
                    # scale the bounding box coordinates back relative to the
                    # size of the image, keeping in mind that YOLO actually
                    # returns the center (x, y)-coordinates of the bounding
                    # box followed by the boxes' width and height
                    box = detection[0:4] * np.array([iw, ih, iw, ih])
                    (centerX, centerY, width, height) = box.astype("int")
                    # use the center (x, y)-coordinates to derive the
                    # top-left corner of the bounding box
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    # update our list of bounding box coordinates, confidences,
                    # and class IDs
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)

        idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confidence, self.threshold)

        results = []
        if len(idxs) > 0:
            for i in idxs.flatten():
                # extract the bounding box coordinates
                x, y = (boxes[i][0], boxes[i][1])
                w, h = (boxes[i][2], boxes[i][3])
                class_id = classIDs[i]  # avoid shadowing the built-in id()
                confidence = confidences[i]

                results.append((class_id, self.labels[class_id], confidence, x, y, w, h))

        return iw, ih, inference_time, results
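Before wiring the detector into the webcam loop, it can be exercised on a single still image. A minimal sketch: the cross-hands config and weights paths match the ones used in the script below, while test.jpg is only a placeholder file name.

import cv2
from yolo import YOLO

yolo = YOLO("./models/cross-hands.cfg", "./models/cross-hands.weights", ["hand"])
img = cv2.imread("test.jpg")  # placeholder path; any BGR image works
width, height, inference_time, results = yolo.inference(img)
print("inference took %.3fs, %d hand(s) found" % (inference_time, len(results)))
for class_id, name, confidence, x, y, w, h in results:
    print(name, round(confidence, 2), (x, y, w, h))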
import cv2
import numpy as np
from yolo import YOLO
from keras.models import load_model
from PIL import Image
from keras.preprocessing import image
import random

new_model = load_model('Letter_Model_V3_999_1')
alph = 'ABCDEFGHIJKLMNOPQRSTUVWXY'
alph_dict = dict(enumerate(alph))  # class index -> letter
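The reshape in the loop below assumes the classifier takes 28x28 grayscale input and emits one score per letter in alph; both assumptions can be checked on the loaded model:

print(new_model.input_shape)   # assumed to be (None, 28, 28, 1)
print(new_model.output_shape)  # assumed to be (None, len(alph))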

camera = cv2.VideoCapture(0)
cv2.namedWindow("test")
img_counter = 0
letter = random.choice(alph)  # random.choice works on a string directly
# load the hand detector once, outside the loop -- constructing it per
# frame would re-read the Darknet weights on every iteration
yolo = YOLO("./models/cross-hands.cfg", "./models/cross-hands.weights", ["hand"])

while True:
    ret, frame = camera.read()
    if not ret:
        print("failed to grab frame")
        break
    #color = (0, 255, 255)
    #cv2.putText(frame, letter, (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
    #            0.5, color, 2)

    try:
        img_counter += 1
        print("frame:", img_counter)

        width, height, inference_time, results = yolo.inference(frame)
        for detection in results:
            class_id, name, confidence, x, y, w, h = detection
            cx = x + (w / 2)
            cy = y + (h / 2)
            # crop the hand with a 50 px margin, clamped so the slice
            # cannot run off the edges of the frame
            y1, y2 = max(0, y - 50), min(height, y + h + 50)
            x1, x2 = max(0, x - 50), min(width, x + w + 50)
            crop_img = frame[y1:y2, x1:x2]

            im = Image.fromarray(crop_img)
            im.save("your_file.png")
            im = image.load_img('your_file.png', target_size=(28, 28), color_mode='grayscale')
            new_img = image.img_to_array(im)
            cv2.imshow("cropped", new_img.astype("uint8"))

            # predict_classes was removed in recent Keras; an argmax over
            # predict() is the equivalent
            probs = new_model.predict(new_img.reshape(1, 28, 28, 1))
            the_class = int(np.argmax(probs, axis=-1)[0])
            text = alph_dict[the_class]
            # draw a bounding box rectangle and label on the image
            color = (0, 255, 255)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            #text = "%s (%s)" % (name, round(confidence, 2))
            cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, color, 2)

        cv2.imshow("preview", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    except Exception as e:
        # a bare except would silently hide every failure; report it instead
        print("skipping frame:", e)

camera.release()

cv2.destroyAllWindows()
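Without a webcam, the classification step can still be exercised on its own; a minimal sketch under the same input-shape assumption as above, reusing your_file.png, the crop written by the loop:

import numpy as np
from keras.models import load_model
from keras.preprocessing import image

model = load_model('Letter_Model_V3_999_1')
im = image.load_img('your_file.png', target_size=(28, 28), color_mode='grayscale')
arr = image.img_to_array(im).reshape(1, 28, 28, 1)
pred = int(np.argmax(model.predict(arr), axis=-1)[0])
print('ABCDEFGHIJKLMNOPQRSTUVWXY'[pred])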