Learn practical skills, build real-world projects, and advance your career
# jovian is a notebook-hosting service; commit() uploads the current notebook.
# environment=None skips capturing the conda/pip environment snapshot.
import jovian
jovian.commit(environment=None)
[jovian] Attempting to save notebook...
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from keras.preprocessing import image
from keras.models import load_model
import random 
Using TensorFlow backend.
class YOLO:
    """Thin wrapper around OpenCV's Darknet DNN backend for YOLO detection.

    Loads a Darknet config/weights pair and exposes `inference`, which
    returns filtered, NMS-suppressed detections for a single BGR image.
    """

    def __init__(self, config, model, labels, size=416, confidence=0.5, threshold=0.3):
        """
        Args:
            config: path to the Darknet .cfg file.
            model: path to the trained .weights file.
            labels: list of class names, indexed by class id.
            size: square side length the input image is resized to.
            confidence: minimum class score for a detection to be kept.
            threshold: IoU threshold used by non-maximum suppression.
        """
        self.confidence = confidence
        self.threshold = threshold
        self.size = size
        self.labels = labels
        self.net = cv2.dnn.readNetFromDarknet(config, model)

    def inference(self, image):
        """Run the network on one BGR image.

        Returns:
            (image_width, image_height, inference_time_seconds, results)
            where results is a list of tuples
            (class_id, label, confidence, x, y, w, h) in pixel coordinates.
        """
        ih, iw = image.shape[:2]

        ln = self.net.getLayerNames()
        # OpenCV < 4.5.4 returns [[i], ...] here; >= 4.5.4 returns a flat
        # 1-D array of scalars.  Flattening handles both layouts, whereas
        # the common `i[0]` idiom crashes on newer OpenCV.
        out_layer_ids = np.asarray(self.net.getUnconnectedOutLayers()).flatten()
        ln = [ln[int(i) - 1] for i in out_layer_ids]

        blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (self.size, self.size), swapRB=True, crop=False)
        self.net.setInput(blob)
        start = time.time()
        layerOutputs = self.net.forward(ln)
        end = time.time()
        inference_time = end - start

        boxes = []
        confidences = []
        classIDs = []

        for output in layerOutputs:
            for detection in output:
                # Layout per detection: [cx, cy, w, h, objectness, class scores...]
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]
                if confidence > self.confidence:
                    # Network outputs are normalized to [0, 1]; scale back
                    # to pixel units and convert center/size -> top-left/size.
                    box = detection[0:4] * np.array([iw, ih, iw, ih])
                    (centerX, centerY, width, height) = box.astype("int")
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)

        # Older OpenCV raises when NMSBoxes gets an empty box list, so guard.
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confidence, self.threshold) if boxes else []

        results = []
        if len(idxs) > 0:
            # NMSBoxes also changed its return shape across OpenCV versions;
            # flatten to a plain 1-D index array either way.
            for i in np.asarray(idxs).flatten():
                x, y = (boxes[i][0], boxes[i][1])
                w, h = (boxes[i][2], boxes[i][3])
                class_id = classIDs[i]  # renamed from `id` (shadowed the builtin)
                confidence = confidences[i]

                results.append((class_id, self.labels[class_id], confidence, x, y, w, h))

        return iw, ih, inference_time, results
# --- Video inference: annotate detections frame-by-frame and save the result ---
cap = cv2.VideoCapture('vid1.mp4')
img_counter = 0
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# NOTE(review): XVID fourcc is normally paired with an .avi container;
# some builds will not produce a playable .mp4 with it — verify the output file.
out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640, 480))
yolo = YOLO("yolov3_V1_config.cfg", "yolov3_V1_config_1000.weights", ["Poacher"])
while True:
    ret, frame = cap.read()
    # When the video ends (or the file is missing) read() returns (False, None);
    # without this guard, inference() crashes on the None frame.
    if not ret:
        break
    img_counter += 1
    print("frame: ", img_counter)
    width, height, inference_time, results = yolo.inference(frame)
    for detection in results:
        class_id, name, confidence, x, y, w, h = detection
        color = (0, 255, 255)
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
        cv2.putText(frame, 'Poacher', (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    # VideoWriter silently drops frames whose size differs from the (640, 480)
    # declared at construction, so resize explicitly before writing.
    out.write(cv2.resize(frame, (640, 480)))
    prv = cv2.resize(frame, (500, 500))
    cv2.imshow("preview", prv)
    # Mask to the low byte: waitKey can return values with high bits set
    # on some platforms, which would make the == comparison never match.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
frame: 1 frame: 2 frame: 3 frame: 4 frame: 5 frame: 6 frame: 7 frame: 8 frame: 9 frame: 10 frame: 11 frame: 12 frame: 13 frame: 14 frame: 15 frame: 16 frame: 17 frame: 18 frame: 19 frame: 20 frame: 21 frame: 22 frame: 23 frame: 24 frame: 25 frame: 26 frame: 27 frame: 28 frame: 29 frame: 30 frame: 31 frame: 32 frame: 33 frame: 34 frame: 35 frame: 36 frame: 37 frame: 38 frame: 39 frame: 40 frame: 41 frame: 42 frame: 43 frame: 44 frame: 45 frame: 46 frame: 47 frame: 48 frame: 49 frame: 50 frame: 51 frame: 52 frame: 53 frame: 54 frame: 55 frame: 56 frame: 57 frame: 58 frame: 59 frame: 60 frame: 61 frame: 62 frame: 63 frame: 64 frame: 65 frame: 66 frame: 67 frame: 68 frame: 69 frame: 70 frame: 71 frame: 72 frame: 73 frame: 74 frame: 75 frame: 76 frame: 77 frame: 78 frame: 79 frame: 80 frame: 81 frame: 82 frame: 83 frame: 84 frame: 85 frame: 86 frame: 87 frame: 88 frame: 89 frame: 90 frame: 91 frame: 92 frame: 93 frame: 94 frame: 95 frame: 96 frame: 97 frame: 98 frame: 99 frame: 100 frame: 101 frame: 102 frame: 103 frame: 104 frame: 105 frame: 106 frame: 107 frame: 108 frame: 109 frame: 110 frame: 111 frame: 112 frame: 113 frame: 114 frame: 115 frame: 116 frame: 117 frame: 118 frame: 119 frame: 120 frame: 121 frame: 122 frame: 123 frame: 124 frame: 125 frame: 126 frame: 127 frame: 128 frame: 129 frame: 130 frame: 131 frame: 132 frame: 133 frame: 134 frame: 135 frame: 136 frame: 137
# --- Single-image inference on a test picture ---
frame = cv2.imread('C:\\Users\\vsneh\\Udemy-notebooks\\Poacher Detection\\test_images\\pic1.jpg')
# imread returns None (no exception) on a bad path; fail loudly instead of
# crashing later inside inference() with an opaque attribute error.
if frame is None:
    raise FileNotFoundError("test image not found: pic1.jpg")

yolo = YOLO("yolov3_V1_config.cfg", "yolov3_V1_config_1000.weights", ["Poacher"])
width, height, inference_time, results = yolo.inference(frame)

for detection in results:
    # `class_id` instead of `id` (builtin); the original also aliased the image
    # as `x` and then clobbered that alias with this loop's x-coordinate.
    class_id, name, confidence, x, y, w, h = detection
    color = (0, 255, 255)
    cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
    cv2.putText(frame, 'Poacher', (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    print(x, y)

frame = cv2.resize(frame, (250, 250))
cv2.imshow("preview", frame)
cv2.waitKey(0)
# Close the preview window once a key is pressed.
cv2.destroyAllWindows()
-1