Object detection with YOLO on 2 streaming cameras using multithreading and OpenCV

#python #opencv #object-detection #yolo

Question:

I want to run YOLO object detection on 2 streaming cameras. I am using multithreading, and the result is as shown in the video. With 2 camera threads the object detection goes wrong (lots of random rectangles), but when only 1 thread runs, detection works correctly. Why does this happen? Is it because both threads use the same GPU? Thank you for your help.
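To illustrate my guess: both threads run inference through the same global net, and only cvVideo.read() is protected by a lock in the code below. Here is a minimal sketch (the netLock name is hypothetical) of serializing the forward pass as well, in case concurrent net.forward() calls on one shared cv2.dnn network are the cause:

import threading
import cv2

netLock = threading.Lock()  # hypothetical lock guarding the shared network

def forward_locked(net, ln, blob):
    # Only one thread at a time may set the input and run the forward
    # pass on the shared cv2.dnn network.
    with netLock:
        net.setInput(blob)
        return net.forward(ln)

detect_people() would need to use a helper like this instead of calling net.forward() directly, so the two camera threads never run inference at the same time.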

video link: https://drive.google.com/file/d/1tayLjULfTbzfwbtgnfLkGVJ2YGSDWdd9/view?usp=sharing

Code:

from pengaturan import social_distancing_config as config
from pengaturan.detection import detect_people
from scipy.spatial import distance as dist
import numpy as np
import argparse
import imutils
import cv2
import os
import time # for the FPS calculator
import threading

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, default="",
    help="path to (optional) input video file")
ap.add_argument("-o", "--output", type=str, default="",
    help="path to (optional) output video file")
ap.add_argument("-d", "--display", type=int, default=1,
    help="whether or not output frame should be displayed")
args = vars(ap.parse_args())

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])

# load our YOLO object detector trained on COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# check if we are going to use GPU
if config.USE_GPU:
    # set CUDA as the preferable backend and target
    print("[INFO] setting preferable backend and target to CUDA...")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]

threadLock = threading.Lock()
# Locks are usually used to implement synchronized access to shared resources.
# Create a lock object for each shared resource, and call its acquire method
# when you need to access the resource (if another thread has already acquired
# the lock, the current thread has to wait for it to be released).
class myThread (threading.Thread):
    maxRetries=20
    def __init__(self, threadID, name,video_url):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.video_url=video_url

    def attemptRead(self,cvVideo):
        threadLock.acquire()
        (isRead,cvImage)=cvVideo.read()
        threadLock.release()
        if isRead==False:
            count=1
            while isRead==False and count<myThread.maxRetries:
                threadLock.acquire()
                (isRead,cvImage)=cvVideo.read()
                threadLock.release()
                print(self.name + ' try no: ', count)
                count += 1
        return (isRead,cvImage)

    def run(self):
        print ("Starting "   self.name)
        windowName = self.name
        cv2.namedWindow(windowName)
        vs = cv2.VideoCapture(self.video_url)
        fps = vs.get(cv2.CAP_PROP_FPS)
        print("Frames per second camera: {0}".format(fps))
        writer = None

        # FPS calculation section
        fps_start_time = 0
        fps = 0

        while True:
            # FPS calculation section
            fps_start_time = time.time()

            # read the next frame from the file
            (grabbed,frame)=self.attemptRead(vs)
            
            # if the frame was not grabbed, then we have reached the end
            # of the stream
            if grabbed==False:
                break

            # resize the frame and then detect people (and only people) in it
            frame = imutils.resize(frame, width=700)
            results = detect_people(frame, net, ln,
             personIdx=LABELS.index("person"))

            # initialize the set of indexes that violate the minimum social
            # distance
            violate = set()

            # ensure there are *at least* two people detections (required in
            # order to compute our pairwise distance maps)
            if len(results) >= 2:
                # extract all centroids from the results and compute the
                # Euclidean distances between all pairs of the centroids
                centroids = np.array([r[2] for r in results])
                D = dist.cdist(centroids, centroids, metric="euclidean")

                # loop over the upper triangular of the distance matrix
                for i in range(0, D.shape[0]):
                    for j in range(i + 1, D.shape[1]):
                        # check to see if the distance between any two
                        # centroid pairs is less than the configured number
                        # of pixels
                        if D[i, j] < config.MIN_DISTANCE:
                            # update our violation set with the indexes of
                            # the centroid pairs
                            violate.add(i)
                            violate.add(j)

            # loop over the results
            for (i, (prob, bbox, centroid)) in enumerate(results):
                # extract the bounding box and centroid coordinates, then
                # initialize the color of the annotation
                (startX, startY, endX, endY) = bbox
                (cX, cY) = centroid
                color = (0, 255, 0)

                # if the index pair exists within the violation set, then
                # update the color
                if i in violate:
                    color = (0, 0, 255)

                # draw (1) a bounding box around the person and (2) the
                # centroid coordinates of the person,
                cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
                cv2.circle(frame, (cX, cY), 5, color, 1)

            # draw the total number of social distancing violations on the
            # output frame
            text = "Pelanggaran Physical Distancing : {}".format(len(violate))
            cv2.putText(frame, text, (10, frame.shape[0] - 25),
                cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)

            # FPS calculation section
            fps_end_time = time.time()
            time_diff = fps_end_time - fps_start_time
            fps = 1/(time_diff)

            fps_text = "FPS: {:.2f}".format(fps)

            cv2.putText(frame, fps_text, (5,30), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,255), 1)

            if args["display"] > 0:
                # show the output frame
                cv2.imshow(windowName, frame)
                key = cv2.waitKey(1) & 0xFF

                # if the `q` key was pressed, break from the loop
                if key == ord("q") or key==27:
                    break

        cv2.destroyWindow(windowName)
        print(self.name + " Exiting")

def main():
    thread1 = myThread(1, "Thread1", 0)
    thread2 = myThread(2, "Thread2", 'http://192.168.1.7:8080/video')
    # thread3 = myThread(3, "Thread3", 'http://192.168.43.1:8080/video')

    thread1.start()
    thread2.start()
    # thread3.start()

print ("Exiting Main Thread")

if __name__ == '__main__':
    main()
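For completeness, here is a sketch of the alternative I am wondering about: giving each thread its own network instance so no cv2.dnn state is shared between the camera threads. It reuses configPath, weightsPath, and config from the code above:

def load_net():
    # Build a separate YOLO network for the calling thread; nothing is
    # shared with the other thread, unlike the single global `net` above.
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
    if config.USE_GPU:
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    ln = net.getLayerNames()
    ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]
    return net, ln

Each thread would call load_net() at the top of run() and pass its own net and ln to detect_people() instead of the globals.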