Fix QThread
-
Please help me understand why nothing is happening, no errors are displayed, and the video is not displayed (((
I have two files, and it seems to me the problem is in video.py: I only call the constructor, so the object is built, the script exits, and nothing happens.
But how can I start a Thread from another file?

```python
from object_detection.video_detector import *
import os

videoPath = ""


def main_video_obj(videoPath, grid_video_detect, video_play_btn, video_stop_btn):
    rootDirectory = os.path.dirname(__file__)
    configPath = os.path.join(rootDirectory, "data", "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt")
    modelPath = os.path.join(rootDirectory, "data", "frozen_inference_graph.pb")
    classesPath = os.path.join(rootDirectory, "data", "coco.names")
    Video_Detector_obj(videoPath[0], configPath, modelPath, classesPath,
                       grid_video_detect, video_play_btn, video_stop_btn)


if __name__ == '__main__':
    main_video_obj()
```
video_detector.py
```python
class Video_Detector_obj(QWidget):
    def __init__(self, videoPath, configPath, modelPath, classesPath, grid_video_detect, video_play_btn, video_stop_btn):
        super().__init__()
        self.videoPath = videoPath
        self.configPath = configPath
        self.modelPath = modelPath
        self.classesPath = classesPath
        self.video_play_btn = video_play_btn
        self.video_stop_btn = video_stop_btn

        self.load_video = QLabel()

        self.th = Thread()
        self.th.finished.connect(self.close)
        self.th.updateFrame.connect(self.setImage)

        grid_video_detect.addWidget(self.load_video, 0, 1)

        video_play_btn.clicked.connect(self.start)
        video_stop_btn.clicked.connect(self.kill_thread)
        video_stop_btn.setEnabled(False)

    @Slot()
    def kill_thread(self):
        print("Finishing...")
        self.video_play_btn.setEnabled(True)
        self.video_stop_btn.setEnabled(False)
        self.th.cap.release()
        cv2.destroyAllWindows()
        self.status = False
        self.th.terminate()
        time.sleep(1)

    @Slot()
    def start(self):
        print("Starting...")
        self.video_play_btn.setEnabled(False)
        self.video_stop_btn.setEnabled(True)
        self.th.set_files(self.videoPath, self.configPath, self.modelPath, self.classesPath)
        self.th.start()

    @Slot(QImage)
    def setImage(self, image):
        self.load_video.setPixmap(QPixmap.fromImage(image))


class Thread(QThread):
    updateFrame = Signal(QImage)

    def __init__(self, parent=None):
        QThread.__init__(self, parent)
        #self.trained_file = None
        self.status = True
        self.cap = True
        self.videoPath = None
        self.configPath = None
        self.modelPath = None
        self.classesPath = None

    def set_files(self, videoPath, configPath, modelPath, classesPath):
        self.videoPath = videoPath
        self.configPath = configPath
        self.modelPath = modelPath
        self.classesPath = classesPath
        self.net = cv2.dnn_DetectionModel(self.modelPath, self.configPath)
        self.net.setInputSize(320, 320)
        self.net.setInputScale(1.0/127.5)
        self.net.setInputMean((127.5, 127.5, 127.5))
        self.net.setInputSwapRB(True)
        self.readClasses()

    def readClasses(self):
        with open(self.classesPath, 'r') as f:
            self.classesList = f.read().splitlines()
        self.classesList.insert(0, '__Background__')
        self.colorList = np.random.uniform(low=0, high=255, size=(len(self.classesList), 3))
        print(self.classesList)

    def run(self):
        self.cap = cv2.VideoCapture(self.videoPath)
        if (self.cap.isOpened() == False):
            print("Error opening file...")
            return
        startTime = 0
        while self.status:
            ret, frame = self.cap.read()
            if not ret:
                continue
            currentTime = time.time()
            fps = 1/(currentTime - startTime)
            startTime = currentTime

            classLabelIDs, confidences, bboxs = self.net.detect(frame, confThreshold=0.5)
            bboxs = list(bboxs)
            confidences = list(np.array(confidences).reshape(1, -1)[0])
            confidences = list(map(float, confidences))
            bboxIdx = cv2.dnn.NMSBoxes(bboxs, confidences, score_threshold=0.5, nms_threshold=0.2)

            if len(bboxIdx) != 0:
                for i in range(0, len(bboxIdx)):
                    bbox = bboxs[np.squeeze(bboxIdx[i])]
                    classConfidence = confidences[np.squeeze(bboxIdx[i])]
                    classLabelID = np.squeeze(classLabelIDs[np.squeeze(bboxIdx[i])])
                    classLabel = self.classesList[classLabelID]
                    classColor = [int(c) for c in self.colorList[classLabelID]]
                    displayText = "{}:{:.2f}".format(classLabel, classConfidence)
                    x, y, w, h = bbox
                    cv2.rectangle(frame, (x, y), (x+w, y+h), color=classColor, thickness=1)
                    cv2.putText(frame, displayText, (x, y-10), cv2.FONT_HERSHEY_PLAIN, 1, classColor, 2)

            cv2.putText(frame, "FPS: " + str(int(fps)), (20, 70), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
            #cv2.imshow("Result", image)
            color_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            color_frame = imutils.resize(frame, width=1500)
            h, w, ch = color_frame.shape
            image = QImage(color_frame.data, w, h, ch * w, QImage.Format_RGB888)
            self.updateFrame.emit(image)
        cv2.destroyAllWindows()
        sys.exit(-1)
```
-
Hi,
You don't create a QApplication object, you don't start the main event loop, you don't show your widget.
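In outline, the missing pieces would look something like this (a minimal sketch, not your actual main script; the window, layout, buttons and the video file name are placeholders):

```python
import os
import sys
from PySide6.QtWidgets import QApplication, QWidget, QGridLayout, QPushButton
from object_detection.video_detector import Video_Detector_obj

if __name__ == '__main__':
    app = QApplication(sys.argv)            # the application object that owns the event loop

    window = QWidget()                      # a top-level widget to host everything
    grid = QGridLayout(window)
    play_btn = QPushButton("Play")
    stop_btn = QPushButton("Stop")
    grid.addWidget(play_btn, 1, 0)
    grid.addWidget(stop_btn, 1, 1)

    root = os.path.dirname(__file__)
    detector = Video_Detector_obj(          # keep a reference so the object is not garbage-collected
        os.path.join(root, "video.mp4"),    # placeholder video path
        os.path.join(root, "data", "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"),
        os.path.join(root, "data", "frozen_inference_graph.pb"),
        os.path.join(root, "data", "coco.names"),
        grid, play_btn, stop_btn)

    window.show()                           # show the widget
    sys.exit(app.exec())                    # start the main event loop
```

Without the QApplication and app.exec(), the script simply falls off the end of main_video_obj() and exits, which would explain why nothing appears.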
-
What other file?
-
Please make a minimal script that uses your custom thread and a label so it can be checked and tested.
-
```python
from object_detection.video_detector import *
import os

videoPath = ""


def main_video_obj(videoPath, grid_video_detect, video_play_btn, video_stop_btn):
    rootDirectory = os.path.dirname(__file__)
    configPath = os.path.join(rootDirectory, "data", "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt")
    modelPath = os.path.join(rootDirectory, "data", "frozen_inference_graph.pb")
    classesPath = os.path.join(rootDirectory, "data", "coco.names")
    Video_Detector_obj(videoPath[0], configPath, modelPath, classesPath,
                       grid_video_detect, video_play_btn, video_stop_btn)
    #detector.onVideo(grid_video_detect)


if __name__ == '__main__':
    main_video_obj()
```
video_detector.py file
```python
class Video_Detector_obj(QWidget):
    def __init__(self, videoPath, configPath, modelPath, classesPath, grid_video_detect, video_play_btn, video_stop_btn):
        super().__init__()
        self.videoPath = videoPath
        self.configPath = configPath
        self.modelPath = modelPath
        self.classesPath = classesPath

        self.load_video = QLabel()

        self.th = Thread()
        self.th.finished.connect(self.close)
        self.th.updateFrame.connect(self.setImage)

        grid_video_detect.addWidget(self.load_video, 0, 1)

        video_play_btn.clicked.connect(self.start)
        video_stop_btn.clicked.connect(self.kill_thread)
        video_stop_btn.setEnabled(False)

    @Slot()
    def kill_thread(self):
        print("Finishing...")
        self.video_play_btn.setEnabled(True)
        self.video_stop_btn.setEnabled(False)
        self.th.cap.release()
        cv2.destroyAllWindows()
        self.status = False
        self.th.terminate()
        time.sleep(1)

    @Slot()
    def start(self):
        print("Starting...")
        self.video_play_btn.setEnabled(False)
        self.video_stop_btn.setEnabled(True)
        self.th.set_files(self.videoPath, self.configPath, self.modelPath, self.classesPath)
        self.th.start()

    @Slot(QImage)
    def setImage(self, image):
        self.load_video.setPixmap(QPixmap.fromImage(image))


class Thread(QThread):
    updateFrame = Signal(QImage)

    def __init__(self, parent=None):
        QThread.__init__(self, parent)
        self.status = True
        self.cap = True

    def set_files(self, videoPath, configPath, modelPath, classesPath):
        self.videoPath = videoPath
        self.configPath = configPath
        self.modelPath = modelPath
        self.classesPath = classesPath

    def run(self):
        self.cap = cv2.VideoCapture(0)
        startTime = 0
        while self.status:
            ret, frame = self.cap.read()
            if not ret:
                continue
            currentTime = time.time()
            fps = 1/(currentTime - startTime)
            startTime = currentTime

            classLabelIDs, confidences, bboxs = self.net.detect(frame, confThreshold=0.5)
            bboxs = list(bboxs)
            confidences = list(np.array(confidences).reshape(1, -1)[0])
            confidences = list(map(float, confidences))
            bboxIdx = cv2.dnn.NMSBoxes(bboxs, confidences, score_threshold=0.5, nms_threshold=0.2)

            if len(bboxIdx) != 0:
                for i in range(0, len(bboxIdx)):
                    bbox = bboxs[np.squeeze(bboxIdx[i])]
                    classConfidence = confidences[np.squeeze(bboxIdx[i])]
                    classLabelID = np.squeeze(classLabelIDs[np.squeeze(bboxIdx[i])])
                    classLabel = self.classesList[classLabelID]
                    classColor = [int(c) for c in self.colorList[classLabelID]]
                    displayText = "{}:{:.2f}".format(classLabel, classConfidence)

            #cv2.imshow("Result", image)
            color_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = color_frame.shape
            image = QImage(color_frame.data, w, h, ch * w, QImage.Format_RGB888)
            self.updateFrame.emit(image)
        sys.exit(-1)
```
Something like that; there may be errors related to the object-class files. If they get in the way, you can remove them. It would be great if you could get the Thread class running in video.py.
It can work without the detector; the main thing is that the camera feed appears.
Later I will add those object-class files to the run function.
-
@SGaist Thanks, I already solved the issue; it was a silly mistake. I was passing self.button instead of the button from the UI, so the QThread never started.
But now it remembers the previous video that I imported and starts it along with the new one.
Maybe I need Qt.ConnectionType.SingleShotConnection, but where do I put it? )))
Or should I somehow reload the QLabel or clear the previous video?
-
I don't see a reason for a single shot connection.
Implement proper clearing of the label, configuration of your detector, etc.
You really should write down the various workflows of your application.
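For the label/detector reset, one possible shape is the following (a rough sketch against the Video_Detector_obj and Thread classes posted above; set_video is a hypothetical helper, not an existing method):

```python
class Video_Detector_obj(QWidget):
    # ...__init__, start, kill_thread, setImage as in the earlier posts...

    def set_video(self, videoPath):
        # Hypothetical helper: switch cleanly to a newly selected video.
        if self.th.isRunning():
            self.th.status = False      # let run() fall out of its while-loop...
            self.th.wait()              # ...and wait for the worker to finish
        self.videoPath = videoPath      # remember the new source for the next start()
        self.load_video.clear()         # drop the last frame shown in the QLabel
        # start() calls set_files() again before th.start(), so the detector is
        # reconfigured for the new file when the play button is clicked
```

Stopping the worker through its status flag and wait() is also gentler than terminate(), which can leave the capture in an undefined state.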