Here's an example...
Uses Pi5, Hailo, Pan and Tilt https://shop.pimoroni.com/products/pan- ... 2408353287
tested with Pi v2 camera
It will return the camera to its centre position if it fails to detect the required object (currently clock or person).
Will capture h264 videos to RAM, then convert them to mp4 and move them to /home/<user>/Videos.
Code:
#!/usr/bin/env python3
"""Example module for Hailo Detection with tracking.

Runs YOLO inference on a Hailo accelerator from a Picamera2 lores stream,
pans/tilts the camera (Pimoroni pantilthat) to follow a detected object,
records h264 clips to RAM (/run/shm) with a circular pre-detection buffer,
then converts them to mp4 and moves them to ~/Videos.
"""
import argparse
import cv2
from picamera2 import MappedArray, Picamera2, Preview
from picamera2.devices import Hailo
from picamera2.encoders import H264Encoder
from picamera2.outputs import CircularOutput
from libcamera import controls
import time
import pantilthat
import datetime
import os
import glob
import shutil

# objects to detect (class names that trigger tracking/recording)
objects = ["clock", "person"]

# centralise pan and tilt camera
hposn = 0
vposn = 0
pantilthat.pan(hposn)
pantilthat.tilt(vposn)

# initialise
v_width = 1280    # video width
v_height = 960    # video height
v_length = 5      # seconds, minimum video length
pre_frames = 5    # seconds, defines length of pre-detection buffer
fps = 25          # video frame rate
mp4_fps = 25      # mp4 frame rate
mp4_timer = 10    # seconds, convert h264s to mp4s after this time if no detections
mp4_anno = 1      # show time on video, 1 = yes, 0 = no
show_detects = 1  # show detections on video, 1 = yes, 0 = no
ram_limit = 150   # stop recording when free RAM (in ~MB units below) drops to this
Users = []
Users.append(os.getlogin())
user = Users[0]
h_user = "/home/" + os.getlogin()

# mp4 annotation (timestamp overlay) parameters
colour = (255, 255, 255)
origin = (184, int(v_height - 35))
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1
thickness = 2


def Camera_Version():
    """Detect the attached camera model via rpicam-vid and store it in cam1.

    Writes the camera list to /run/shm/libcams.txt (renaming any previous
    copy) and extracts the sensor name from the third line.
    # NOTE(review): camstxt[2][4:10] assumes the rpicam-vid output layout
    # (sensor name on line 3, columns 5-10) — verify on other OS releases.
    """
    global cam1
    if os.path.exists('/run/shm/libcams.txt'):
        os.rename('/run/shm/libcams.txt', '/run/shm/oldlibcams.txt')
    os.system("rpicam-vid --list-cameras >> /run/shm/libcams.txt")
    time.sleep(0.5)
    # read libcams.txt file
    camstxt = []
    with open("/run/shm/libcams.txt", "r") as file:
        line = file.readline()
        while line:
            camstxt.append(line.strip())
            line = file.readline()
    cam1 = camstxt[2][4:10]


Camera_Version()


def extract_detections(hailo_output, w, h, class_names, threshold=0.5):
    """Extract detections from the HailoRT-postprocess output.

    Returns a list of [class_name, (x0, y0, x1, y1), score] entries with the
    normalised bboxes scaled to a w x h frame; detections scoring below
    threshold are dropped.
    """
    results = []
    for class_id, detections in enumerate(hailo_output):
        for detection in detections:
            score = detection[4]
            if score >= threshold:
                # Hailo postprocess emits (y0, x0, y1, x1) normalised coords
                y0, x0, y1, x1 = detection[:4]
                bbox = (int(x0 * w), int(y0 * h), int(x1 * w), int(y1 * h))
                results.append([class_names[class_id], bbox, score])
    return results


# apply timestamp to videos
def apply_timestamp(request):
    """Picamera2 pre_callback: draw the current date/time onto the main stream."""
    global mp4_anno
    if mp4_anno == 1:
        timestamp = time.strftime("%Y/%m/%d %T")
        with MappedArray(request, "main") as m:
            # black background rectangle behind the text for readability
            lst = list(origin)
            lst[0] += 365
            lst[1] -= 20
            end_point = tuple(lst)
            cv2.rectangle(m.array, origin, end_point, (0, 0, 0), -1)
            cv2.putText(m.array, timestamp, origin, font, scale, colour, thickness)


def draw_objects(request):
    """Picamera2 pre_callback: draw the latest detection boxes and labels."""
    # snapshot the module-level detections list (updated by the main loop)
    current_detections = detections
    if current_detections:
        with MappedArray(request, "main") as m:
            for class_name, bbox, score in current_detections:
                x0, y0, x1, y1 = bbox
                label = f"{class_name} %{int(score * 100)}"
                cv2.rectangle(m.array, (x0, y0), (x1, y1), (0, 255, 0, 0), 2)
                cv2.putText(m.array, label, (x0 + 5, y0 + 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0, 0), 1, cv2.LINE_AA)


if __name__ == "__main__":
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(description="Detection Example")
    parser.add_argument("-m", "--model", help="Path for the HEF model.",
                        default="/usr/share/hailo-models/yolov8s_h8l.hef")
    parser.add_argument("-l", "--labels", default="coco.txt",
                        help="Path to a text file containing labels.")
    parser.add_argument("-s", "--score_thresh", type=float, default=0.5,
                        help="Score threshold, must be a float between 0 and 1.")
    args = parser.parse_args()

    # Get the Hailo model, the input size it wants, and the size of our preview stream.
    with Hailo(args.model) as hailo:
        model_h, model_w, _ = hailo.get_input_shape()
        video_w, video_h = v_width, v_height

        # Load class names from the labels file
        with open(args.labels, 'r', encoding="utf-8") as f:
            class_names = f.read().splitlines()

        # The list of detected objects to draw.
        detections = None
        encoding = False

        # get free ram space (scaled to ~MB; compared against ram_limit)
        st = os.statvfs("/run/shm/")
        freeram = (st.f_bavail * st.f_frsize) / 1100000

        # Configure and start Picamera2.
        with Picamera2() as picam2:
            main = {'size': (video_w, video_h), 'format': 'XRGB8888'}
            lores = {'size': (model_w, model_h), 'format': 'RGB888'}
            if cam1 == "imx708":
                # Pi Camera v3: enable continuous autofocus
                controls2 = {'FrameRate': fps,
                             "AfMode": controls.AfModeEnum.Continuous,
                             "AfTrigger": controls.AfTriggerEnum.Start}
            else:
                controls2 = {'FrameRate': fps}
            config = picam2.create_preview_configuration(main, lores=lores, controls=controls2)
            picam2.configure(config)
            encoder = H264Encoder(2000000, repeat=True)
            # circular buffer keeps pre_frames seconds of video before a detection
            encoder.output = CircularOutput(buffersize=pre_frames * fps)
            picam2.pre_callback = apply_timestamp
            picam2.start_preview(Preview.QTGL, x=0, y=0, width=480, height=480)
            picam2.start()
            picam2.start_encoder(encoder)
            encoding = False
            if show_detects == 1:
                # NOTE(review): this REPLACES apply_timestamp as the sole
                # pre_callback, so the timestamp overlay is lost whenever
                # show_detects == 1 — confirm whether that is intended.
                picam2.pre_callback = draw_objects
            start = time.monotonic()
            startmp4 = time.monotonic()

            # Process each low resolution camera frame.
            while True:
                frame = picam2.capture_array('lores')

                # refresh free-RAM figure each pass so the ram_limit guards
                # below react as recordings accumulate in /run/shm
                # (originally computed once before the loop and never updated)
                st = os.statvfs("/run/shm/")
                freeram = (st.f_bavail * st.f_frsize) / 1100000

                # Run inference on the preprocessed frame
                results = hailo.run(frame)

                # Extract detections from the inference results
                detections = extract_detections(results, video_w, video_h,
                                                class_names, args.score_thresh)
                # detection
                if len(detections) != 0:
                    for e in range(0, len(detections)):
                        if detections[e][0] in objects:
                            value = float(detections[e][2])
                            obj = detections[e][0]
                            if value < 1 and value > args.score_thresh:
                                start = time.monotonic()
                                startrec = time.monotonic()
                                startmp4 = time.monotonic()
                                # start recording
                                if not encoding and freeram > ram_limit:
                                    now = datetime.datetime.now()
                                    timestamp = now.strftime("%y%m%d_%H%M%S")
                                    encoder.output.fileoutput = "/run/shm/" + str(timestamp) + '.h264'
                                    encoder.output.start()
                                    encoding = True
                                    print("Start Record", timestamp)
                                # determine object horizontal position
                                # (bbox centre; dead-band of +/-50px around frame centre)
                                hpos = int((detections[e][1][0] + detections[e][1][2]) / 2)
                                hpos2 = hpos / 400
                                hpos2 = min(hpos2, 1)
                                if hpos > (v_width / 2) + 50:
                                    hposn -= hpos2
                                elif hpos < (v_width / 2) - 50:
                                    hposn += hpos2
                                # re-centre if pan drifts past the hat's +/-90 deg range
                                if hposn > 90 or hposn < -90:
                                    hposn = 0
                                # determine object vertical position
                                vpos = int((detections[e][1][1] + detections[e][1][3]) / 2)
                                vpos2 = vpos / 320
                                vpos2 = min(vpos2, 1)
                                if vpos > (v_height / 2) + 50:
                                    vposn += vpos2
                                elif vpos < (v_height / 2) - 50:
                                    vposn -= vpos2
                                # re-centre if tilt drifts past +/-30 deg
                                if vposn > 30 or vposn < -30:
                                    vposn = 0
                                # pan and tilt
                                pantilthat.pan(hposn)
                                pantilthat.tilt(vposn)
                else:
                    # centralise if object lost for greater than 2 seconds
                    if time.monotonic() - start > 2:
                        print("lost")
                        pantilthat.pan(0)
                        pantilthat.tilt(0)
                        hposn = 0
                        vposn = 0
                        start = time.monotonic()

                # stop recording once the clip reaches v_length seconds since
                # the last detection, or RAM runs low
                if encoding and (time.monotonic() - startrec > v_length or freeram <= ram_limit):
                    now = datetime.datetime.now()
                    timestamp2 = now.strftime("%y%m%d_%H%M%S")
                    print("Stopped Record", timestamp2)
                    encoder.output.stop()
                    encoding = False
                    startmp4 = time.monotonic()

                # make mp4s after mp4_timer idle seconds
                if time.monotonic() - startmp4 > mp4_timer and not encoding:
                    startmp4 = time.monotonic()
                    # convert h264 to mp4 (stream copy, no re-encode)
                    h264s = glob.glob('/run/shm/2*.h264')
                    h264s.sort(reverse=False)
                    for x in range(0, len(h264s)):
                        print(h264s[x][:-5] + '.mp4')
                        cmd = ('ffmpeg -framerate ' + str(mp4_fps) + ' -i ' + h264s[x] +
                               " -c copy " + h264s[x][:-5] + '.mp4')
                        os.system(cmd)
                        os.remove(h264s[x])
                        print("Saved", h264s[x][:-5] + '.mp4')
                    Videos = glob.glob('/run/shm/*.mp4')
                    Videos.sort()
                    # move Video RAM mp4s to SD card
                    for xx in range(0, len(Videos)):
                        # check the destination FILENAME, not the full RAM path:
                        # the original concatenated h_user + '//Videos/' +
                        # '/run/shm/xxx.mp4', so the exists() check was never True
                        dest = os.path.join(h_user, "Videos", os.path.basename(Videos[xx]))
                        if not os.path.exists(dest):
                            shutil.move(Videos[xx], h_user + '/Videos/')

# Statistics: Posted by gordon77 — Sun Jul 06, 2025 1:29 pm