
General discussion • Object Detection with Servo motor fps drops

Hi, I'm trying the EdjeElectronics object detection code with servos for my mini project, but when I include the servo code in the object detection script, my FPS drops from 3-4 down to 0.9-1. I don't know much about coding; I'm just trying this as a hobby and I'm still a beginner, so I'm here to get some help. Thanks.

Here's the code:

Code:

# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
from threading import Thread
import importlib.util
import RPi.GPIO as GPIO
import time

### User-defined variables

# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
                    required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
                    default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
                    default='labelmap.txt')
args = parser.parse_args()

# Model info
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
use_TPU = False

# Program settings
min_conf_threshold = 0.80
resW, resH = 960, 540 # Resolution to run camera at
imW, imH = resW, resH

### Set up model parameters

# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
    from tflite_runtime.interpreter import Interpreter
    if use_TPU:
        from tflite_runtime.interpreter import load_delegate
else:
    from tensorflow.lite.python.interpreter import Interpreter
    if use_TPU:
        from tensorflow.lite.python.interpreter import load_delegate

# Get path to current working directory
CWD_PATH = os.getcwd()

# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)

# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)

# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
    labels = [line.strip() for line in f.readlines()]

### Load Tensorflow Lite model
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()

# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]

floating_model = (input_details[0]['dtype'] == np.float32)

input_mean = 127.5
input_std = 127.5

# Check output layer name to determine if this model was created with TF2 or TF1,
# because outputs are ordered differently for TF2 and TF1 models
outname = output_details[0]['name']

if ('StatefulPartitionedCall' in outname): # This is a TF2 model
    boxes_idx, classes_idx, scores_idx = 1, 3, 0
else: # This is a TF1 model
    boxes_idx, classes_idx, scores_idx = 0, 1, 2

# Initialize camera
cap = cv2.VideoCapture(0)
ret = cap.set(3, resW)
ret = cap.set(4, resH)

# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()

# Set up servos (50 Hz PWM on BCM pins 12 and 13)
GPIO.setmode(GPIO.BCM)
servo1_PIN = 12
servo2_PIN = 13
GPIO.setup(servo1_PIN, GPIO.OUT)
GPIO.setup(servo2_PIN, GPIO.OUT)
servo1_pwm = GPIO.PWM(servo1_PIN, 50)
servo2_pwm = GPIO.PWM(servo2_PIN, 50)
servo1_pwm.start(0)
servo2_pwm.start(0)

### Continuously process frames from camera
while True:

    # Start timer (for calculating frame rate)
    t1 = cv2.getTickCount()

    # Reset trash value count for this frame

    # Grab frame from camera
    hasFrame, frame1 = cap.read()
    if not hasFrame: # Stop if the camera read failed
        break

    # Acquire frame and resize to input shape expected by model [1xHxWx3]
    frame = frame1.copy()
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)

    # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std

    # Perform detection by running the model with the image as input
    interpreter.set_tensor(input_details[0]['index'],input_data)
    interpreter.invoke()

    # Retrieve detection results
    boxes = interpreter.get_tensor(output_details[boxes_idx]['index'])[0] # Bounding box coordinates of detected objects
    classes = interpreter.get_tensor(output_details[classes_idx]['index'])[0] # Class index of detected objects
    scores = interpreter.get_tensor(output_details[scores_idx]['index'])[0] # Confidence of detected objects

    # Loop over all detections and process each detection if its confidence is above minimum threshold
    for i in range(len(scores)):
        if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):

            # Get bounding box coordinates
            # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
            ymin = int(max(1,(boxes[i][0] * imH)))
            xmin = int(max(1,(boxes[i][1] * imW)))
            ymax = int(min(imH,(boxes[i][2] * imH)))
            xmax = int(min(imW,(boxes[i][3] * imW)))

            # Draw bounding box
            cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)

            # Get object's name and draw label
            object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
            label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'paper: 72%'
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
            label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
            cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
            cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text

            # Move servos depending on the detected class
            if object_name == 'paper':
                servo2_pwm.ChangeDutyCycle(5)
                time.sleep(2)
                servo1_pwm.ChangeDutyCycle(10)
                servo1_pwm.ChangeDutyCycle(7.5)
                servo2_pwm.ChangeDutyCycle(7.5)
            elif object_name == 'plastic':
                servo2_pwm.ChangeDutyCycle(10)
                time.sleep(2)
                servo1_pwm.ChangeDutyCycle(10)
                servo1_pwm.ChangeDutyCycle(7.5)
                servo2_pwm.ChangeDutyCycle(7.5)
            elif object_name == 'metal':
                servo2_pwm.ChangeDutyCycle(5)
                time.sleep(2)
                servo1_pwm.ChangeDutyCycle(10)
                servo1_pwm.ChangeDutyCycle(7.5)
                servo2_pwm.ChangeDutyCycle(7.5)
            elif object_name == 'other_waste':
                servo2_pwm.ChangeDutyCycle(10)
                time.sleep(2)
                servo1_pwm.ChangeDutyCycle(10)
                servo1_pwm.ChangeDutyCycle(7.5)
                servo2_pwm.ChangeDutyCycle(7.5)

    # Draw framerate in corner of frame (dark outline, then light text)
    cv2.putText(frame,'FPS: %.2f' % frame_rate_calc,(20,50),cv2.FONT_HERSHEY_PLAIN,1,(0,0,0),4,cv2.LINE_AA)
    cv2.putText(frame,'FPS: %.2f' % frame_rate_calc,(20,50),cv2.FONT_HERSHEY_PLAIN,1,(230,230,230),2,cv2.LINE_AA)

    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Eco-Bin', frame)

    # Calculate framerate
    t2 = cv2.getTickCount()
    time1 = (t2-t1)/freq
    frame_rate_calc = 1/time1

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break

# Clean up
cv2.destroyAllWindows()
cap.release()
servo1_pwm.stop()
servo2_pwm.stop()
GPIO.cleanup()
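
The big hit is almost certainly the time.sleep(2) inside each servo branch: it runs on the same thread as the camera loop, so any frame with a confident detection stalls the whole pipeline for over two seconds, and that stall is counted straight into the FPS figure. (RPi.GPIO also generates its PWM in software, which costs a little CPU on its own.) One common workaround is to hand the servo motion to a background thread so the camera keeps grabbing frames while the bin sorts. The sketch below is a minimal illustration, not a drop-in patch: it assumes the servo1_pwm/servo2_pwm objects and duty cycles from the script above, and the names move_servos, servo_busy, and DUTY_BY_CLASS are invented for this example, not part of the original code.

Code:

# A minimal sketch: run the blocking servo sequence on a background thread so
# the detection loop never stalls on time.sleep(). move_servos, servo_busy,
# and DUTY_BY_CLASS are illustrative names; servo1_pwm and servo2_pwm are the
# PWM objects created in the script above.
import threading
import time

servo_busy = threading.Event()  # set while a sorting motion is in progress

def move_servos(servo2_duty):
    try:
        servo2_pwm.ChangeDutyCycle(servo2_duty)
        time.sleep(2)  # the wait now happens off the main thread
        servo1_pwm.ChangeDutyCycle(10)
        servo1_pwm.ChangeDutyCycle(7.5)
        servo2_pwm.ChangeDutyCycle(7.5)
    finally:
        servo_busy.clear()  # allow the next detection to trigger a motion

# Inside the detection loop, the four if/elif servo branches collapse to:
DUTY_BY_CLASS = {'paper': 5, 'plastic': 10, 'metal': 5, 'other_waste': 10}
if object_name in DUTY_BY_CLASS and not servo_busy.is_set():
    servo_busy.set()
    threading.Thread(target=move_servos,
                     args=(DUTY_BY_CLASS[object_name],),
                     daemon=True).start()

The threading.Event acts as a simple latch so that a detection on the next frame cannot re-trigger the servos mid-motion. If the frame rate still lags after this change, a DMA-timed servo driver such as pigpio's set_servo_pulsewidth() may be worth trying, since it produces steadier pulses with less CPU load than RPi.GPIO's software PWM.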

Statistics: Posted by Rynch — Mon Apr 01, 2024 2:02 am


