Trying to trigger some kind of alarm when a person is detected

Problem description · votes: 0 · answers: 1

Using a TensorFlow Lite model, I want to add some kind of alarm that goes off whenever a person is detected while the window is running, whether a flashing colored border or an audio sound. I have tried various approaches, and all of them hit the same error: TypeError: 'DetectionResult' object is not iterable. I am currently running this on a Raspberry Pi 4 with Raspbian GNU/Linux 11 (Bullseye).

import argparse
import sys
import time
import cv2
import pygame.mixer
from tflite_support.task import core
from tflite_support.task import processor
from tflite_support.task import vision
import utils

def run(model: str, camera_id: int, width: int, height: int, num_threads: int,
        enable_edgetpu: bool, alarm_label: str) -> None:

    # Variables to calculate FPS
    counter, fps = 0, 0
    start_time = time.time()

    # Start capturing video input from the camera
    cap = cv2.VideoCapture(camera_id)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    # Visualization parameters
    row_size = 20  # pixels
    left_margin = 24  # pixels
    text_color = (0, 0, 255)  # red
    font_size = 1
    font_thickness = 1
    fps_avg_frame_count = 10

    # Initialize the object detection model
    base_options = core.BaseOptions(
        file_name=model, use_coral=enable_edgetpu, num_threads=num_threads)
    detection_options = processor.DetectionOptions(
        max_results=3, score_threshold=0.3)
    options = vision.ObjectDetectorOptions(
        base_options=base_options, detection_options=detection_options)
    detector = vision.ObjectDetector.create_from_options(options)

    # Initialize pygame mixer
    pygame.mixer.init()
    alarm_sound = pygame.mixer.Sound("alarm1.wav")  # Load the alarm sound

    # Continuously capture images from the camera and run inference
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            sys.exit(
                'ERROR: Unable to read from webcam. Please verify your webcam settings.'
            )

        counter += 1
        image = cv2.flip(image, 1)

        # Convert the image from BGR to RGB as required by the TFLite model.
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Create a TensorImage object from the RGB image.
        input_tensor = vision.TensorImage.create_from_array(rgb_image)

        # Run object detection estimation using the model.
        detection_result = detector.detect(input_tensor)

        # Check if the specific label is detected
        alarm_triggered = False
        for obj in detection_result:
            if obj.class_name == alarm_label:
                alarm_triggered = True
                # Trigger the alarm sound
                alarm_sound.play()

        # Draw keypoints and edges on input image
        image = utils.visualize(image, detection_result)

        # Calculate the FPS
        if counter % fps_avg_frame_count == 0:
            end_time = time.time()
            fps = fps_avg_frame_count / (end_time - start_time)
            start_time = time.time()

        # Show the FPS
        fps_text = 'FPS = {:.1f}'.format(fps)
        text_location = (left_margin, row_size)
        cv2.putText(image, fps_text, text_location, cv2.FONT_HERSHEY_PLAIN,
                    font_size, text_color, font_thickness)

        # Trigger alarm visualization (e.g., change border color of the frame)
        if alarm_triggered:
            cv2.rectangle(image, (0, 0), (width, height), (0, 0, 255), 2)  # Red border

        # Stop the program if the ESC key is pressed.
        if cv2.waitKey(1) == 27:
            break
        cv2.imshow('object_detector', image)

    # Release the camera and close the window
    cap.release()
    cv2.destroyAllWindows()

def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--model',
        help='Path of the object detection model.',
        required=False,
        default='efficientdet_lite0.tflite')
    parser.add_argument(
        '--cameraId', help='Id of camera.', required=False, type=int, default=0)
    parser.add_argument(
        '--frameWidth',
        help='Width of frame to capture from camera.',
        required=False,
        type=int,
        default=600)
    parser.add_argument(
        '--frameHeight',
        help='Height of frame to capture from camera.',
        required=False,
        type=int,
        default=480)
    parser.add_argument(
        '--numThreads',
        help='Number of CPU threads to run the model.',
        required=False,
        type=int,
        default=4)
    parser.add_argument(
        '--enableEdgeTPU',
        help='Whether to run the model on EdgeTPU.',
        action='store_true',
        required=False,
        default=False)
    parser.add_argument(
        '--alarmLabel',
        help='Label to trigger alarm.',
        required=False,
        type=str,
        default='person')
    args = parser.parse_args()

    run(args.model, args.cameraId, args.frameWidth, args.frameHeight,
        args.numThreads, args.enableEdgeTPU, args.alarmLabel)

if __name__ == '__main__':
    main()
Tags: python, raspberry-pi4, tflite
1 Answer

0 votes

The DetectionResult object itself is not iterable. It has a detections attribute, which contains the list of detected objects. So change

for obj in detection_result:

to

for obj in detection_result.detections:

See the documentation for the DetectionResult class.
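Note that even after that change, the check inside the loop will likely still fail: in the tflite_support task API, a Detection object has no class_name attribute; its labels live in a categories list of Category objects, each carrying a category_name field. A minimal sketch of the corrected check, assuming the standard tflite_support.task.processor types used in the question:

# Look for the target label among the detections.
alarm_triggered = False
for detection in detection_result.detections:
    for category in detection.categories:
        if category.category_name == alarm_label:
            alarm_triggered = True
            alarm_sound.play()  # sound the alarm

With max_results=3 in the DetectionOptions, detection_result.detections holds at most three Detection objects per frame, so the nested loop stays cheap.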
