Jan-20-2024, 10:50 PM
Working on a robot, using a esp32cam live feed, it's supposed to draw bounding boxes for detected objects with the label name "cup", can't get the boxes to display in imshow. Please help.
Main Loop:
Captures a live video feed from the ESP32-CAM, decodes the JPEG image, and performs YOLOv8 object detection. Applies image processing techniques such as Gaussian blur, edge detection (Canny), and contour finding to detect obstacles. Sorts the obstacles by distance and sends steering commands to avoid them. Detects and processes objects labeled as "cup" by YOLOv8, drawing bounding boxes and calculating the average centroid of detected. Sends steering commands based on the average centroid of all detected. Displays the processed frames with bounding boxes, labels, and obstacle detection information.
Main Loop:
Captures a live video feed from the ESP32-CAM, decodes the JPEG image, and performs YOLOv8 object detection. Applies image processing techniques such as Gaussian blur, edge detection (Canny), and contour finding to detect obstacles. Sorts the obstacles by distance and sends steering commands to avoid them. Detects and processes objects labeled as "cup" by YOLOv8, drawing bounding boxes and calculating the average centroid of detected. Sends steering commands based on the average centroid of all detected. Displays the processed frames with bounding boxes, labels, and obstacle detection information.
import cv2 import numpy as np import requests from ultralytics import YOLO # Set up the YOLOv8 model model = YOLO("yolov8x.pt") model.conf = 0.5 # Get the IP address and port of the ESP32-CAM ip_address = "" port = 80 # Function to get the live feed from the ESP32-CAM def get_frame(): response = requests.get(f"http://{ip_address}:{port}/cam-lo.jpg") return response.content def calculate_distance(x, y, w, h): center_x = x + w / 2 center_y = y + h / 2 distance = np.sqrt(center_x**2 + center_y**2) return distance # Function to send the "left" or "right" command based on the centroid of the detected trash def send_steering_command(avg_centroid): if avg_centroid[0] < WIDTH / 2: requests.get(f'http://{ip_address}:{port}/control?command=left') print("TURN LEFT") elif avg_centroid[0] > WIDTH / 2: requests.get(f'http://{ip_address}:{port}/control?command=right') print("TURN RIGHT") else: requests.get(f'http://{ip_address}:{port}/control?command=forward') print("MOVE FORWARD") # Main function to process the live feed and detect trash def main(): global WIDTH WIDTH, HEIGHT = 320, 240 # Set the width and height of the frame while True: # Get the live feed from the ESP32-CAM frame_data = get_frame() # Decode the JPEG image from the live feed frame = cv2.imdecode(np.frombuffer(frame_data, np.uint8), cv2.IMREAD_COLOR) # Perform object detection using YOLOv8 results = model(frame) # Convert the frame to grayscale gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Apply Gaussian blur to the grayscale image **************************************** blurred = cv2.GaussianBlur(gray, (5, 5), 0) # Apply edge detection to the blurred image edges = cv2.Canny(blurred, 50, 150) # Apply thresholding to the edges image _ , thresh = cv2.threshold(edges, 127, 255, cv2.THRESH_BINARY) # Find contours in the thresholded image contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Calculate the distance to the obstacles based on the size and position of the contours obstacles = [] for contour in contours: x, y, w, h = cv2.boundingRect(contour) if w > 50 and h > 50: # Filter out small contours distance = calculate_distance(x, y, w, h) obstacles.append((distance, x, y, w, h)) # Sort the obstacles by distance obstacles.sort(key=lambda x: x[0]) for obstacle in obstacles: if obstacle[0] < 100: # Adjust this threshold based on your robot's capabilities if obstacle[1] < WIDTH / 2: requests.get(f'http://{ip_address}:{port}/control?command=left') # Turn left elif obstacle[1] > WIDTH / 2: requests.get(f'http://{ip_address}:{port}/control?command=right') # Turn right else: requests.get(f'http://{ip_address}:{port}/control?command=forward') # Draw bounding boxes and labels for detected objects center_points_cur_frame = [] for result in results.xyxy[0]: x, y, w, h = cv2.boundingRect() # Extracting box coordinates cx = int((x + w) / 2) cy = int((y + h) / 2) center_points_cur_frame.append((cx, cy)) label = model.names[int(result[5])] if label == "cup": cv2.rectangle(frame, (int(x), int(y)), (int(w), int(h)), (0, 255, 0), 2) cv2.putText(frame, label, (int(x), int(y) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) # Calculate the average centroid of all detected if center_points_cur_frame: avg_centroid = (sum(x for x, y in center_points_cur_frame) / len(center_points_cur_frame), sum(y for x, y in center_points_cur_frame) / len(center_points_cur_frame)) else: avg_centroid = (0, 0) # or some other default value # Send the steering command based on the average centroid send_steering_command(avg_centroid) # Display the resulting frame cv2.imshow("Obstacle Detection", edges) # Display the resulting frame cv2.imshow("ESP32CAM Cup Detection", frame) # Exit the loop if 'q' is pressed if cv2.waitKey(1) & 0xFF == ord("q"): break cv2.destroyAllWindows() if __name__ == "__main__": main()
Error:Traceback (most recent call last):
File "c:\Users\floloja\Desktop\robotver2\test.py", line 126, in <module>
main()
File "c:\Users\floloja\Desktop\robotver2\test.py", line 92, in main
for result in results.xyxy[0]:
^^^^^^^^^^^^
AttributeError: 'list' object has no attribute 'xyxy'