May-19-2023, 08:58 PM
Hi there,
I am writing a program that reads an analog gauge with a webcam and returns the value that the gauge needle points at. (Disclaimer: most of this is not my code, however I am modifying it to suit my own purposes.) I've been able to get it to work, but it needs some optimization.
For example:
It can't find the needle if the gauge is too thin, too far away, or if the camera image is downscaled too much.
It mistakes the labels inside the gauge for the needle.
It reads the wrong end of the needle if the needle has a longer back.
It mistakes the circular bars in some gauges for the edge of the gauge.
It finishes processing camera images 5 seconds behind real time. (The closer to real-time, the better)
So far, I've been able to optimize it somewhat, but it needs more. The original code was not optimized, and it did not use live video.
I've found that reducing the maximum line gap helps it not read gauge labels as the gauge needle.
I've also found that downscaling the camera images makes it process them faster, but if they are too small, the program cannot detect the needle.
I'd like this program to only draw the line it detects from the tip of the needle to the center of the gauge, so that it doesn't read the long back end of some gauges, but I'm not sure how to implement that.
If anyone understands this code better than I do and knows how to optimize it, I would really appreciate your help!
Thanks!
Here is the code:
I am writing a program that reads an analog gauge with a webcam and returns the value that the gauge needle points at. (Disclaimer: most of this is not my code, however I am modifying it to suit my own purposes.) I've been able to get it to work, but it needs some optimization.
For example:
It can't find the needle if the gauge is too thin, too far away, or if the camera image is downscaled too much.
It mistakes the labels inside the gauge for the needle.
It reads the wrong end of the needle if the needle has a longer back.
It mistakes the circular bars in some gauges for the edge of the gauge.
It finishes processing camera images 5 seconds behind real time. (The closer to real-time, the better)
So far, I've been able to optimize it somewhat, but it needs more. The original code was not optimized, and it did not use live video.
I've found that reducing the maximum line gap helps it not read gauge labels as the gauge needle.
I've also found that downscaling the camera images makes it process them faster, but if they are too small, the program cannot detect the needle.
I'd like this program to only draw the line it detects from the tip of the needle to the center of the gauge, so that it doesn't read the long back end of some gauges, but I'm not sure how to implement that.
If anyone understands this code better than I do and knows how to optimize it, I would really appreciate your help!
Thanks!
Here is the code:
import cv2
import numpy as np
import time


def avg_circles(circles, b):
    """Return the integer-averaged centre and radius (x, y, r) of the first
    *b* circles in a ``cv2.HoughCircles`` result (shape ``(1, n, 3)``).

    Averaging smooths the jitter between the several near-duplicate circles
    Hough typically reports for one physical gauge face.
    """
    avg_x = int(sum(circles[0][i][0] for i in range(b)) / b)
    avg_y = int(sum(circles[0][i][1] for i in range(b)) / b)
    avg_r = int(sum(circles[0][i][2] for i in range(b)) / b)
    return avg_x, avg_y, avg_r


def dist_2_pts(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2)."""
    return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)


def take_measure(threshold_img, threshold_ln, minLineLength, maxLineGap,
                 diff1LowerBound, diff1UpperBound,
                 diff2LowerBound, diff2UpperBound):
    """Grab one frame from the global ``cap``, locate the gauge dial and the
    needle, print the gauge reading, and return the annotated frame.

    Parameters
    ----------
    threshold_img : int
        Grayscale threshold used to isolate the (dark) needle.
    threshold_ln, minLineLength, maxLineGap : int
        ``cv2.HoughLinesP`` tuning parameters.
    diff1LowerBound, diff1UpperBound : float
        Fractions of the dial radius bounding the distance from the centre
        to the needle endpoint NEAREST the centre.
    diff2LowerBound, diff2UpperBound : float
        Same bounds for the endpoint FARTHEST from the centre.

    Raises
    ------
    RuntimeError
        If no frame can be read from the camera.
    """
    ret, frame = cap.read()
    if not ret:
        # Bug fix: the original never checked ret and crashed on frame.shape.
        raise RuntimeError("Failed to read a frame from the camera.")

    # Downscale to 60% — Hough transforms dominate the runtime and scale
    # with pixel count, so this is the main latency knob.
    scale_percent = 60
    height = int(frame.shape[0] * scale_percent / 100)
    width = int(frame.shape[1] * scale_percent / 100)
    img = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20)
    if circles is None:
        cv2.putText(img, "Can't see the gauge!", (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2, cv2.LINE_AA)
        return img

    a, b, c = circles.shape
    x, y, r = avg_circles(circles, b)
    # Draw the dial outline and a centre marker for visual feedback.
    cv2.circle(img, (x, y), r, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.circle(img, (x, y), 2, (0, 255, 0), 2, cv2.LINE_AA)

    # Inverted threshold: the dark needle becomes a bright blob on black,
    # which is what HoughLinesP needs.
    gray3 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    th, dst2 = cv2.threshold(gray3, threshold_img, 255, cv2.THRESH_BINARY_INV)
    lines = cv2.HoughLinesP(image=dst2, rho=1, theta=np.pi / 180,
                            threshold=threshold_ln,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)

    # Collect (length, x1, y1, x2, y2) for every line whose endpoints fall
    # inside the expected radial bands around the dial centre.
    final_line_list = []
    if lines is not None:  # Bug fix: HoughLinesP returns None when it finds nothing
        for line in lines:
            for x1, y1, x2, y2 in line:
                diff1 = dist_2_pts(x, y, x1, y1)  # distance centre -> endpoint 1
                diff2 = dist_2_pts(x, y, x2, y2)  # distance centre -> endpoint 2
                if diff1 > diff2:
                    diff1, diff2 = diff2, diff1  # diff1 = nearer endpoint
                if (diff1LowerBound * r < diff1 < diff1UpperBound * r
                        and diff2LowerBound * r < diff2 < diff2UpperBound * r):
                    final_line_list.append(
                        (dist_2_pts(x1, y1, x2, y2), x1, y1, x2, y2))

    if not final_line_list:
        cv2.putText(img, "Can't find the indicator!", (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2, cv2.LINE_AA)
        return img

    # Use the LONGEST candidate — most likely the needle rather than a label
    # stroke. (The original computed line_length but discarded it and took
    # the first candidate.)
    _, x1, y1, x2, y2 = max(final_line_list)
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 255), 2)

    # The needle tip is the endpoint farther from the dial centre.
    # y is inverted because image rows grow downward.
    if dist_2_pts(x, y, x1, y1) > dist_2_pts(x, y, x2, y2):
        x_angle, y_angle = x1 - x, y - y1
    else:
        x_angle, y_angle = x2 - x, y - y2

    # arctan gives the angle within one quadrant; map it to a full-circle
    # angle measured the way the gauge was calibrated.
    res = np.rad2deg(np.arctan(np.divide(float(y_angle), float(x_angle))))
    if x_angle == 0 or y_angle == 0:
        final_angle = abs(res)
    elif x_angle > 0:  # quadrants I and IV
        final_angle = 270 - res
    else:              # quadrants II and III
        final_angle = 90 - res

    cv2.putText(img, "Indicator OK!", (50, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2, cv2.LINE_AA)

    # Bug fix: interpolate between BOTH calibrated endpoints. The original
    # computed final_angle * (gauge_max / angle_max), silently ignoring the
    # gauge_min/angle_min values it asked the user for.
    angle_span = angle_max - angle_min
    if final_angle != 0 and angle_span != 0:
        final_value = (gauge_min
                       + (final_angle - angle_min)
                       * (gauge_max - gauge_min) / angle_span)
        print("Gauge Value: " + str(int(final_value)) + " " + gauge_units)
    return img


if __name__ == "__main__":
    cap = cv2.VideoCapture(0)

    gauge_min = float(input("Please enter the minimum value of the gauge: "))
    gauge_max = float(input("Please enter the maximum value of the gauge: "))
    angle_min = float(input(
        "Please enter the minimum angular position of the gauge needle: "))
    angle_max = float(input(
        "Please enter the maximum angular position of the gauge needle: "))
    gauge_units = input("Please enter the units listed on the gauge: ")

    # Tuning constants hoisted out of the capture loop — they never change
    # per frame. (Previous experiments: threshold_img 175, minLineLength 50,
    # maxLineGap 8.)
    threshold_img = 120
    threshold_ln = 150
    minLineLength = 30
    maxLineGap = 1
    # Radial-band coefficients (fractions of the dial radius).
    diff1LowerBound, diff1UpperBound = 0.15, 0.25
    diff2LowerBound, diff2UpperBound = 0.5, 1.0

    while True:
        img = take_measure(threshold_img, threshold_ln,
                           minLineLength, maxLineGap,
                           diff1LowerBound, diff1UpperBound,
                           diff2LowerBound, diff2UpperBound)
        cv2.imshow('test', img)
        if cv2.waitKey(10) == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break