
Real time Detection and Display
Hello all! :)

I'm trying to automate a mini-game inside 'Star Trek Online' where there are 4 rows and you have to point the mouse at the correct row when an object coming in from the left reaches a certain pixel / area.

Example of the full mini-game:
[Image: 2015-01-29_2357.png]

[Image: Omega-Board.jpg]



Example of the location marks:
[Image: example.jpg]

I've managed to get to the point where I can see both the screen capture and the detection, but if I move the mini-game box the detection marks don't adjust.

I also tried using threading but got the same result: the detection markers stay in one place.
How can I tweak the code so the detection marks move along with the real-time screen capture?

import cv2
import numpy as np
from PIL import ImageGrab

liveImage = r'path to screenshot'               # saved screenshot used for the template match
OMEGA_BOARD_MAIN = r'path to mini-game board'   # template image of the mini-game board

def omega():
    def draw():
        # Uses frame, loc, w and h from the enclosing loop (closure).
        marker_color = (255, 0, 255)
        marker_type = cv2.MARKER_CROSS
        # Outline the matched board (magenta in BGR).
        cv2.rectangle(frame, loc[0], (loc[0][0] + w, loc[0][1] + h), marker_color, 1)

        # Marker Row 1
        cv2.drawMarker(frame, ((int(loc[0][0] + w) - 332), (int(loc[0][1] + h)) - 200),
                       color=marker_color, markerType=marker_type, markerSize=40, thickness=1)

        # Marker Row 2
        cv2.drawMarker(frame, ((int(loc[0][0] + w) - 332), (int(loc[0][1] + h)) - 155),
                       color=marker_color, markerType=marker_type, markerSize=40, thickness=1)

        # Marker Row 3
        cv2.drawMarker(frame, ((int(loc[0][0] + w) - 332), (int(loc[0][1] + h)) - 80),
                       color=marker_color, markerType=marker_type, markerSize=40, thickness=1)

        # Marker Row 4
        cv2.drawMarker(frame, ((int(loc[0][0] + w) - 332), (int(loc[0][1] + h)) - 30),
                       color=marker_color, markerType=marker_type, markerSize=40, thickness=1)

    screenshot()  # user-defined helper, assumed to save the current screen to liveImage
    while True:
        # The template match runs against the screenshot saved on disk...
        img_rgb = cv2.imread(liveImage)
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        template = cv2.imread(OMEGA_BOARD_MAIN, 0)
        w, h = template.shape[::-1]

        res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
        threshold = 0.5
        loc = np.where(res >= threshold)
        loc = list(zip(*loc[::-1]))  # list of (x, y) match locations

        # ...while the displayed frame is a fresh capture of the live screen.
        imgScreen = np.array(ImageGrab.grab(bbox=(0, 0, 1920, 1080)))
        frame = cv2.cvtColor(imgScreen, cv2.COLOR_RGB2BGR)

        if len(loc):  # at least one match above the threshold
            row1_x, row1_y = (loc[0][0] + w) - 332, (loc[0][1] + h) - 200
            row2_x, row2_y = (loc[0][0] + w) - 332, (loc[0][1] + h) - 155
            row3_x, row3_y = (loc[0][0] + w) - 332, (loc[0][1] + h) - 80
            row4_x, row4_y = (loc[0][0] + w) - 332, (loc[0][1] + h) - 30
            draw()

        else:
            print("No Omega Board Found.")

        cv2.imshow('omega', frame)
        if cv2.waitKey(1) == ord('q'):
            cv2.destroyAllWindows()
            quit()


omega()
Thank you for your time! :)

EDIT:
The solution was to run the template match on the captured frame instead of the saved screenshot.
Solved! Please close.
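
In case it helps anyone later, here is a minimal sketch of what that change looks like. The template path, the 332/200/155/80/30 pixel offsets and the 1920x1080 bbox are just the placeholders and values from my code above, and it uses cv2.minMaxLoc to take the single best match instead of every location above the threshold:

import cv2
import numpy as np
from PIL import ImageGrab

OMEGA_BOARD_MAIN = r'path to mini-game board'

template = cv2.imread(OMEGA_BOARD_MAIN, 0)
w, h = template.shape[::-1]

while True:
    # Grab the live screen once and use the same capture for matching and display.
    frame = cv2.cvtColor(np.array(ImageGrab.grab(bbox=(0, 0, 1920, 1080))), cv2.COLOR_RGB2BGR)
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    res = cv2.matchTemplate(frame_gray, template, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(res)

    if max_val >= 0.5:
        x, y = max_loc
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 255), 1)
        # Same row offsets as in the original draw(), relative to the matched board.
        for dy in (200, 155, 80, 30):
            cv2.drawMarker(frame, (x + w - 332, y + h - dy), color=(255, 0, 255),
                           markerType=cv2.MARKER_CROSS, markerSize=40, thickness=1)
    else:
        print("No Omega Board Found.")

    cv2.imshow('omega', frame)
    if cv2.waitKey(1) == ord('q'):
        cv2.destroyAllWindows()
        break

Because the match and the drawing both come from the same capture, the rectangle and the row markers follow the board wherever the window is moved.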