(Feb-25-2022, 04:48 PM)Larz60+ Wrote: Please post your code (use BBcode tags), whether working or not, so that we can help.
Please also post any error traceback complete and unaltered (if any) using error tags
This is the current state (it still looks kind of messy):
[python]
import cv2 as cv
import numpy as np
import os
import win32api, win32con
from time import time
from wincapture import WindowCapture
from cvision import Vision
import pyautogui as pya

os.chdir(os.path.dirname(os.path.abspath(__file__)))

# Init WinCap
wincap = WindowCapture("Wakfu")
# Init cVision
vision = Vision("bronze.jpg")

# Thread calls
wincap.start()
vision.start()

loop_time = time()
while True:
    # skip until the capture thread has produced its first frame
    if wincap.screenshot is None:
        continue

    # hand the latest frame to the vision thread for detection
    vision.update(wincap.screenshot)

    # show results
    detection_image = vision.draw_rectangles(wincap.screenshot, vision.rectangles)
    cv.imshow("Matches", detection_image)

    # debug the loop rate
    print('FPS {}'.format(1 / (time() - loop_time)))
    loop_time = time()

    if cv.waitKey(1) == ord('q'):
        wincap.stop()
        vision.stop()
        cv.destroyAllWindows()
        break

print('Done.')
[/python]
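
One thing I'm not sure about: the loop reads wincap.screenshot twice per iteration (once for update(), once for drawing), and the capture thread can swap in a new frame in between, so detection and drawing may end up on different frames. Taking a single snapshot per iteration would avoid that; rough sketch (the local variable name is mine):

[python]
# inside the while loop: grab one reference to the shared frame so the
# detection and the drawing both operate on the same image
screenshot = wincap.screenshot
if screenshot is None:
    continue
vision.update(screenshot)
detection_image = vision.draw_rectangles(screenshot, vision.rectangles)
[/python]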
[python]
# cvision.py
import cv2 as cv
import numpy as np
from threading import Thread, Lock


class Vision:
    # threading properties
    stopped = True
    lock = None
    rectangles = []
    # properties
    tmatch = None
    screenshot = None
    needle_w = 0
    needle_h = 0
    method = None

    # constructor
    def __init__(self, needle_img_path, method=cv.TM_CCOEFF_NORMED):
        # create a thread lock and load the needle image
        self.lock = Lock()
        self.tmatch = cv.imread(needle_img_path, cv.IMREAD_UNCHANGED)
        # save the dimensions of the needle image
        self.needle_w = self.tmatch.shape[1]
        self.needle_h = self.tmatch.shape[0]
        # there are 6 methods to choose from:
        # TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED
        self.method = method
    def find(self, haystack_img, threshold=0.5):
        # run the OpenCV template-matching algorithm
        result = cv.matchTemplate(haystack_img, self.tmatch, self.method)
        # get all the positions from the match result that exceed our threshold
        locations = np.where(result >= threshold)
        locations = list(zip(*locations[::-1]))
        # if we found no results, return now. this reshape of the empty array allows us to
        # concatenate it with other results without errors
        if not locations:
            return np.array([], dtype=np.int32).reshape(0, 4)
        # a lot of overlapping rectangles get found; we can eliminate the redundant
        # ones by grouping them
        rectangles = []
        for loc in locations:
            rect = [int(loc[0]), int(loc[1]), self.needle_w, self.needle_h]
            # add every box to the list twice in order to retain single (non-overlapping) boxes
            rectangles.append(rect)
            rectangles.append(rect)
        # apply group rectangles to merge the overlapping boxes
        rectangles, weights = cv.groupRectangles(rectangles, groupThreshold=1, eps=0.5)
        return rectangles
    # given a list of [x, y, w, h] rectangles returned by find(), convert those into a list of
    # [x, y] positions in the center of those rectangles where we can click on those found items
    def get_click_points(self, rectangles):
        points = []
        # loop over all the rectangles
        for (x, y, w, h) in rectangles:
            # determine the center position
            center_x = x + int(w/2)
            center_y = y + int(h/2)
            # save the points
            points.append((center_x, center_y))
        return points
    # given a list of [x, y, w, h] rectangles and a canvas image to draw on, return an image with
    # all of those rectangles drawn
    def draw_rectangles(self, haystack_img, rectangles):
        # these colors are actually BGR
        line_color = (0, 255, 0)
        line_type = cv.LINE_4
        for (x, y, w, h) in rectangles:
            # determine the box positions
            top_left = (x, y)
            bottom_right = (x + w, y + h)
            # draw the box
            cv.rectangle(haystack_img, top_left, bottom_right, line_color, lineType=line_type)
        return haystack_img
    # given a list of [x, y] positions and a canvas image to draw on, return an image with all
    # of those click points drawn on as crosshairs
    def draw_crosshairs(self, haystack_img, points):
        # these colors are actually BGR
        marker_color = (255, 0, 255)
        marker_type = cv.MARKER_CROSS
        for (center_x, center_y) in points:
            # draw the center point
            cv.drawMarker(haystack_img, (center_x, center_y), marker_color, marker_type)
        return haystack_img
[/python]
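
The Vision class above still doesn't have the threading methods that the main loop calls (start(), update(), stop()), even though it already has the stopped flag and the lock. This is the rough sketch I'm working towards, mirroring the threading methods of WindowCapture below (untested):

[python]
    # threading methods (sketch only, mirrors WindowCapture)
    def start(self):
        self.stopped = False
        t = Thread(target=self.run)
        t.start()

    def stop(self):
        self.stopped = True

    def update(self, screenshot):
        # called from the main loop with the latest captured frame
        self.lock.acquire()
        self.screenshot = screenshot
        self.lock.release()

    def run(self):
        while not self.stopped:
            if self.screenshot is not None:
                # run detection on the most recent frame and publish the results
                rectangles = self.find(self.screenshot, 0.5)
                self.lock.acquire()
                self.rectangles = rectangles
                self.lock.release()
[/python]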
[python]
# wincapture.py
import numpy as np
import win32gui, win32ui, win32con
from threading import Thread, Lock

class WindowCapture:
    # threading properties
    stopped = True
    lock = None
    screenshot = None
    # properties
    w = 0
    h = 0
    hwnd = None
    cropped_x = 0
    cropped_y = 0
    offset_x = 0
    offset_y = 0

    # constructor
    def __init__(self, window_name=None):
        # create a thread lock object
        self.lock = Lock()
        # find the handle for the window we want to capture.
        # if no window name is given, capture the entire screen
        if window_name is None:
            self.hwnd = win32gui.GetDesktopWindow()
        else:
            self.hwnd = win32gui.FindWindow(None, window_name)
            if not self.hwnd:
                raise Exception('Window not found: {}'.format(window_name))
        # get the window size
        window_rect = win32gui.GetWindowRect(self.hwnd)
        self.w = window_rect[2] - window_rect[0]
        self.h = window_rect[3] - window_rect[1]
        # account for the window border and titlebar and cut them off
        border_pixels = 8
        titlebar_pixels = 30
        self.w = self.w - (border_pixels * 2)
        self.h = self.h - titlebar_pixels - border_pixels
        self.cropped_x = border_pixels
        self.cropped_y = titlebar_pixels
        # set the cropped coordinates offset so we can translate screenshot
        # images into actual screen positions
        self.offset_x = window_rect[0] + self.cropped_x
        self.offset_y = window_rect[1] + self.cropped_y
    def get_screenshot(self):
        # get the window image data
        wDC = win32gui.GetWindowDC(self.hwnd)
        dcObj = win32ui.CreateDCFromHandle(wDC)
        cDC = dcObj.CreateCompatibleDC()
        dataBitMap = win32ui.CreateBitmap()
        dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
        cDC.SelectObject(dataBitMap)
        cDC.BitBlt((0, 0), (self.w, self.h), dcObj, (self.cropped_x, self.cropped_y), win32con.SRCCOPY)

        # convert the raw data into a format opencv can read
        # dataBitMap.SaveBitmapFile(cDC, 'debug.bmp')
        signedIntsArray = dataBitMap.GetBitmapBits(True)
        # np.fromstring is deprecated; np.frombuffer does the same job here
        img = np.frombuffer(signedIntsArray, dtype='uint8')
        img.shape = (self.h, self.w, 4)

        # free resources
        dcObj.DeleteDC()
        cDC.DeleteDC()
        win32gui.ReleaseDC(self.hwnd, wDC)
        win32gui.DeleteObject(dataBitMap.GetHandle())

        # drop the alpha channel, or cv.matchTemplate() will throw an error like:
        #   error: (-215:Assertion failed) (depth == CV_8U || depth == CV_32F) && type == _templ.type()
        #   && _img.dims() <= 2 in function 'cv::matchTemplate'
        img = img[..., :3]

        # make image C_CONTIGUOUS to avoid errors that look like:
        #   File ... in draw_rectangles
        #   TypeError: an integer is required (got type tuple)
        # see the discussion here:
        # https://github.com/opencv/opencv/issues/14866#issuecomment-580207109
        img = np.ascontiguousarray(img)
        return img
    # find the name of the window you're interested in.
    # once you have it, pass it to the WindowCapture constructor.
    # https://stackoverflow.com/questions/55547940/how-to-get-a-list-of-the-name-of-every-open-window
    @staticmethod
    def list_window_names():
        def winEnumHandler(hwnd, ctx):
            if win32gui.IsWindowVisible(hwnd):
                print(hex(hwnd), win32gui.GetWindowText(hwnd))
        win32gui.EnumWindows(winEnumHandler, None)
    # translate a pixel position on a screenshot image to a pixel position on the screen.
    # pos = (x, y)
    # WARNING: if you move the window being captured after execution is started, this will
    # return incorrect coordinates, because the window position is only calculated in
    # the __init__ constructor.
    def get_screen_position(self, pos):
        return (pos[0] + self.offset_x, pos[1] + self.offset_y)

    # threading methods
    def start(self):
        self.stopped = False
        t = Thread(target=self.run)
        t.start()

    def stop(self):
        self.stopped = True

    def run(self):
        # TODO: you can write your own time/iterations calculation to determine how fast this is
        while not self.stopped:
            # get an updated image of the game
            screenshot = self.get_screenshot()
            # lock the thread while updating the results
            self.lock.acquire()
            self.screenshot = screenshot
            self.lock.release()
[/python]
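
For the actual clicking part (the reason get_click_points() and get_screen_position() exist), this is roughly what I plan to add to the main loop. Sketch only, not in the code above yet; pyautogui does the click:

[python]
# sketch: click the first detected item
points = vision.get_click_points(vision.rectangles)
if points:
    # translate the screenshot coordinate into a real screen coordinate
    screen_x, screen_y = wincap.get_screen_position(points[0])
    pya.click(x=screen_x, y=screen_y)
[/python]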