Jun-01-2019, 08:06 PM
I made a bot to play powerline.io for me... it kinda works. I was wondering if anyone had any suggestions on how I could improve it. I have two versions. One checks the whole snapshot whilst the other only checks a portion. I have two options: improve the code below so that it works, or improve the speed of my second code.
Detects portion (15x20/20x15, depending on the direction it is looking at) -
EDIT: I changed the detection coords for the second code to be 10x10 but it still didn't work
I have a new one called line detection; I don't know if it's better.
Detects portion (15x20/20x15, depending on the direction it is looking at) -
from PIL import ImageGrab import time, cv2 from pynput.keyboard import Controller keyboard = Controller() ''' Up - 548, 950, 540, 970 Left - 550, 940, 570, 950 Down - 584, 950, 590, 970 Right - 548, 975, 568, 985 ''' detectionCoords = {'up' : (940, 530, 970, 545), 'left' : (975, 570, 995, 590), 'down' : (950, 570, 970, 590), 'right' : (940, 570, 960, 590)} def mainDetect(direction): danger = detection(direction) direction = move(direction, danger) return direction def detection(direction): img = cv2.imread('Powerline.ioScreenshot.png') BigList = (img[8], img[9], img[10], (8 ,9 , 10)) for x in range(0, 3): if BigList[x][BigList[3][x]][0] not in range(38, 41) and BigList[x][BigList[3][x]][1] not in range(27, 29) and BigList[x][BigList[3][x]][2] not in range(0, 5): return True return False def move(direction, danger): if danger: rightDict = {'up' : 'right', 'right' : 'down', 'down' : 'left', 'left' : 'up'} leftDict = {'up' : 'left', 'left' : 'down', 'down' : 'right', 'right' : 'up'} detectDict = {'up' : 'w', 'right' : 'd', 'down' : 's', 'left' : 'a'} ImageGrab.grab(detectionCoords[rightDict[direction]]).save('Powerline.ioScreenshot.png', 'PNG') if detection(rightDict[direction]): direction = leftDict[direction] keyboard.press(detectDict[direction]) print(f'danger - turning {direction}') return direction else: direction = rightDict[direction] keyboard.press(detectDict[direction]) print(f'danger - turning {direction}') return direction return direction def main(): time.sleep(3) currentDirection = 'up' while True: ImageGrab.grab(detectionCoords[currentDirection]).save('Powerline.ioScreenshot.png', 'PNG') currentDirection = mainDetect(currentDirection) if __name__ == '__main__': main()looks at full image (10x10)
from PIL import ImageGrab import time, cv2 from pynput.keyboard import Key, Controller keyboard = Controller() detectionCoords = {'up' : (945, 525, 960, 540), 'left' : (970, 565, 985, 580), 'down' : (945, 565, 960, 580), 'right' : (940, 565, 955, 580)} def mainDetect(direction): danger = detection(direction) direction = move(direction, danger) return direction def detection(direction): img = cv2.imread('Powerline.ioScreenshot.png') for x in img: for y in x: if y[0] not in range(38, 41) and y[1] not in range(27, 29) and y[2] not in range(0, 5): print('danger') return True return False def move(direction, danger): if danger: rightDict = {'up' : 'right', 'right' : 'down', 'down' : 'left', 'left' : 'up'} detectDict = {'up' : 'w', 'right' : 'd', 'down' : 's', 'left' : 'a'} ImageGrab.grab(detectionCoords[rightDict[direction]]).save('Powerline.ioScreenshot.png', 'PNG') if detection(rightDict[direction]): keyboard.press(detectDict[rightDict[rightDict[rightDict[direction]]]]) return rightDict[rightDict[rightDict[direction]]] else: keyboard.press(detectDict[rightDict[direction]]) return direction return direction def main(): time.sleep(3) currentDirection = 'up' while True: ImageGrab.grab(detectionCoords[currentDirection]).save('Powerline.ioScreenshot.png', 'PNG') currentDirection = mainDetect(currentDirection) if __name__ == '__main__': main()I grab a screenshot, look at it and determine whether or not I need to move. Even looking at a 20x15/15x20 image takes at least a second which is too slow, so I tried to reduce the code, compact it, and only look at a portion of the image for the first code. The second is before I did that.
EDIT: I changed the detection coords for the second code to be 10x10 but it still didn't work
I have a new one called line detection; I don't know if it's better.
from PIL import ImageGrab
import time, cv2
from pynput.keyboard import Controller

keyboard = Controller()

'''
Up - 548, 950, 540, 970
Left - 550, 940, 570, 950
Down - 584, 950, 590, 970
Right - 548, 975, 568, 985
'''

# Screen regions (left, top, right, bottom) grabbed for each travel direction.
detectionCoords = {'up': (940, 530, 970, 545),
                   'left': (975, 570, 995, 590),
                   'down': (950, 570, 970, 590),
                   'right': (940, 570, 960, 590)}


def _is_background(pixel):
    """Return True when *pixel* (OpenCV BGR order) matches the empty-arena
    background colour: B 38-40, G 27-28, R 0-4."""
    b, g, r = int(pixel[0]), int(pixel[1]), int(pixel[2])
    return 38 <= b <= 40 and 27 <= g <= 28 and 0 <= r <= 4


def mainDetect(direction):
    """Check the saved screenshot for danger ahead and steer if needed.

    Returns the (possibly new) travel direction.
    """
    danger = detection(direction)
    direction = move(direction, danger)
    return direction


def detection(direction):
    """Sample a short line of pixels (row 9, columns 8-10) from the saved
    screenshot; return True when any of them is not background colour.

    BUG FIXES:
    * the original checked only channel 0 of column 8, channel 1 of
      column 9 and channel 2 of column 10 -- one channel per pixel;
      every pixel must have all three channels checked.
    * the per-channel ``not in range(...)`` tests were joined with
      ``and``, so danger required every channel to be out of range at
      once; ANY out-of-range channel means the pixel is not background.
    NOTE(review): both the up/down and the left/right branches of the
    original sampled the identical row-9 pixels, so the branch on
    *direction* was dead code and is removed here; if a vertical line of
    samples was intended for left/right, adjust accordingly.
    """
    img = cv2.imread('Powerline.ioScreenshot.png')
    return any(not _is_background(img[9][col]) for col in (8, 9, 10))


def move(direction, danger):
    """Turn right when danger is ahead, or left when the right-hand cell
    is also dangerous.  Returns the direction actually taken."""
    if not danger:
        return direction
    rightDict = {'up': 'right', 'right': 'down', 'down': 'left', 'left': 'up'}
    leftDict = {'up': 'left', 'left': 'down', 'down': 'right', 'right': 'up'}
    detectDict = {'up': 'w', 'right': 'd', 'down': 's', 'left': 'a'}
    # Peek at the cell to the right before committing to the turn.
    ImageGrab.grab(detectionCoords[rightDict[direction]]).save('Powerline.ioScreenshot.png', 'PNG')
    if detection(rightDict[direction]):
        direction = leftDict[direction]
    else:
        direction = rightDict[direction]
    key = detectDict[direction]
    keyboard.press(key)
    keyboard.release(key)  # BUG FIX: keys were pressed but never released
    print(f'danger - turning {direction}')
    return direction


def main():
    time.sleep(3)  # give the player time to focus the game window
    currentDirection = 'up'
    while True:
        ImageGrab.grab(detectionCoords[currentDirection]).save('Powerline.ioScreenshot.png', 'PNG')
        currentDirection = mainDetect(currentDirection)


if __name__ == '__main__':
    main()