114.py
#!/usr/bin/env python3
import torch
from time import time, sleep
import keyboard
from threading import Thread
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageGrab
import sounddevice as sd

# Optional 440 Hz audio cue for debugging scheduled key presses:
# fs = 44100
# t = np.linspace(0, 0.04, int(0.04 * fs), endpoint=False)
# x = 0.5 * np.sin(2 * np.pi * 440 * t)
# sd.play(x, fs)

custom_names = ['a', 'b', 'x', 'y']  # class names of the fine-tuned detector
kilist = ['s', 'd', 'a', 'w']        # keyboard key sent for each class index
eventlist = []     # [(timestamp, coordinate, key), ...] presses scheduled for the future
cooldownlist = []  # [(timestamp, coordinate, key), ...] recently fired presses, used to suppress duplicates
def capture_screen():
    screenshot = ImageGrab.grab()
    frame = np.array(screenshot)
    return frame

class VideoFrame:
    def __init__(self, video_path):
        self.cap = cv2.VideoCapture(video_path)

    def get_next_frame(self):
        ret, frame = self.cap.read()
        if not ret:
            return None
        return frame
vSource = 'cap'  # 'cap' for screen capture, 'video' for a local video file
video_path = ""
if vSource == 'video':
    video_frame = VideoFrame(video_path)

def getFrame():
    # Return the next frame in BGR order.
    if vSource == 'video':
        # cv2.VideoCapture already yields BGR frames, so no conversion is needed here.
        return video_frame.get_next_frame()
    # PIL screenshots are RGB; convert so both sources feed the pipeline in BGR.
    frame = capture_screen()
    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    return frame_bgr
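
# If the game window covers only part of the screen, capture could optionally be
# restricted to that region. This is a hedged sketch using PIL's bbox argument; the
# helper name and the (left, top, right, bottom) values are placeholders, not values
# taken from this project.
# def capture_region(left=0, top=0, right=1280, bottom=720):
#     screenshot = ImageGrab.grab(bbox=(left, top, right, bottom))
#     return np.array(screenshot)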
def process_events():
    # Fire scheduled key presses once their timestamp is due, then move them to the cooldown list.
    global eventlist, cooldownlist
    while True:
        current_time = time()
        # Iterate over a copy so removing items does not skip entries.
        for event in eventlist[:]:
            timestamp, coordinate, key = event
            if timestamp <= current_time:
                # sd.play(x, fs)  # optional audio cue
                keyboard.press(key)
                sleep(0.003)
                keyboard.release(key)
                cooldownlist.append(event)
                eventlist.remove(event)
        sleep(0.001)

def process_cooldowns():
    # Drop events from the cooldown list 0.42 s after their scheduled time.
    global cooldownlist
    while True:
        current_time = time()
        for event in cooldownlist[:]:
            timestamp, _, _ = event
            if timestamp <= current_time - 0.42:
                cooldownlist.remove(event)
        sleep(0.005)

# Run the workers as daemon threads so they exit together with the main loop.
event_thread = Thread(target=process_events, daemon=True)
cooldown_thread = Thread(target=process_cooldowns, daemon=True)
event_thread.start()
cooldown_thread.start()
def is_valid_bbox(x1, y1, x2, y2, threshold_min=64, threshold_max=184):
    # Accept only detections whose width and height fall inside the expected icon size range (in pixels).
    width = x2 - x1
    height = y2 - y1
    return threshold_min <= width <= threshold_max and threshold_min <= height <= threshold_max

def is_new_event(coordinate, threshold=50.0):
    # Reject a detection if a pending or recently fired event already exists within `threshold` pixels.
    global eventlist, cooldownlist
    for _, cd_coordinate, _ in cooldownlist:
        distance = ((coordinate[0] - cd_coordinate[0]) ** 2 + (coordinate[1] - cd_coordinate[1]) ** 2) ** 0.5
        if distance < threshold:
            return False
    for _, ev_coordinate, _ in eventlist:
        distance = ((coordinate[0] - ev_coordinate[0]) ** 2 + (coordinate[1] - ev_coordinate[1]) ** 2) ** 0.5
        if distance < threshold:
            return False
    return True

def keyETA(x1, y1, x2, y2, img):
    # Estimate the note's timing angle: cast rays from the centre of the cropped detection
    # in all 360 directions and return the angle whose ray crosses the most bright pixels.
    cropped_img = img[y1:y2, x1:x2]
    gray_cropped_img = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2GRAY)
    longest_distance = 0
    angle_longest_distance = 0
    height, width = gray_cropped_img.shape
    center_x, center_y = width // 2, height // 2
    for angle in range(360):
        x, y = center_x, center_y
        dx = np.cos(np.radians(angle))
        dy = np.sin(np.radians(angle))
        distance = 0
        while 0 <= x < width and 0 <= y < height:
            intensity = gray_cropped_img[int(y), int(x)]
            if intensity >= 0.68 * 255:  # count near-white pixels along the ray
                distance += 1
            x += dx
            y += dy
        if distance > longest_distance:
            longest_distance = distance
            angle_longest_distance = angle
    return angle_longest_distance
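
# A minimal, commented-out sanity check for keyETA (illustrative only; the synthetic
# image below is not part of the project): draw a bright horizontal line to the right
# of centre on a black 128x128 square, so the longest bright ray should sit near 0 degrees.
# _test_img = np.zeros((128, 128, 3), dtype=np.uint8)
# cv2.line(_test_img, (64, 64), (120, 64), (255, 255, 255), 3)
# print(keyETA(0, 0, 128, 128, _test_img))  # expected: 0, or very close to it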
# Load the YOLOv5s architecture from the hub, then swap in the locally fine-tuned
# 4-class checkpoint and its class names (assumes a CUDA device is available).
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
ckpt = torch.load("project_diva_mega39s_4obj.pt", map_location=torch.device("cuda"))
yolov5_load = model
yolov5_load.model = ckpt["model"]
yolov5_load.names = custom_names
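
# Optional, hedged tweak: the hub wrapper normally exposes confidence / IoU thresholds,
# which could be raised if spurious detections trigger stray key presses. Whether these
# attributes still take effect after the manual checkpoint swap above is an assumption.
# yolov5_load.conf = 0.5   # minimum detection confidence (assumed wrapper attribute)
# yolov5_load.iou = 0.45   # NMS IoU threshold (assumed wrapper attribute)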
while True:
    imgnp = getFrame()
    t0 = time()
    results = yolov5_load(imgnp)
    results_xyxy = results.xyxy[0]
    results_xyxy = results_xyxy.cpu().int()
    for i, bbox in enumerate(results_xyxy):
        x1, y1, x2, y2, _, ki = bbox
        if is_valid_bbox(x1, y1, x2, y2):
            coordinate = (int((x1 + x2) // 2), int((y1 + y2) // 2))
            if is_new_event(coordinate):
                timestamp = time()
                eta = keyETA(int(x1), int(y1), int(x2), int(y2), imgnp)
                if eta < 180:
                    # Nudge the measured angle by 7 degrees depending on which side of 90 it falls.
                    if eta > 90:
                        eta = eta + 7
                    else:
                        eta = eta - 7
                    # The 1.55 / 360 factor treats one full revolution of the timing indicator
                    # as ~1.55 s; schedule the key press for when it reaches 270 degrees.
                    eventlist.append((t0 + (1.55 / 360) * (270 - eta), coordinate, kilist[int(ki)]))
                    print(custom_names[int(ki)])
    # print(time() - t0)