successful reconfigure of hotkeys plus key display
tul53850 committed Apr 23, 2024
1 parent a7c55b0 commit 578278a
Showing 2 changed files with 27 additions and 10 deletions.
4 changes: 2 additions & 2 deletions officialVersion/config.ini
@@ -5,10 +5,10 @@ value = volumeup
 value = volumedown
 
 [hotkey3]
-value = w
+value = a
 
 [hotkey4]
-value = s
+value = d
 
 [hotkey5]
 value = ctrl
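For reference, load_hotkey in gesture_recognition.py reads sections named hotkey, hotkey2 and hotkey3, each with a single value key; following that pattern, hotkey4 and hotkey5 are presumably read the same way below the collapsed lines. A minimal sketch, assuming that layout, of a hypothetical helper (not part of this commit) that regenerates the file with configparser:

# sketch_write_config.py -- hypothetical helper, not in this commit.
# Assumes the [hotkey]..[hotkey5] section names and the 'value' key used by load_hotkey.
import configparser

defaults = {
    'hotkey': 'volumeup',
    'hotkey2': 'volumedown',
    'hotkey3': 'a',
    'hotkey4': 'd',
    'hotkey5': 'ctrl',
}

config = configparser.ConfigParser()
for section, key in defaults.items():
    config[section] = {'value': key}

with open('officialVersion/config.ini', 'w') as f:
    config.write(f)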
33 changes: 25 additions & 8 deletions officialVersion/gesture_recognition.py
@@ -16,13 +16,13 @@
 VisionRunningMode = mp.tasks.vision.RunningMode
 
 #here data from config.ini should be accessed that will change button pressed based on saved hotkey
-gestures = {
+gestures = [
     'volumeup',
     'volumedown',
     'w',
     's',
     'ctrl'
-}
+]
 
 
 # Create a gesture recognizer instance with the live stream mode:
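The switch from braces to brackets matters because a brace literal with no colons is a Python set, and load_hotkey assigns by index (gestures[0] = ...), which a set does not support. A minimal illustration, outside this commit:

# Illustrative only -- shows why the container type changed from set to list.
gestures_set = {'volumeup', 'volumedown', 'w', 's', 'ctrl'}   # set literal (old code)
gestures_list = ['volumeup', 'volumedown', 'w', 's', 'ctrl']  # list literal (new code)

try:
    gestures_set[0] = 'a'       # sets are unordered and do not support item assignment
except TypeError as e:
    print('set fails:', e)
gestures_list[0] = 'a'          # lists support item assignment, so load_hotkey can overwrite defaults
print(gestures_list)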
@@ -58,7 +58,7 @@ def draw_progress_bar(img, value, max_value, text, pos, bar_color=(0, 255, 0), t
 def load_hotkey(): #load from config file
     try:
         config = configparser.ConfigParser()
-        config.read('config.ini')
+        config.read('officialVersion/config.ini')
         gestures[0] = config.get('hotkey', 'value')
         gestures[1] = config.get('hotkey2', 'value')
         gestures[2] = config.get('hotkey3', 'value')
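The visible hunk covers only the first three reads and the start of the try block; the remaining reads and the except clause are collapsed. A minimal sketch of the same idea with a fallback, assuming the [hotkey4] and [hotkey5] section names (this is not the commit's actual error handling):

# Hypothetical variant of load_hotkey -- the commit's own except clause is collapsed in this view.
import configparser

def load_hotkey_sketch(path='officialVersion/config.ini'):
    sections = ['hotkey', 'hotkey2', 'hotkey3', 'hotkey4', 'hotkey5']
    loaded = ['volumeup', 'volumedown', 'a', 'd', 'ctrl']  # fall back to the config.ini defaults above
    try:
        config = configparser.ConfigParser()
        config.read(path)
        for i, section in enumerate(sections):
            loaded[i] = config.get(section, 'value')
    except configparser.Error:
        pass  # keep the defaults for anything that could not be read
    return loaded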
@@ -85,16 +85,33 @@ def start():
         frame = draw_landmarks_on_image(frame, gesture_recognition_result)
         print('gesture recognition result: {}' + format(gesture_recognition_result))
         if gesture_recognition_result.gestures[0][0].category_name == 'Pointing_up':
-            #pyautogui.press(gestures[0])
-            pyautogui.press('volumeup')
+            pyautogui.press(gestures[0])
+            cv2.putText(frame, gestures[0], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
         elif gesture_recognition_result.gestures[0][0].category_name == 'pointing_down':
-            pyautogui.press('volumedown')
+            pyautogui.press(gestures[1])
+            cv2.putText(frame, gestures[1], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
+
         elif gesture_recognition_result.gestures[0][0].category_name == 'pinkyThumb':
             pyautogui.keyDown(gestures[2])
-            cv2.putText(frame, gestures[2], 20, cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
+            cv2.putText(frame, gestures[2], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
         elif gesture_recognition_result.gestures[0][0].category_name == 'three':
-            pyautogui.keyDown('s')
+            pyautogui.keyDown(gestures[3])
+            cv2.putText(frame, gestures[3], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
+        elif gesture_recognition_result.gestures[0][0].category_name == 'four':
+            pyautogui.keyDown(gestures[4])
+            cv2.putText(frame, gestures[4], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
+        elif gesture_recognition_result.gestures[0][0].category_name == 'Yeah':
+            print("YEAHHHHHHH no gesture map......... yet")
+        elif gesture_recognition_result.gestures[0][0].category_name == 'index_pinky':
+            print("no action my love")
+
+        elif gesture_recognition_result.gestures[0][0].category_name == 'palm':
+            pyautogui.keyUp(gestures[0])
+            pyautogui.keyUp(gestures[1])
+            pyautogui.keyUp(gestures[2])
+            pyautogui.keyUp(gestures[3])
+            pyautogui.keyUp(gestures[4])
 
 
         cv2.imshow('Camera Feed', frame)
         if cv2.waitKey(1) & 0xFF == ord('q'):
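One design note on the branch above: Pointing_up and pointing_down use pyautogui.press (tap and release), while pinkyThumb, three and four use keyDown, so those keys stay held until the palm gesture releases every configured key with keyUp. A hedged sketch of the same mapping as a dispatch table, not the commit's code:

# Hypothetical refactor of the if/elif chain above -- gesture names and list indices taken from the diff.
import cv2
import pyautogui

ACTIONS = {
    'Pointing_up':   ('press',   0),   # tap-and-release keys
    'pointing_down': ('press',   1),
    'pinkyThumb':    ('keyDown', 2),   # held keys, released by the 'palm' gesture
    'three':         ('keyDown', 3),
    'four':          ('keyDown', 4),
}

def handle_gesture(frame, category_name, gestures):
    if category_name == 'palm':
        for key in gestures:               # release anything still held down
            pyautogui.keyUp(key)
        return
    action = ACTIONS.get(category_name)
    if action is None:
        return                             # 'Yeah', 'index_pinky', etc. remain unmapped
    kind, index = action
    key = gestures[index]
    getattr(pyautogui, kind)(key)          # pyautogui.press(...) or pyautogui.keyDown(...)
    cv2.putText(frame, key, (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)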
