diff --git a/officialVersion/config.ini b/officialVersion/config.ini
index a79dd7bc1..cf7963884 100644
--- a/officialVersion/config.ini
+++ b/officialVersion/config.ini
@@ -5,10 +5,10 @@
 value = volumeup
 value = volumedown
 [hotkey3]
-value = w
+value = a
 [hotkey4]
-value = s
+value = d
 [hotkey5]
 value = ctrl
diff --git a/officialVersion/gesture_recognition.py b/officialVersion/gesture_recognition.py
index 9ad663d11..ecf116fa6 100644
--- a/officialVersion/gesture_recognition.py
+++ b/officialVersion/gesture_recognition.py
@@ -16,13 +16,13 @@ VisionRunningMode = mp.tasks.vision.RunningMode

 #here data from config.ini should be accessed that will change button pressed based on saved hotkey
-gestures = {
+gestures = [
     'volumeup',
     'volumedown',
     'w',
     's',
     'ctrl'
-}
+]

 # Create a gesture recognizer instance with the live stream mode:
@@ -58,7 +58,7 @@ def draw_progress_bar(img, value, max_value, text, pos, bar_color=(0, 255, 0), t
 def load_hotkey(): #load from config file
     try:
         config = configparser.ConfigParser()
-        config.read('config.ini')
+        config.read('officialVersion/config.ini')
         gestures[0] = config.get('hotkey', 'value')
         gestures[1] = config.get('hotkey2', 'value')
         gestures[2] = config.get('hotkey3', 'value')
@@ -85,16 +85,33 @@ def start():
             frame = draw_landmarks_on_image(frame, gesture_recognition_result)
             print('gesture recognition result: {}' + format(gesture_recognition_result))
             if gesture_recognition_result.gestures[0][0].category_name == 'Pointing_up':
-                #pyautogui.press(gestures[0])
-                pyautogui.press('volumeup')
+                pyautogui.press(gestures[0])
+                cv2.putText(frame, gestures[0], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
             elif gesture_recognition_result.gestures[0][0].category_name == 'pointing_down':
-                pyautogui.press('volumedown')
+                pyautogui.press(gestures[1])
+                cv2.putText(frame, gestures[1], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
             elif gesture_recognition_result.gestures[0][0].category_name == 'pinkyThumb':
                 pyautogui.keyDown(gestures[2])
-                cv2.putText(frame, gestures[2], 20, cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
+                cv2.putText(frame, gestures[2], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
             elif gesture_recognition_result.gestures[0][0].category_name == 'three':
-                pyautogui.keyDown('s')
+                pyautogui.keyDown(gestures[3])
+                cv2.putText(frame, gestures[3], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
+            elif gesture_recognition_result.gestures[0][0].category_name == 'four':
+                pyautogui.keyDown(gestures[4])
+                cv2.putText(frame, gestures[4], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
+            elif gesture_recognition_result.gestures[0][0].category_name == 'Yeah':
+                print("YEAHHHHHHH no gesture map......... yet")
+            elif gesture_recognition_result.gestures[0][0].category_name == 'index_pinky':
+                print("no action my love")
+
+            elif gesture_recognition_result.gestures[0][0].category_name == 'palm':
+                pyautogui.keyUp(gestures[0])
+                pyautogui.keyUp(gestures[1])
+                pyautogui.keyUp(gestures[2])
+                pyautogui.keyUp(gestures[3])
+                pyautogui.keyUp(gestures[4])
+
         cv2.imshow('Camera Feed', frame)
         if cv2.waitKey(1) & 0xFF == ord('q'):
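
Reviewer note: the gestures = { ... } to gestures = [ ... ] change above is load-bearing, not cosmetic. A set is unordered and does not support indexing, so the gestures[0] = config.get(...) assignments in load_hotkey() would raise TypeError against the old set literal; a list accepts them. For anyone who wants to exercise the hotkey-loading pattern in isolation, below is a minimal standalone sketch assuming the five [hotkey*] sections shown in config.ini above. The names DEFAULTS, CONFIG_PATH, and load_hotkeys are illustrative, not part of this patch.

    import configparser

    # Path as fixed by this patch (was 'config.ini', which only resolves
    # when the script runs from inside officialVersion/).
    CONFIG_PATH = 'officialVersion/config.ini'

    # Defaults mirror the gestures list in gesture_recognition.py.
    DEFAULTS = ['volumeup', 'volumedown', 'w', 's', 'ctrl']

    def load_hotkeys(path=CONFIG_PATH):
        """Return the five configured hotkeys, falling back to DEFAULTS on error."""
        config = configparser.ConfigParser()
        config.read(path)  # silently skips a missing file; get() fails instead
        try:
            # Sections are named [hotkey], [hotkey2] ... [hotkey5], each with one 'value' key.
            sections = ['hotkey', 'hotkey2', 'hotkey3', 'hotkey4', 'hotkey5']
            return [config.get(section, 'value') for section in sections]
        except configparser.Error:
            return list(DEFAULTS)

    print(load_hotkeys())  # with the patched config: ['volumeup', 'volumedown', 'a', 'd', 'ctrl']

One detail worth knowing: ConfigParser.read() does not raise on a missing file, so the failure only surfaces as configparser.NoSectionError from get(). Wrapping the whole read-and-get block in one try, as load_hotkey() does in the patch, is the right granularity.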
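
The control scheme the rewritten elif chain implements: tap gestures ('Pointing_up', 'pointing_down') fire a one-shot pyautogui.press(), hold gestures ('pinkyThumb', 'three', 'four') latch a key with pyautogui.keyDown(), and 'palm' releases everything with pyautogui.keyUp(). Here is a table-driven sketch of the same dispatch, offered as a possible follow-up refactor rather than as part of this patch; the gesture category names match the ones the recognizer emits above, and dispatch() is a hypothetical helper.

    import pyautogui

    gestures = ['volumeup', 'volumedown', 'a', 'd', 'ctrl']  # indices as in load_hotkey()

    # Tap gestures: one press per recognition.
    TAP = {'Pointing_up': 0, 'pointing_down': 1}
    # Hold gestures: key stays down until 'palm' releases it.
    HOLD = {'pinkyThumb': 2, 'three': 3, 'four': 4}

    def dispatch(category_name):
        """Map one recognized gesture category to its pyautogui action."""
        if category_name in TAP:
            pyautogui.press(gestures[TAP[category_name]])
        elif category_name in HOLD:
            pyautogui.keyDown(gestures[HOLD[category_name]])
        elif category_name == 'palm':
            # Mirrors the patch: release all five keys, not just the held ones.
            # keyUp() on a key that is already up is a harmless no-op.
            for key in gestures:
                pyautogui.keyUp(key)

This keeps the patch's behavior (palm releases all five keys) while turning each new gesture-to-key binding into a one-line table entry instead of another elif branch.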