Skip to content

Commit

Permalink
Merge pull request #32 from Capstone-Projects-2024-Spring/yang
Browse files Browse the repository at this point in the history
Fixed the issue where the application window kept popping up after exiting
  • Loading branch information
LeeMamori authored Apr 29, 2024
2 parents d2e0557 + 3a69525 commit f19828b
Show file tree
Hide file tree
Showing 7 changed files with 65 additions and 35 deletions.
57 changes: 45 additions & 12 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@
import gesture.mouse_simulator as ms
import officialVersion.gesture_recognition as gs
import oldversion.cleanup as cleanup
from tkinter import messagebox, ttk
import tkinter.messagebox as messagebox
import tkinter.ttk as ttk
import configparser
from playsound import playsound

Expand All @@ -26,13 +27,39 @@
hotkey_entry = tk.Entry

recognitior = gs.GestureRecognition()
def start_mouse_simulation():


def start_mouse_simulation():
    """Run the hand-tracking mouse simulator, then tell the user it has closed.

    Blocks until the simulator's OpenCV window is exited (ESC), after which an
    informational dialog is shown.
    """
    ms.start_recognition()  # blocking call; returns when the simulator window closes
    messagebox.showinfo("message", "simulation closed!")

def start_gesture_recognition():

def show_tutorial():
    """Open a child window showing a read-only, scrollable gesture tutorial."""
    win = tk.Toplevel(root)
    win.title("Tutorial")
    win.geometry("600x400")
    win.configure(bg="#f0f0f0")

    # Pack the scrollbar first so it claims the right edge before the text widget
    # expands into the remaining space.
    bar = tk.Scrollbar(win)
    bar.pack(side=tk.RIGHT, fill=tk.Y)

    viewer = tk.Text(
        win,
        wrap=tk.WORD,
        yscrollcommand=bar.set,
        font=("Helvetica", 14),
        bg="#ffffff",
        fg="#333333",
    )
    viewer.pack(expand=True, fill='both', padx=20, pady=20)

    guide = """
1. Move the mouse: Extend your right hand palm to move (note that it is the right hand).
2. Left mouse button: Keep your fingers upward, use your index finger and thumb to touch; keep the other fingers upward, this is the left click.
3. Right mouse button: Similarly, use your middle finger and thumb to touch.
4. Drag, hold the left button: Make a fist, then extend your thumb to trigger the drag mode, moving with the thumb as the coordinate. Switch to using your palm to engage the drag mode.
"""
    viewer.insert(tk.END, guide)
    viewer.config(state='disabled')  # make the tutorial read-only

    # Let the scrollbar drive the text view.
    bar.config(command=viewer.yview)


def start_gesture_recognition():
    """Run the gesture recognizer, then tell the user it has closed.

    Blocks until the recognition loop exits (ESC in its camera window), after
    which an informational dialog is shown.
    """
    recognitior.start()  # blocking call on the module-level recognizer instance
    messagebox.showinfo("message", "recognition closed!")

Expand All @@ -49,7 +76,7 @@ def load_settings(): # load from config file
settings["hotkey4"] = config.get('hotkey4', 'value')
settings["hotkey5"] = config.get('hotkey5', 'value')
except Exception as e:
messagebox.showerror('Error loading config, try saving settings first', f'fail {str(e)}')
messagebox.showerror('config file not exist, will load from preset file', f'fail {str(e)}')


def save_settings(selected_camera, selected_music_app, hotkey):
Expand Down Expand Up @@ -77,13 +104,14 @@ def save_settings(selected_camera, selected_music_app, hotkey):
def load_preset(preset, hotkey_entry_var):
    """Load the five hotkeys of a named preset into the hotkey entry variables.

    Parameters:
        preset: section name inside officialVersion/preset.ini (e.g. a preset id).
        hotkey_entry_var: sequence of at least five tk variable objects; each
            receives one hotkey via its .set() method, in order h1..h5.

    Failures (missing file, missing section/option) are reported to the user
    with an error dialog rather than raised, so the GUI keeps running.
    """
    config2 = configparser.ConfigParser()
    # read() silently yields an empty parser when the file is absent; the
    # subsequent get() then raises, which we surface in the except below.
    config2.read('officialVersion/preset.ini')

    try:
        # Option keys h1..h5 map positionally onto the five hotkey entries.
        for idx in range(5):
            hotkey_entry_var[idx].set(config2.get(preset, f'h{idx + 1}'))
    except Exception as e:
        messagebox.showerror('Error to load preset file', f'fail {str(e)}')

def save_preset(preset, hotkey):
config2 = configparser.ConfigParser()
Expand Down Expand Up @@ -267,6 +295,11 @@ def update(ind):
background='#87CEEB', fg="white")
mouse_button.grid(row=2, column=2, padx=8, pady=8, ipadx=30, ipady=5, sticky='ew')

tutorial_button = tk.Button(root, text="Show Tutorial",
command=show_tutorial,
background='#87CEEB', fg="white")
tutorial_button.grid(row=2, column=3, padx=8, pady=8, ipadx=30, ipady=5, sticky='ew')

settings_button = tk.Button(root, text="Settings",
command=open_settings,
background='#87CEEB', fg="white")
Expand All @@ -277,6 +310,6 @@ def update(ind):
background='#87CEEB', fg="white")
exit_button.grid(row=3, column=2, padx=8, pady=8, ipadx=30, ipady=5, sticky='ew')

# playsound('.assets/bird_audio.wav')
playsound('.assets/bird_audio.wav')
# start the event loop
root.mainloop()
7 changes: 3 additions & 4 deletions gesture/mouse_simulator.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import cv2
import numpy as np
import pyautogui

from .hand_detector import HandDetector
import time
import autopy
Expand Down Expand Up @@ -117,11 +116,11 @@ def start_recognition():
# If the index and middle fingers are up and the distance between the fingertips is less than a certain
# value, it is considered a mouse click. A click registers when the fingertip distance is less than 43 pixels.

if distance1 < 43 and frame > 2 and not toggle:
if distance1 < 43 and frame < 2 and not toggle:
pyautogui.leftClick()
cv2.putText(img, "left_click", (150, 50), cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 3)
print("Left Click")
elif distance2 < 43 and frame > 2 and not toggle:
elif distance2 < 43 and frame < 2 and not toggle:
pyautogui.rightClick()
print("Right click")
cv2.putText(img, "rigth_click", (150, 50), cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 3)
Expand Down Expand Up @@ -158,7 +157,7 @@ def start_recognition():
cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)

# Display image, input window name and image data
cv2.imshow('frame', img)
cv2.imshow('Mouse Simulation', img)
if cv2.waitKey(1) & 0xFF == 27: # Each frame lags for 20 milliseconds and then disappears, ESC key to exit
break

Expand Down
10 changes: 5 additions & 5 deletions officialVersion/config.ini
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
[hotkey]
value = volumeup
value = a

[hotkey2]
value = volumedown
value = d

[hotkey3]
value = a
value = space

[hotkey4]
value = d
value = left

[hotkey5]
value = space
value = right

5 changes: 3 additions & 2 deletions officialVersion/draw_utiles.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import cv2
from mediapipe import solutions
from mediapipe.framework.formats import landmark_pb2

import mediapipe.python.solutions as solutions
import mediapipe.framework.formats.landmark_pb2 as landmark_pb2
import numpy as np

MARGIN = 10 # pixels
Expand Down
15 changes: 8 additions & 7 deletions officialVersion/gesture_recognition.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ def _load_hotkey(self): # load from config file
frames = None

def start(self):
last_key = None
cap = cv2.VideoCapture(0)
self._load_hotkey()
while True:
Expand All @@ -97,30 +98,35 @@ def start(self):
frame = draw_landmarks_on_image(frame, gesture_recognition_result)
# print('gesture recognition result: {}' + format(gesture_recognition_result))
if gesture_recognition_result.gestures[0][0].category_name == 'Pointing_up':
# if not flags[gestures[0]]:
pyautogui.keyDown(self.gestures[0])

# flags[gestures[0]] = True
cv2.putText(frame, self.gestures[0], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
elif gesture_recognition_result.gestures[0][0].category_name == 'pointing_down':
pyautogui.keyDown(self.gestures[1])

cv2.putText(frame, self.gestures[1], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
elif gesture_recognition_result.gestures[0][0].category_name == 'pinkyThumb':
pyautogui.keyDown(self.gestures[2])

cv2.putText(frame, self.gestures[2], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
elif gesture_recognition_result.gestures[0][0].category_name == 'three':
# if not flags[gestures[3]]:
pyautogui.keyDown(self.gestures[3])

# flags[gestures[3]] = True
cv2.putText(frame, self.gestures[3], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
elif gesture_recognition_result.gestures[0][0].category_name == 'four':
pyautogui.keyDown(self.gestures[4])

cv2.putText(frame, self.gestures[4], (250, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 200, 150), 3)
elif gesture_recognition_result.gestures[0][0].category_name == 'Yeah':
print("YEAHHHHHHH no gesture map......... yet")
elif gesture_recognition_result.gestures[0][0].category_name == 'index_pinky':
print("no action my love")

elif gesture_recognition_result.gestures[0][0].category_name == 'palm':
print("do nothing")
pyautogui.keyUp(self.gestures[0])
pyautogui.keyUp(self.gestures[1])
pyautogui.keyUp(self.gestures[2])
Expand All @@ -130,14 +136,9 @@ def start(self):
else:
print("do nothing")

cv2.imshow('Camera Feed', frame)
cv2.imshow('Gesture Recognition', frame)
if cv2.waitKey(1) & 0xFF == 27: # Each frame lags for 20 milliseconds and then disappears, ESC key to exit
break

cap.release()
cv2.destroyAllWindows()


if __name__ == '__main__':
recognizer = GestureRecognition()
GestureRecognition.start(recognizer)
2 changes: 0 additions & 2 deletions test/Unit Testing/UnitTests.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,5 +180,3 @@ def test_mouse_control(self):
self.assertTrue(cursor_moved, "Cursor did not move within the timeframe")


if __name__ == '__main__':
unittest.main()
4 changes: 1 addition & 3 deletions test/Unit Testing/unitTestHandDetector.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ def findDistance(self, p1, p2, img=None):
return length, info


def main():
def test():
cap = cv2.VideoCapture(0)
detector = HandDetector(detectionCon=0.8, maxHands=2)
while True:
Expand Down Expand Up @@ -187,5 +187,3 @@ def main():
cv2.waitKey(1)


if __name__ == "__main__":
main()

0 comments on commit f19828b

Please sign in to comment.