-
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmain.py
139 lines (115 loc) · 5.52 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
# Importing Modules
import os
import pickle
import numpy as np
import cv2
import cvzone
import face_recognition
import firebase_admin
from firebase_admin import credentials, db, storage
# --- Firebase Admin SDK initialisation -------------------------------------
# The service-account key file is expected next to this script.
cred = credentials.Certificate("attendify-firebase-adminsdk-serviceAccountKey.json")
firebase_admin.initialize_app(cred, {
    'databaseURL': 'https://attendify-nomoreproxies-default-rtdb.asia-southeast1.firebasedatabase.app/',
    'storageBucket': 'attendify-nomoreproxies.appspot.com'
})
bucket = storage.bucket()  # Cloud Storage bucket holding the student photos

# --- Webcam capture --------------------------------------------------------
cap = cv2.VideoCapture(0)
cap.set(3, 640)   # property 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, 480)   # property 4 = CAP_PROP_FRAME_HEIGHT

# --- UI assets -------------------------------------------------------------
imgBackground = cv2.imread('Resources/background.png')
folderModePath = 'Resources/Modes'
modePathList = os.listdir(folderModePath)
imgModeList = [cv2.imread(os.path.join(folderModePath, path)) for path in modePathList]

# --- Known-face encodings --------------------------------------------------
print('Loading the encoded file...')
# NOTE(review): pickle.load executes arbitrary code if the file is tampered
# with; EncodeFile.p is assumed to be generated locally by the encoder script.
with open('EncodeFile.p', 'rb') as f:
    encodeListKnownWithIds = pickle.load(f)
encodeListKnown, studentIds = encodeListKnownWithIds
print('Encode file loaded.')

# --- Main-loop state -------------------------------------------------------
modeType = 0      # index into imgModeList (which side panel is shown)
counter = 0       # frames elapsed since a known face was first recognised
id = 0            # NOTE(review): shadows the built-in id(); kept so the loop's usages still work
imgStudent = []   # decoded student photo fetched from Cloud Storage
lastTime = 0      # tick count of the last processed frame (FPS throttle)
while True:
    # Grab one frame from the webcam.
    success, img = cap.read()

    # Throttle heavy processing to ~30 FPS using OpenCV's tick counter.
    currentTime = cv2.getTickCount()
    elapsedTime = (currentTime - lastTime) / cv2.getTickFrequency()
    if elapsedTime > 1 / 30:
        lastTime = currentTime

        # Downscale 4x and convert BGR -> RGB, as face_recognition expects.
        imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
        imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)

        # Locate and encode every face in the (small) current frame.
        faceCurFrame = face_recognition.face_locations(imgS)
        encodeCurFrame = face_recognition.face_encodings(imgS, faceCurFrame)

        # Compose the UI: live feed on the left, mode panel on the right.
        imgBackground[162:162 + 480, 55:55 + 640] = img
        imgBackground[44:44 + 633, 808:808 + 414] = imgModeList[modeType]

        for encodeFace, faceLoc in zip(encodeCurFrame, faceCurFrame):
            # Compare against all known encodings and pick the closest match.
            matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
            faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
            matchIndex = np.argmin(faceDis)

            if matches[matchIndex]:
                # A known face has been detected.
                print('Known face detected:', studentIds[matchIndex])
                # Scale the box back up (frame was shrunk 4x) and offset it
                # into the camera region of the background image.
                y1, x2, y2, x1 = faceLoc
                y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
                bbox = 55 + x1, 162 + y1, x2 - x1, y2 - y1
                imgBackground = cvzone.cornerRect(imgBackground, bbox, rt=0)
                id = studentIds[matchIndex]
                if counter == 0:
                    counter = 1
                    modeType = 1

        if counter != 0:
            if counter == 1:
                # First recognised frame: fetch the student's record and photo.
                studentInfo = db.reference(f'Students/{id}').get()
                print(studentInfo)
                blob = bucket.get_blob(f'Images/{id}.png')
                array = np.frombuffer(blob.download_as_string(), np.uint8)
                # BUG FIX: imdecode takes an imread flag, not a cvtColor code;
                # the original passed cv2.COLOR_BGRA2BGR (== 3), which is not
                # a valid ImreadMode. IMREAD_COLOR yields the intended 3-channel BGR.
                imgStudent = cv2.imdecode(array, cv2.IMREAD_COLOR)
                # The panel slot below is exactly 216x216; resize defensively
                # so a differently-sized upload cannot crash the overlay.
                imgStudent = cv2.resize(imgStudent, (216, 216))

                # Centre the student's name within the 414-px-wide panel.
                (w, h), _ = cv2.getTextSize(studentInfo['name'], cv2.FONT_HERSHEY_DUPLEX, 1, 1)
                offset = int((414 - w) // 2)
                cv2.putText(imgBackground, str(studentInfo['name']), (808 + offset, 445), cv2.FONT_HERSHEY_DUPLEX, 1,
                            (255, 255, 255), 1)
                cv2.putText(imgBackground, str(studentInfo['major']), (1006, 550), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                            (255, 255, 255), 1)
                cv2.putText(imgBackground, str(id), (1006, 493), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                            (255, 255, 255), 1)
                cv2.putText(imgBackground, str(studentInfo['year']), (1025, 625), cv2.FONT_HERSHEY_COMPLEX, 0.6,
                            (255, 255, 255), 1)
                cv2.putText(imgBackground, str(studentInfo['total_attendance']), (910, 625), cv2.FONT_HERSHEY_COMPLEX, 0.6,
                            (255, 255, 255), 1)
                cv2.putText(imgBackground, str(studentInfo['semester']), (1123, 625), cv2.FONT_HERSHEY_COMPLEX, 0.6,
                            (255, 255, 255), 1)

                # Overlay the student's photo onto the panel.
                imgBackground[175:175 + 216, 909:909 + 216] = imgStudent
            counter += 1

    # Display the composed frame; exit when 'q' is pressed.
    cv2.imshow("Attendify - No More Proxy!", imgBackground)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera and close all OpenCV windows.
cap.release()
# BUG FIX: the original referenced cv2.destroyAllWindows without calling it
# (missing parentheses), so the windows were never actually destroyed.
cv2.destroyAllWindows()

# Observed timings:
# 22 s   - app start on Onkar's Pavilion (i5, 8 GB RAM, 4-core CPU @ 2.8 GHz)
#  2.6 s - face detect + DB load
# 18 s   - app start on Suryakant's Pavilion (Ryzen 5, 12 GB RAM, 3.30 GHz)
#  2.6 s - face detect + DB load