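"""
Simple face-recognition demo built on OpenCV's Haar cascade detector and the
LBPH face recognizer. A console menu offers three operations: enrolling face
samples per user, training the recognizer on the collected dataset, and
running real-time recognition from the webcam.
"""
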
import os

import cv2
import numpy as np

# -------------------------
# Global configurations
# -------------------------
dataset_dir = "dataset"             # All captured faces are stored here; each subfolder is a user
model_path = "trainer.yml"          # File to store the trained model
face_cascade_path = "haarcascade_frontalface_default.xml"
unknown_threshold = 70              # Adjust threshold (lower value: stricter recognition)
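# Note: the Haar cascade XML ships with OpenCV; if it is not in the working
# directory, the copy bundled with the package can be used instead, e.g.
# cv2.data.haarcascades + "haarcascade_frontalface_default.xml".
# The LBPH "confidence" is a distance (0 means a perfect match), so faces whose
# distance exceeds unknown_threshold are treated as unknown; the value usually
# needs tuning per camera and lighting setup.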

# -------------------------
# Helper: Enrollment Function
# -------------------------
def enroll_user(name, sample_count=100):
    """
    Captures face samples from the webcam for a given user.
    If the user already exists (folder exists in the dataset), images are appended.
    Press 'q' to exit enrollment mode early.
    """
    user_dir = os.path.join(dataset_dir, name)
    if not os.path.exists(user_dir):
        os.makedirs(user_dir)
        print(f"Created new folder for user '{name}'.")
    else:
        print(f"Folder for user '{name}' exists. New images will be added.")

    face_cascade = cv2.CascadeClassifier(face_cascade_path)
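    # If the cascade file cannot be found, the classifier silently loads empty and
    # detectMultiScale() will simply return no faces; face_cascade.empty() can be
    # checked here to fail fast instead.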
    cap = cv2.VideoCapture(0)
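    # Index 0 opens the default camera; on machines with several cameras the index
    # may need changing, and cap.isOpened() can be used to verify the device.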
    count = 0
    # Offset new file names by the number of images already in the folder so that
    # re-enrolling an existing user appends samples instead of overwriting old ones.
    existing_samples = len(os.listdir(user_dir))

    print("Enrolling user. Press 'q' to quit enrollment early.")

    while True:
        ret, frame = cap.read()
        if not ret:
            print("Failed to grab frame.")
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
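        # scaleFactor=1.3 controls how much the image is shrunk at each detection
        # scale, and minNeighbors=5 keeps only detections confirmed by several
        # overlapping windows, trading a little sensitivity for fewer false positives.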
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

        for (x, y, w, h) in faces:
            count += 1
            face_img = gray[y:y+h, x:x+w]
            file_name = os.path.join(user_dir, f"{name}_{existing_samples + count}.jpg")
            cv2.imwrite(file_name, face_img)
            # Draw a rectangle for feedback
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)

        cv2.imshow("Enrollment - Press 'q' to Quit", frame)
        if cv2.waitKey(1) & 0xFF == ord('q') or count >= sample_count:
            break

    cap.release()
    cv2.destroyAllWindows()
    print(f"Enrollment complete. Collected {count} samples for '{name}'.")

# -------------------------
# Helper: Training Function
# -------------------------
def train_model():
    """
    Scans the dataset folder and trains an LBPH face recognizer with images from each user.
    The folder name is used as the user's label.
    The trained model is saved to model_path.
    Returns the mapping dictionary (id_to_name) for recognition.
    """
    face_samples = []
    labels = []
    label_map = {}  # Mapping from user name to a numerical ID
    current_id = 0

    # Walk through the dataset folder (each subfolder represents a user).
    # sorted() keeps the label IDs deterministic so that training and recognition
    # assign the same ID to the same user folder.
    for user in sorted(os.listdir(dataset_dir)):
        user_path = os.path.join(dataset_dir, user)
        if not os.path.isdir(user_path):
            continue

        if user not in label_map:
            label_map[user] = current_id
            current_id += 1

        for filename in os.listdir(user_path):
            if filename.endswith(".jpg") or filename.endswith(".png"):
                image_path = os.path.join(user_path, filename)
                img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
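                # LBPH works on single-channel 8-bit images, which is why the
                # samples are loaded (and were saved) as grayscale.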
                if img is None:
                    continue
                face_samples.append(img)
                labels.append(label_map[user])

    if len(face_samples) == 0:
        print("No face images found in dataset. Please enroll a user first.")
        return None

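    # Note: the cv2.face module comes from the opencv-contrib-python package,
    # not the base opencv-python install.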
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(face_samples, np.array(labels))
    recognizer.save(model_path)
    print(f"Model trained successfully and saved as '{model_path}'.")

    # Create an id-to-name mapping (reverse of label_map)
    id_to_name = {v: k for k, v in label_map.items()}
    return id_to_name


# -------------------------
# Helper: Real-Time Recognition
# -------------------------
def recognize_faces():
    """
    Runs real-time face recognition. If a face is not confidently recognized,
    it is labeled as 'Unknown'. While running, press:
      - 'q' to quit recognition.
      - 'n' to enroll a new user (this stops recognition, enrolls the new user,
        re-trains the model, and resumes recognition).
    """
    if not os.path.exists(model_path):
        print("No trained model found. Please run training first.")
        return

    # Load the recognizer model
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read(model_path)

    # Rebuild the label mapping from the dataset folder structure, iterating in the
    # same sorted order used during training so the IDs line up with the saved model.
    label_map = {}
    current_id = 0
    for user in sorted(os.listdir(dataset_dir)):
        user_path = os.path.join(dataset_dir, user)
        if not os.path.isdir(user_path):
            continue
        if user not in label_map:
            label_map[user] = current_id
            current_id += 1
    id_to_name = {v: k for k, v in label_map.items()}

    face_cascade = cv2.CascadeClassifier(face_cascade_path)
    cap = cv2.VideoCapture(0)

    print("Starting real-time recognition. Press 'q' to quit, 'n' to enroll a new user.")

    while True:
        ret, frame = cap.read()
        if not ret:
            print("Failed to grab frame.")
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

        for (x, y, w, h) in faces:
            roi_gray = gray[y:y+h, x:x+w]

            # Get prediction (label and confidence)
            label_id, confidence = recognizer.predict(roi_gray)
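            # LBPH reports confidence as a distance between histograms, so smaller
            # values mean a closer match.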
            # If confidence is above the threshold, mark as Unknown
            if confidence > unknown_threshold:
                name = "Unknown"
            else:
                name = id_to_name.get(label_id, "Unknown")

            # Draw rectangle and label on the frame
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            text = f"{name} ({confidence:.2f})"
            cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

        cv2.imshow("Real-Time Recognition", frame)
        key = cv2.waitKey(1) & 0xFF
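        # waitKey returns -1 when no key is pressed; masking with 0xFF keeps only the
        # low byte of the key code so the comparison with ord() works consistently.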

        if key == ord('q'):
            break
        elif key == ord('n'):
            # Enroll a new user while recognition is running.
            cap.release()
            cv2.destroyAllWindows()
            new_name = input("Enter the new person's name for enrollment: ").strip()
            enroll_user(new_name)
            # Re-train the model after enrollment
            mapping = train_model()
            if mapping is None:
                print("Error during training. Please enroll a user first.")
                return
            print("Model re-trained with new user. Resuming recognition...")
            # Reload the recognizer, reuse the mapping returned by train_model(),
            # and reopen the capture device.
            recognizer = cv2.face.LBPHFaceRecognizer_create()
            recognizer.read(model_path)
            id_to_name = mapping
            cap = cv2.VideoCapture(0)

    cap.release()
    cv2.destroyAllWindows()

# -------------------------
# Main Menu
# -------------------------
def main():
    # Ensure the dataset directory exists
    if not os.path.exists(dataset_dir):
        os.makedirs(dataset_dir)

    while True:
        print("\n************ MAIN MENU ************")
        print("1. Enroll (add images for a new/existing user)")
        print("2. Train model")
        print("3. Real-time face recognition")
        print("4. Quit")
        choice = input("Enter your choice (1-4): ").strip()

        if choice == '1':
            user_name = input("Enter the user's name (if already exists, new images will be added): ").strip()
            enroll_user(user_name)
        elif choice == '2':
            mapping = train_model()
            if mapping is not None:
                print("Current user mapping (ID: Name):")
                for id_val, name in mapping.items():
                    print(f"{id_val}: {name}")
        elif choice == '3':
            recognize_faces()
        elif choice == '4':
            print("Exiting program.")
            break
        else:
            print("Invalid choice. Please select a valid option.")


if __name__ == "__main__":
    main()