import cv2
import numpy as np
import time

# ----------------------------
# Configuration and Setup
# ----------------------------

# Paths to the YOLO files (update these if your files are in a different directory)
config_path = './yolov3.cfg'
weights_path = './yolov3.weights'
names_path = './coco.names'

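# Optional sanity check (a minimal sketch): cv2.dnn raises fairly cryptic errors when a
# file is missing, so failing early with a clear message makes setup problems obvious.
import os
for _path in (config_path, weights_path, names_path):
    if not os.path.isfile(_path):
        raise FileNotFoundError(f"Required YOLO file not found: {_path}")
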
# Load class names from the coco.names file
with open(names_path, 'r') as f:
    classes = [line.strip() for line in f.readlines()]

# Set up the neural network
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
# Optionally, set a preferable backend and target to improve speed
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)  # For GPU inference, see the CUDA sketch below

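# If OpenCV was built with CUDA support, the DNN module can run YOLO on the GPU instead.
# A minimal sketch (these calls exist in OpenCV 4.2+ but are only honored when the build
# actually includes the CUDA backend):
# net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
# net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
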
# Get all layer names from the network
layer_names = net.getLayerNames()

# Use .flatten() so that we always work with a 1D array of indices.
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

# Confidence and non-maximum suppression thresholds
conf_threshold = 0.5  # Minimum probability to filter weak detections
nms_threshold = 0.4   # Non-maximum suppression threshold

# Colors for each class for bounding boxes (for visualization)
np.random.seed(42)
colors = np.random.randint(0, 255, size=(len(classes), 3), dtype='uint8')


# ----------------------------
# Object Detection Function
# ----------------------------
def detect_objects(frame):
    """
    Process a frame to detect objects using YOLO.
    Returns bounding boxes, confidences, and class IDs.
    """
    height, width = frame.shape[:2]
    # Create a blob from the input frame and perform a forward pass
    blob = cv2.dnn.blobFromImage(frame, scalefactor=1/255.0, size=(416, 416),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    # Inference; each YOLO output layer returns an (N, 85) array: one row per candidate
    # detection with 4 box values, 1 objectness score, and 80 class scores
    start = time.time()
    detections = net.forward(output_layers)
    end = time.time()

    # Uncomment to print inference time for debugging
    # print(f"Inference time: {end - start:.2f} seconds")

    boxes = []
    confidences = []
    class_ids = []

    # Process each output layer's detections
    for output in detections:
        for detection in output:
            # detection[0:4] holds center_x, center_y, width, height; detection[4] is the
            # objectness score; detection[5:] holds the per-class probabilities
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > conf_threshold:
                # Scale bounding box coordinates back to the size of the image
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)

                # Calculate the top-left corner of the bounding box
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)

                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    # Apply non-maximum suppression to remove overlapping boxes
    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
    final_boxes = []
    final_confidences = []
    final_class_ids = []
    if len(indices) > 0:
        for i in indices.flatten():
            final_boxes.append(boxes[i])
            final_confidences.append(confidences[i])
            final_class_ids.append(class_ids[i])

    return final_boxes, final_confidences, final_class_ids

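# Example usage on a single still image (a sketch; 'sample.jpg' is a placeholder path):
#   image = cv2.imread('sample.jpg')
#   boxes, confidences, class_ids = detect_objects(image)
#   for box, conf, cid in zip(boxes, confidences, class_ids):
#       print(classes[cid], conf, box)

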
# ----------------------------
# Main Function: Real-Time Object Detection
# ----------------------------
def main():
    cap = cv2.VideoCapture(0)  # Start the webcam
    if not cap.isOpened():
        print("Error: Could not open webcam.")
        return

    while True:
        ret, frame = cap.read()
        if not ret:
            print("Failed to grab a frame.")
            break

        # Detect objects in the frame
        boxes, confidences, class_ids = detect_objects(frame)

        # Draw bounding boxes and labels on the frame
        for i, box in enumerate(boxes):
            x, y, w, h = box
            color = [int(c) for c in colors[class_ids[i]]]
            label = f"{classes[class_ids[i]]}: {confidences[i]:.2f}"
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.putText(frame, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, color, 2)

        # Display the frame
        cv2.imshow("Real-Time Object Detection", frame)

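        # To gauge throughput, one could time each iteration and overlay the FPS
        # (a sketch; loop_start is a hypothetical variable set via time.time() at the
        # top of the while loop):
        #   fps = 1.0 / max(time.time() - loop_start, 1e-6)
        #   cv2.putText(frame, f"FPS: {fps:.1f}", (10, 30),
        #               cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
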
        # Exit on pressing 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()