-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
126 lines (104 loc) · 5.22 KB
/
app.py
File metadata and controls
126 lines (104 loc) · 5.22 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
# Face similarity checker using InsightFace for real-time comparison
import cv2
from insightface.app import FaceAnalysis
from scipy.spatial.distance import cosine
import streamlit as st
from streamlit_webrtc import webrtc_streamer
import av
import numpy as np
# ---- Page & model setup -------------------------------------------------
# Streamlit page chrome: wide layout with the sidebar opened by default.
st.set_page_config(
    page_title="Face Similarity Check",
    layout="wide",
    initial_sidebar_state="expanded",
)
st.title("Face Similarity Check")

# InsightFace "buffalo_sc" bundle: face detection + normalized embeddings.
app = FaceAnalysis(name="buffalo_sc")
# ctx_id=0 targets the first GPU; pass -1 to run on CPU instead.
app.prepare(ctx_id=0)

# Two-pane layout: narrow left pane for the ID-card upload, wide right
# pane for the live webcam stream.
col1, col2 = st.columns([1, 4], gap="large")
with col1:
    st.header("Extract Face from ID Card")
    uploaded_file = st.file_uploader("Upload your ID Card", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        # Decode the uploaded bytes into a BGR OpenCV image.
        buf = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
        reference_img = cv2.imdecode(buf, cv2.IMREAD_COLOR)

        # Run InsightFace on the ID photo; abort the script run if it
        # cannot find a face to use as the comparison reference.
        reference_faces = app.get(reference_img)
        if not reference_faces:
            st.error("No face found in reference image!")
            st.stop()

        # Embedding of the first detected face — compared against every
        # face seen in the live stream.
        reference_embedding = reference_faces[0].normed_embedding
        st.success("Reference image loaded successfully!")

        # Separate Haar-cascade pass, used only to crop a preview of the
        # face region for display (not for recognition).
        gray = cv2.cvtColor(reference_img, cv2.COLOR_BGR2GRAY)
        detector = cv2.CascadeClassifier(
            cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
        )
        detections = detector.detectMultiScale(
            gray,
            scaleFactor=1.3,   # image-pyramid scale step
            minNeighbors=3,    # detection confidence knob
            minSize=(80, 80),  # ignore tiny false positives
        )

        for x, y, w, h in detections:
            # Pad the box 30% vertically (hair/chin) and 20% horizontally,
            # clamped to the image bounds.
            pad_y = int(h * 0.3)
            pad_x = int(w * 0.2)
            top = max(0, y - pad_y)
            bottom = min(reference_img.shape[0], y + h + pad_y)
            left = max(0, x - pad_x)
            right = min(reference_img.shape[1], x + w + pad_x)
            crop = reference_img[top:bottom, left:right]
            # Streamlit expects RGB, OpenCV stores BGR.
            st.image(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB), caption="Extracted Face")
with col2:
    st.header("Live Camera Feed")
    if uploaded_file is not None:
        def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
            """Annotate one webcam frame with match/no-match boxes.

            Each face detected in the frame is compared against the
            reference embedding extracted from the uploaded ID card;
            a colored box and similarity score are drawn per face.
            """
            img = frame.to_ndarray(format="bgr24")
            for face in app.get(img):
                x1, y1, x2, y2 = face.bbox.astype(int)
                # Single cosine-distance computation (the original code
                # derived the distance, inverted it, then inverted it back).
                dist = cosine(reference_embedding, face.normed_embedding)
                similarity = 1 - dist  # displayed score
                # NOTE: the 0.6 threshold is on the cosine DISTANCE —
                # equivalent to similarity > 0.4 — not "60% similarity".
                if dist < 0.6:
                    color = (0, 255, 0)  # green box for a match
                    label = f"MATCH: {similarity:.2f}"
                else:
                    color = (0, 0, 255)  # red box for a mismatch
                    label = f"NO MATCH: {similarity:.2f}"
                cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
                cv2.putText(img, label, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
            return av.VideoFrame.from_ndarray(img, format="bgr24")

        # Real-time WebRTC stream; every video frame passes through the
        # callback above. Audio is disabled in both directions.
        webrtc_streamer(
            key="face_recognition_stream",
            video_frame_callback=video_frame_callback,
            sendback_audio=False,
            media_stream_constraints={
                "video": {
                    "width": 720,
                    "height": 200,
                },
                "audio": False,
            },
        )
    else:
        st.info("Please upload your ID Card")
# Page-bottom reminder while no ID card has been uploaded yet.
if uploaded_file is None:
    st.warning("Please upload your ID Card")