import sys
import os
import time
import logging
import torch
import cv2
import numpy as np
import mediapipe as mp
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from torchvision import transforms

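# Expected layout (implied by BASE_DIR below): this file sits alongside
# "Base test/Sign-Language-Recognition/", which provides utils.py
# (load_model, LabelMapper) and the app/ package.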
# Set up paths to import from the existing codebase
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "Base test/Sign-Language-Recognition"))
sys.path.insert(0, BASE_DIR)

from utils import load_model, LabelMapper
# frame_utils.py lives in the app package under BASE_DIR, so it is imported
# via the package path
from app.frame_utils import extract_hand_features_mask, draw_hand_features

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("ASL_API")

app = FastAPI(
    title="ASL Real Inference API",
    description="Server for real-time ASL hand sign detection using a PyTorch model",
    version="1.0.0"
)

# CORS: wide open for development; tighten allow_origins for production
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class SignPrediction(BaseModel):
    letter: str
    confidence: float
    timestamp: float
    clientTimestamp: float
    handDetected: bool
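
# Illustrative serialized SignPrediction (values are made up):
# {"letter": "A", "confidence": 0.93, "timestamp": 1712000000.0,
#  "clientTimestamp": 1711999999.9, "handDetected": true}
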
# Global resources, loaded once at startup
class DetectionResources:
    def __init__(self):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"Using device: {self.device}")
        # Load model weights (path relative to BASE_DIR)
        model_path = os.path.join(BASE_DIR, 'data/weights/asl_crop_v4_1_mobilenet_weights.pth')
        if not os.path.exists(model_path):
            # Fall back to the older v4.0 weights if the v4.1 file is missing
            model_path = os.path.join(BASE_DIR, 'data/weights/asl_crop_v4_0_mobilenet_weights.pth')
        logger.info(f"Loading model from: {model_path}")
        self.model = load_model(model_path, self.device)
        self.model.eval()
        # MediaPipe hand detector
        self.hands = mp.solutions.hands.Hands(
            static_image_mode=True,  # each uploaded frame is independent
            max_num_hands=1,
            min_detection_confidence=0.5
        )
        # Preprocessing transform; the mean/std are the standard ImageNet statistics
        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        self.confidence_threshold = 0.5  # matches the frontend CONFIDENCE_THRESHOLD

resources = None

@app.on_event("startup")
async def startup_event():
    global resources
    try:
        resources = DetectionResources()
        logger.info("ASL Inference Resources Loaded Successfully")
    except Exception as e:
        logger.error(f"Failed to load resources: {e}")
        raise

@app.get("/")
async def root():
    return {"message": "ASL Real Inference API Ready"}

@app.post("/predict_frame", response_model=SignPrediction)
async def predict_frame(file: UploadFile = File(...), client_timestamp: float = 0.0):
    # Read and decode the uploaded image
    contents = await file.read()
    nparr = np.frombuffer(contents, np.uint8)
    frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    if frame is None:
        return SignPrediction(
            letter="None",
            confidence=0.0,
            timestamp=time.time(),
            clientTimestamp=client_timestamp,
            handDetected=False
        )
    # Run MediaPipe hand detection (MediaPipe expects RGB input)
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results = resources.hands.process(frame_rgb)
    detected_letter = "None"
    confidence = 0.0
    hand_detected = False
    if results.multi_hand_landmarks:
        hand_detected = True
        # Process the first detected hand only
        hand_landmarks = results.multi_hand_landmarks[0]
        # Feature extraction, replicating the pipeline in frame_utils.py:
        # 1. Create a black mask the size of the frame
        orig_features_mask = np.zeros_like(frame)
        # 2. Draw the hand features onto the mask
        extract_hand_features_mask(orig_features_mask, hand_landmarks)
        # 3. Flip the mask horizontally, for consistency with the training data
        mirror_features_mask = cv2.flip(orig_features_mask, 1)
        # 4. Transform both masks to the model's expected [batch, channel, height,
        #    width] input. As in api.py, run inference on both the original and the
        #    mirrored mask and take the element-wise max of the logits, which keeps
        #    predictions robust to left/right hand orientation.
        orig_input = resources.transform(orig_features_mask).unsqueeze(0).to(resources.device)
        mirror_input = resources.transform(mirror_features_mask).unsqueeze(0).to(resources.device)
        with torch.no_grad():
            orig_output = resources.model(orig_input)
            mirror_output = resources.model(mirror_input)
            final_output = torch.max(orig_output, mirror_output)
        # Softmax over the class logits to get probabilities
        probs = torch.nn.functional.softmax(final_output, dim=1)
        conf_tensor, predicted_class = torch.max(probs, 1)
        current_conf = conf_tensor.item()
        confidence = current_conf
        if current_conf > resources.confidence_threshold:
            detected_letter = LabelMapper.index_to_label(predicted_class.item())
        else:
            # Below threshold: report the confidence but no letter
            detected_letter = "None"
    return SignPrediction(
        letter=detected_letter,
        confidence=confidence,
        timestamp=time.time(),
        clientTimestamp=client_timestamp,
        handDetected=hand_detected
    )

if __name__ == "__main__":
    import uvicorn
    # Serve on all interfaces; stop any process already bound to port 8000 first
    uvicorn.run(app, host="0.0.0.0", port=8000)
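
# Illustrative client sketch (not part of the server; assumes the `requests`
# package is installed and a local frame.jpg exists):
#
#   import requests, time
#   with open("frame.jpg", "rb") as f:
#       resp = requests.post(
#           "http://localhost:8000/predict_frame",
#           params={"client_timestamp": time.time()},
#           files={"file": ("frame.jpg", f, "image/jpeg")},
#       )
#   print(resp.json())  # e.g. {"letter": "A", "confidence": 0.93, ...}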