-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
280 lines (235 loc) · 11.7 KB
/
main.py
File metadata and controls
280 lines (235 loc) · 11.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
import cv2
import time
import os
import mediapipe as mp
import numpy as np
import random
import sys
from PIL import Image, ImageDraw,ImageFont
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QPushButton, QMessageBox
def choose_digit():
    """Open the webcam and recognize hand gestures for the digits 1-10.

    Uses MediaPipe to locate 21 hand landmarks, classifies the gesture by
    comparing fingertip-to-wrist distances against a palm-size baseline
    (wrist -> index MCP distance / 0.8), and overlays the matching image
    from the ``image`` folder onto the frame.  Press 'q' to quit.

    Side effects: opens the default camera (device 0), reads the ``image``
    directory, and shows an OpenCV window until 'q' is pressed.
    """
    print("让我来分析分析你的手势!")

    # MediaPipe hand detector (single hand) plus the landmark drawing helper.
    mp_hands = mp.solutions.hands
    hands = mp_hands.Hands(max_num_hands=1)
    mp_draw = mp.solutions.drawing_utils

    # Camera setup.
    wCam, hCam = 648, 480
    cap = cv2.VideoCapture(0)
    cap.set(3, wCam)  # CAP_PROP_FRAME_WIDTH
    cap.set(4, hCam)  # CAP_PROP_FRAME_HEIGHT

    # Overlay images, one per digit, ordered by filename in ``image``.
    folderPath = "image"
    myList = os.listdir(folderPath)
    print("图像列表:", myList)
    overlayList = [cv2.imread(f'{folderPath}/{imPath}') for imPath in myList]

    def _dist(lm, i, j):
        # Euclidean distance between two normalized landmarks.
        # BUG FIX: the original used ``** 5`` instead of ``** 0.5`` (square
        # root), so no actual distance was ever computed.
        dx = lm[i].x - lm[j].x
        dy = lm[i].y - lm[j].y
        return (dx * dx + dy * dy) ** 0.5

    pTime = 0
    try:
        while True:
            success, img = cap.read()
            if not success:
                # Camera frame unavailable; try again instead of crashing
                # on cvtColor(None).
                continue

            imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            results = hands.process(imgRGB)
            gesture = 0

            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    mp_draw.draw_landmarks(img, hand_landmarks, mp_hands.HAND_CONNECTIONS)
                    lm = hand_landmarks.landmark

                    # Palm-size baseline: wrist (0) to index-finger MCP (5).
                    base = _dist(lm, 0, 5) / 0.8
                    d4 = _dist(lm, 0, 4)    # thumb tip
                    d6 = _dist(lm, 0, 6)    # index PIP
                    d7 = _dist(lm, 0, 7)    # index DIP
                    d8 = _dist(lm, 0, 8)    # index tip
                    d11 = _dist(lm, 0, 11)  # middle DIP
                    d12 = _dist(lm, 0, 12)  # middle tip
                    # BUG FIX: ring-finger distance previously mixed in the
                    # middle-finger y coordinate (p12_y) by copy-paste.
                    d16 = _dist(lm, 0, 16)  # ring tip
                    d20 = _dist(lm, 0, 20)  # pinky tip

                    # Rules are checked in order; a later match overrides an
                    # earlier one on purpose (original behavior).
                    # Index finger extended -> 1
                    if d8 >= base and d12 < base and d16 < base and d20 < base:
                        gesture = 1
                    # Index + middle extended, thumb folded -> 2
                    if d8 >= base and d12 >= base and d4 < base:
                        gesture = 2
                    # Middle + ring + pinky extended, index folded -> 3
                    if d8 < base and d12 >= base and d16 >= base and d20 >= base:
                        gesture = 3
                    # Thumb folded, all four fingers extended -> 4
                    if d4 < base and d8 >= base and d12 >= base and d16 >= base and d20 >= base:
                        gesture = 4
                    # All five fingers extended -> 5
                    if d4 >= base and d8 >= base and d12 >= base and d16 >= base and d20 >= base:
                        gesture = 5
                    # Thumb and pinky extended -> 6
                    if d4 >= base and d8 < base and d12 < base and d16 < base and d20 >= base:
                        gesture = 6
                    # Thumb, index, middle pinched together -> 7
                    if d4 >= d7 and d4 >= d11 and d4 >= d20:
                        gesture = 7
                    # Thumb and index extended -> 8
                    if d4 >= base and d8 >= base and d12 < base and d16 < base and d20 < base:
                        gesture = 8
                    # Index bent into a hook -> 9
                    if d8 >= base and d8 <= d6 and d16 < base and d20 < base:
                        gesture = 9
                    # Fist (everything folded) -> 10
                    if d4 < base and d8 < base and d12 < base and d16 < base and d20 < base:
                        gesture = 10

            # BUG FIX: only draw an overlay when a gesture was actually
            # recognized; the original also blitted overlayList[0] when no
            # hand was visible.  Bounds-check in case fewer than 10 images
            # exist in the folder.
            if gesture != 0 and gesture - 1 < len(overlayList):
                overlay = overlayList[gesture - 1]
                h, w, c = overlay.shape
                img[0:h, 0:w] = overlay

            # Compute and display FPS (guard against a zero time delta).
            cTime = time.time()
            fps = 1 / max(cTime - pTime, 1e-6)
            pTime = cTime
            cv2.putText(img, f'FPS: {int(fps)}', (400, 70), cv2.FONT_HERSHEY_PLAIN,
                        3, (255, 0, 0), 3)

            cv2.imshow("Recognize Digit", img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Release the camera and window even if the loop raises.
        cap.release()
        cv2.destroyAllWindows()
def choose_guess():
    """Play rock-paper-scissors against the computer via webcam gestures.

    MediaPipe classifies the user's hand as scissors / paper / rock by
    comparing fingertip-to-wrist distances to a palm-size baseline; the
    computer picks a random move each time the user's gesture changes, and
    the result is drawn on the frame with a Chinese TrueType font (Pillow,
    since cv2.putText cannot render CJK text).  Press 'q' to quit.

    Side effects: opens camera 0, reads a Windows system font, and shows an
    OpenCV window until 'q' is pressed.
    """
    print("来和我猜拳吧!")
    cap = cv2.VideoCapture(0)

    # Drawing helper and hand-detector setup.
    mp_drawing = mp.solutions.drawing_utils
    mp_hands = mp.solutions.hands
    hands = mp_hands.Hands(
        static_image_mode=False,      # video stream, not still images
        max_num_hands=1,
        min_detection_confidence=0.8,
        min_tracking_confidence=0.8,
    )

    # Raw string so the Windows backslashes are never treated as escapes.
    # assumes this font exists on the machine — TODO confirm / make configurable
    font_path = r"C:\Windows\Fonts\STKAITI.TTF"
    font_size = 32
    font = ImageFont.truetype(font_path, font_size)

    # Index 0 = no gesture; 1 = scissors, 2 = paper, 3 = rock.
    gesture = ["", "剪刀", "布", "石头"]
    user = 0
    bot = 0   # BUG FIX: renamed from ``sys``, which shadowed the sys module
    flag = 0  # previous user gesture; the bot re-rolls only on change

    def _dist(lm, i, j):
        # Euclidean distance between two normalized landmarks.
        # BUG FIX: the original used ``** 5`` instead of ``** 0.5``.
        dx = lm[i].x - lm[j].x
        dy = lm[i].y - lm[j].y
        return (dx * dx + dy * dy) ** 0.5

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # No frame captured; retry instead of flipping None.
                continue
            frame = cv2.flip(frame, 1)  # mirror for natural interaction

            # process() wants RGB; results holds the detected landmarks.
            results = hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    user = 0
                    lm = hand_landmarks.landmark

                    # Palm-size baseline: wrist (0) to index MCP (5).
                    base = _dist(lm, 0, 5) / 0.8
                    d8 = _dist(lm, 0, 8)    # index tip
                    d12 = _dist(lm, 0, 12)  # middle tip
                    # BUG FIX: copy-paste error mixed p12_y into the ring
                    # finger distance in the original.
                    d16 = _dist(lm, 0, 16)  # ring tip
                    d20 = _dist(lm, 0, 20)  # pinky tip

                    # Index + middle extended -> scissors
                    if d8 >= base and d12 >= base:
                        user = 1
                    # All four fingers extended -> paper (overrides scissors)
                    if d8 >= base and d12 >= base and d16 >= base and d20 >= base:
                        user = 2
                    # All four fingers folded -> rock
                    if d8 < base and d12 < base and d16 < base and d20 < base:
                        user = 3

                    # cv2.putText cannot draw CJK glyphs, so render the text
                    # with Pillow and convert back afterwards.
                    img_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                    draw = ImageDraw.Draw(img_pil)
                    draw.text((50, 25), "你: " + gesture[user], font=font, fill=(255, 0, 0))

                    # Bot picks a new random move only when the user's
                    # gesture changes; resets when no gesture is shown.
                    if user != 0 and flag != user:
                        bot = random.randint(1, 3)
                    if user == 0:
                        bot = 0
                    flag = user
                    draw.text((50, 70), "机器: " + gesture[bot], font=font, fill=(255, 0, 0))

                    # Outcome: scissors(1) < rock(3) < paper(2) < scissors(1);
                    # the user wins when bot == user+1 or bot == user-2.
                    the_end = ["你输了", "平局", "你赢了"]
                    if user + 1 == bot or user - 2 == bot:
                        draw.text((50, 110), "结果: " + the_end[2], font=font, fill=(255, 0, 0))
                    elif user == bot:
                        draw.text((50, 110), "结果:" + the_end[1], font=font, fill=(255, 0, 0))
                    else:
                        draw.text((50, 110), "结果:" + the_end[0], font=font, fill=(255, 0, 0))

                    # Back to OpenCV's BGR layout, then draw the skeleton.
                    frame = cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR)
                    mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)

            cv2.imshow('Guess Game', frame)
            if cv2.waitKey(1) == ord("q"):
                break
    finally:
        # Release the camera and window even if the loop raises.
        cap.release()
        cv2.destroyAllWindows()
# GUI bootstrap: a small launcher window with one button per mode.
# Guarded so that importing this module does not start the Qt event loop.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = QWidget()
    window.setWindowTitle('选')
    window.resize(230, 200)  # width, height

    layout = QVBoxLayout()
    button_digit = QPushButton('数字识别')
    button_digit.setFixedSize(180, 60)
    button_guess = QPushButton('猜拳')
    button_guess.setFixedSize(180, 60)
    layout.addWidget(button_digit)
    layout.addWidget(button_guess)
    window.setLayout(layout)

    # Each button launches its blocking camera loop directly.
    button_digit.clicked.connect(choose_digit)
    button_guess.clicked.connect(choose_guess)

    window.show()
    # exec_() blocks until the window closes; propagate its exit status.
    sys.exit(app.exec_())