-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathedge-tts2openAPI.py
More file actions
380 lines (334 loc) · 12.1 KB
/
edge-tts2openAPI.py
File metadata and controls
380 lines (334 loc) · 12.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import StreamingResponse
import edge_tts
import asyncio
from pydantic import BaseModel, Field
import uvicorn
import logging
from typing import Optional
# Configure verbose logging for the whole service
from starlette.responses import HTMLResponse
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("TTS-Server")
app = FastAPI()
# OpenAI-compatible request model for POST /v1/audio/speech
class TTSParameters(BaseModel):
    """Request body mirroring OpenAI's /v1/audio/speech schema."""
    model: str = "tts-1"                # OpenAI model alias; must be a MODEL_CONFIG key
    input: str                          # text to synthesize (required)
    voice: str = "alloy"                # OpenAI voice alias, mapped to an EdgeTTS voice
    response_format: Optional[str] = "mp3"
    speed: Optional[float] = Field(1.0, ge=0.5, le=2.0)
    # Non-OpenAI extension: volume multiplier, applied via an ffmpeg filter
    volume: Optional[float] = Field(1.0, ge=0.5, le=3.0)
# Maps OpenAI model names to EdgeTTS voice configuration.
# "quality" controls rate handling in generate_edge_audio: only the
# "enhanced" tier applies the requested speed; "standard" always uses +0%.
MODEL_CONFIG = {
    "tts-1": {
        "quality": "standard",
        "allowed_formats": ["mp3"],
        # OpenAI voice alias -> EdgeTTS voice ShortName
        "voice_map": {
            "alloy": "en-US-GuyNeural",
            "echo": "en-US-JennyNeural",
            "nova": "zh-CN-YunxiNeural"
        }
    },
    "tts-1-hd": {
        "quality": "enhanced",
        "allowed_formats": ["mp3"],
        "voice_map": {
            "alloy": "en-US-AriaNeural",
            "echo": "en-US-DavisNeural",
            "nova": "zh-CN-YunjianNeural"
        }
    }
}
async def write_audio(communicate, stdin):
    """Pump EdgeTTS audio chunks into ffmpeg's stdin.

    Args:
        communicate: An edge_tts.Communicate-like object whose ``stream()``
            async-iterates dict chunks with "type" and "data" keys.
        stdin: ffmpeg's stdin StreamWriter (may be None).

    Errors are logged, not raised, since this runs as a background task.
    """
    try:
        async for chunk in communicate.stream():
            # Only "audio" chunks carry payload; metadata chunks are skipped.
            if chunk["type"] == "audio" and stdin is not None:
                stdin.write(chunk["data"])
                await stdin.drain()
    except Exception as e:
        logger.error(f"写入音频失败: {e}")
    finally:
        # Bug fix: stdin was previously closed only on the success path.
        # If streaming failed, ffmpeg never saw EOF and the reader loop in
        # generate_edge_audio could block forever. Always close here.
        if stdin is not None:
            try:
                stdin.close()
                await stdin.wait_closed()
            except Exception:
                pass  # best-effort cleanup; the pipe may already be gone
async def read_audio(stdout):
    """Yield processed audio from ffmpeg's stdout in 4 KiB chunks until EOF."""
    # read() returns b"" at EOF, which is falsy and ends the loop.
    while chunk := await stdout.read(4096):
        yield chunk
async def generate_edge_audio(text: str, config: dict, voice: str, speed: float, volume: float):
    """Asynchronously yield MP3 audio bytes synthesized by EdgeTTS.

    Args:
        text: Text to synthesize.
        config: One MODEL_CONFIG entry (reads "quality" and "voice_map").
        voice: OpenAI-style voice alias, or a raw EdgeTTS ShortName.
        speed: Speed multiplier; only applied for "enhanced" quality,
            where it is clamped to [0.8, 1.5].
        volume: Volume multiplier; any value other than 1.0 routes the
            stream through an ffmpeg subprocess with a volume filter.

    Raises:
        ValueError: If the resolved voice is not a valid EdgeTTS voice.
    """
    try:
        # Resolve the alias to a real voice name; fall back to the raw value
        # so callers may pass an EdgeTTS ShortName directly.
        real_voice = config["voice_map"].get(voice.lower(), voice)
        # Validate against the live EdgeTTS catalogue (network call).
        all_voices = await edge_tts.list_voices()
        if not any(v["ShortName"] == real_voice for v in all_voices):
            raise ValueError(f"无效语音: {real_voice}")
        # Only the "enhanced" tier honours the speed parameter.
        rate = "+0%"
        if config["quality"] == "enhanced":
            speed = max(0.8, min(speed, 1.5))
            # Bug fix: the old f"+{int(...)}%" produced "+-20%" for speeds
            # below 1.0, which is not a valid edge-tts rate string
            # (expected form: "+N%" or "-N%"). ":+d" emits the sign itself.
            rate = f"{int((speed - 1) * 100):+d}%" if speed != 1.0 else "+0%"
        communicate = edge_tts.Communicate(text, real_voice, rate=rate)
        # When a volume change is requested, pipe the audio through ffmpeg.
        if volume != 1.0:
            # ffmpeg: read MP3 on stdin, apply volume filter, write MP3 to stdout.
            cmd = [
                'ffmpeg',
                '-i', 'pipe:0',
                '-af', f'volume={volume}',
                '-f', 'mp3',
                '-loglevel', 'quiet',
                'pipe:1'
            ]
            proc = await asyncio.create_subprocess_exec(
                *cmd,
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )
            # Feed EdgeTTS audio into ffmpeg concurrently with the reads below.
            writer_task = asyncio.create_task(
                write_audio(communicate, proc.stdin)
            )
            # Stream ffmpeg's processed output to the caller as it arrives.
            try:
                while True:
                    chunk = await proc.stdout.read(4096)
                    if not chunk:
                        break
                    yield chunk
            finally:
                # Clean up the subprocess and the background writer task,
                # even if the consumer abandons the generator mid-stream.
                if proc.stdin:
                    proc.stdin.close()
                await proc.wait()
                writer_task.cancel()
                try:
                    await writer_task
                except asyncio.CancelledError:
                    pass
        else:
            # No volume change: stream raw EdgeTTS audio directly.
            async for chunk in communicate.stream():
                if chunk["type"] == "audio":
                    yield chunk["data"]
    except Exception as e:
        logger.error(f"生成失败: {str(e)}")
        raise
@app.post("/v1/audio/speech")
async def create_speech(request: TTSParameters):
    """OpenAI-compatible speech synthesis endpoint.

    Validates model, format and voice against MODEL_CONFIG and streams the
    generated MP3 back to the client.

    Raises:
        HTTPException: 400 for unsupported model/format/voice;
            500 for unexpected server-side failures.
    """
    try:
        logger.debug(f"收到请求: {request.dict()}")
        # Reject models we have no configuration for.
        if request.model not in MODEL_CONFIG:
            raise HTTPException(400, detail=f"不支持的模型: {request.model}")
        config = MODEL_CONFIG[request.model]
        # Validate the requested audio format against the model's allow-list.
        if request.response_format not in config["allowed_formats"]:
            raise HTTPException(400,
                detail=f"模型{request.model}不支持格式: {request.response_format}")
        # Validate the voice alias before starting synthesis.
        voice = request.voice.lower()
        if voice not in config["voice_map"]:
            raise HTTPException(400,
                detail=f"模型{request.model}不支持语音: {request.voice}")
        return StreamingResponse(
            generate_edge_audio(
                text=request.input,
                config=config,
                voice=voice,
                speed=request.speed,
                volume=request.volume
            ),
            media_type=f"audio/{request.response_format}",
            headers={
                "Content-Disposition": "attachment; filename=speech.mp3",
                "OpenAI-Processing-Ms": "800"
            }
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.exception("服务器错误")
        # Bug fix: the error body was previously *returned*, producing an
        # HTTP 200 with an error payload, so OpenAI clients treated failures
        # as success. Raise a real 500 instead.
        raise HTTPException(
            500,
            detail={
                "message": str(e),
                "type": "invalid_request_error",
                "code": 500
            }
        ) from e
# OpenAI-compatible voice listing endpoint
@app.get("/v1/voices")
async def list_voices():
    """Return the static catalogue of OpenAI-style voice aliases."""
    supported_models = ["tts-1", "tts-1-hd"]
    aliases = (("alloy", "Alloy"), ("echo", "Echo"), ("nova", "Nova"))
    return {
        "data": [
            {
                "id": voice_id,
                "name": f"{label} (EdgeTTS)",
                "capacities": list(supported_models),
            }
            for voice_id, label in aliases
        ]
    }
@app.get("/", response_class=HTMLResponse)
async def get_interface():
    """Serve a minimal single-page demo UI for the TTS service.

    The page posts to /v1/audio/speech with the fixed "tts-1" model and
    "nova" voice, then plays the returned MP3 in an <audio> element.
    NOTE: the template is an f-string, so every literal CSS/JS brace is
    doubled ({{ }}); there are no actual interpolations.
    """
    return f"""
    <!DOCTYPE html>
    <html>
    <head>
        <title>EdgeTTS 云希语音合成</title>
        <style>
            body {{
                font-family: Arial, sans-serif;
                max-width: 800px;
                margin: 20px auto;
                padding: 20px;
                background-color: #f0f0f0;
            }}
            .container {{
                background: white;
                padding: 20px;
                border-radius: 8px;
                box-shadow: 0 2px 4px rgba(0,0,0,0.1);
            }}
            h1 {{
                color: #2c3e50;
                text-align: center;
            }}
            .control-group {{
                margin: 15px 0;
            }}
            label {{
                display: block;
                margin-bottom: 5px;
                color: #34495e;
            }}
            textarea {{
                width: 100%;
                height: 120px;
                padding: 8px;
                border: 1px solid #bdc3c7;
                border-radius: 4px;
                resize: vertical;
            }}
            input[type="range"] {{
                width: 100%;
            }}
            button {{
                background: #3498db;
                color: white;
                border: none;
                padding: 10px 20px;
                border-radius: 4px;
                cursor: pointer;
                transition: background 0.3s;
            }}
            button:hover {{
                background: #2980b9;
            }}
            #audioPlayer {{
                margin-top: 20px;
                width: 100%;
            }}
            .speed-controls {{
                display: grid;
                grid-template-columns: repeat(2, 1fr);
                gap: 15px;
                margin: 20px 0;
            }}
        </style>
    </head>
    <body>
        <div class="container">
            <h1>EdgeTTS 云希语音合成</h1>
            <div class="control-group">
                <label for="textInput">输入要合成的文本:</label>
                <textarea id="textInput" placeholder="请输入中文文本..."></textarea>
            </div>
            <div class="speed-controls">
                <div class="control-group">
                    <label for="speed">语速 (0.5-2.0): <span id="speedValue">1.0</span></label>
                    <input type="range" id="speed" min="0.5" max="2.0" step="0.1" value="1.0">
                </div>
                <div class="control-group">
                    <label for="volume">音量 (0.5-3.0): <span id="volumeValue">1.0</span></label>
                    <input type="range" id="volume" min="0.5" max="3.0" step="0.1" value="1.0">
                </div>
            </div>
            <button onclick="generateSpeech()">生成并播放语音</button>
            <audio id="audioPlayer" controls></audio>
        </div>
        <script>
            function updateSpeedValue() {{
                document.getElementById('speedValue').textContent = document.getElementById('speed').value;
            }}
            function updateVolumeValue() {{
                document.getElementById('volumeValue').textContent = document.getElementById('volume').value;
            }}
            document.getElementById('speed').addEventListener('input', updateSpeedValue);
            document.getElementById('volume').addEventListener('input', updateVolumeValue);
            async function generateSpeech() {{
                const text = document.getElementById('textInput').value;
                const speed = document.getElementById('speed').value;
                const volume = document.getElementById('volume').value;
                const audioPlayer = document.getElementById('audioPlayer');
                if (!text) {{
                    alert('请输入要合成的文本');
                    return;
                }}
                try {{
                    const response = await fetch('/v1/audio/speech', {{
                        method: 'POST',
                        headers: {{
                            'Content-Type': 'application/json',
                        }},
                        body: JSON.stringify({{
                            input: text,
                            model: "tts-1",
                            voice: "nova",
                            speed: parseFloat(speed),
                            volume: parseFloat(volume)
                        }})
                    }});
                    if (!response.ok) {{
                        throw new Error(`HTTP error! status: ${{response.status}}`);
                    }}
                    const audioBlob = await response.blob();
                    const audioUrl = URL.createObjectURL(audioBlob);
                    audioPlayer.src = audioUrl;
                    audioPlayer.play();
                }} catch (error) {{
                    console.error('Error:', error);
                    alert('生成语音失败: ' + error.message);
                }}
            }}
        </script>
    </body>
    </html>
    """
if __name__ == "__main__":
    # Script entry point: serve the app on all interfaces, port 13241.
    logger.info("启动TTS服务 (端口 13241)...")
    server_options = {
        "host": "0.0.0.0",
        "port": 13241,
        "log_config": None,        # keep our own logging.basicConfig setup
        "timeout_keep_alive": 600, # long keep-alive for slow audio streams
    }
    uvicorn.run(app, **server_options)