Spaces:
Sleeping
Sleeping
File size: 1,755 Bytes
09ab406 114efae 7bcf8d7 09ab406 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
import io

import numpy as np
import soundfile as sf
import uvicorn
from fastapi import FastAPI, File, Form, UploadFile
from fastapi.responses import FileResponse, JSONResponse, Response
from pydantic import BaseModel

from asr import transcribe, ASR_LANGUAGES
from lid import identify
from tts import synthesize, TTS_LANGUAGES
# FastAPI application exposing speech endpoints (ASR transcription, TTS
# synthesis, language identification) backed by the MMS models imported above.
app = FastAPI(title="MMS: Scaling Speech Technology to 1000+ languages")
class TTSRequest(BaseModel):
    """Request body for the /synthesize endpoint.

    Fields (validated by pydantic):
        text: the text to convert to speech.
        language: target language code understood by the TTS backend.
        speed: speaking-speed multiplier passed to synthesize();
            defaults to 1.0 so callers may omit it.
    """
    text: str
    language: str
    # Previously a required field; the default keeps existing callers working
    # while making the common "normal speed" case implicit.
    speed: float = 1.0
@app.post("/transcribe")
async def transcribe_audio(audio: UploadFile = File(...), language: str = Form(...)):
    """Transcribe an uploaded audio file.

    Decodes the uploaded bytes with soundfile, hands the waveform to the
    ASR backend for the requested language, and returns the text as JSON.
    """
    raw = await audio.read()
    waveform, _sample_rate = sf.read(io.BytesIO(raw))
    text = transcribe(waveform, language)
    return JSONResponse(content={"transcription": text})
@app.post("/synthesize")
async def synthesize_speech(request: TTSRequest):
    """Synthesize speech for request.text and return it as a WAV download.

    Bug fix: the original passed an in-memory BytesIO to FileResponse,
    which accepts only a filesystem path — every request would error when
    the response was sent.  An in-memory buffer must be returned via a
    plain Response (or StreamingResponse) instead.
    """
    audio, filtered_text = synthesize(request.text, request.language, request.speed)
    # Encode the numpy waveform to WAV entirely in memory.
    # NOTE(review): 22050 Hz is assumed to be the TTS model's output rate —
    # confirm against tts.synthesize.
    buffer = io.BytesIO()
    sf.write(buffer, audio, 22050, format='wav')
    return Response(
        content=buffer.getvalue(),
        media_type="audio/wav",
        headers={"Content-Disposition": "attachment; filename=synthesized_audio.wav"},
    )
@app.post("/identify")
async def identify_language(audio: UploadFile = File(...)):
    """Identify the spoken language of an uploaded audio file.

    The uploaded bytes are decoded with soundfile and passed to the LID
    backend; its result is returned as JSON.
    """
    payload = await audio.read()
    samples, _rate = sf.read(io.BytesIO(payload))
    prediction = identify(samples)
    return JSONResponse(content={"language_identification": prediction})
@app.get("/asr_languages")
async def get_asr_languages():
    """Return the languages supported by the ASR backend as JSON."""
    supported = ASR_LANGUAGES
    return JSONResponse(content=supported)
@app.get("/tts_languages")
async def get_tts_languages():
    """Return the languages supported by the TTS backend as JSON."""
    supported = TTS_LANGUAGES
    return JSONResponse(content=supported)
|