Update apis/chat_api.py
apis/chat_api.py  +6 -50  CHANGED
@@ -143,50 +143,6 @@ class ChatAPIApp:
         json_compatible_item_data = jsonable_encoder(item_response)
         return JSONResponse(content=json_compatible_item_data)
 
-    class WhisperPostItem(BaseModel):
-        model: str = Field(
-            default="whisper-small",
-            description="(str) `Whisper model`",
-        )
-        lang: str = Field(
-            default="en",
-            description="(str) `transcribe to`",
-        )
-        audio_file: UploadFile = File(
-            description="(File) `Source Audio File`",
-        )
-    def whisper_transcribe(self, item:WhisperPostItem):
-        MODEL_MAP = {
-            "whisper-small": "openai/whisper-small",
-            "whisper-medium": "openai/whisper-medium",
-            "whisper-large": "openai/whisper-large",
-            "default": "openai/whisper-small",
-        }
-        if model in MODEL_MAP.keys():
-            target_model = item.model
-        else:
-            target_model = "default"
-
-        real_name = MODEL_MAP[target_model]
-        device = 0 if torch.cuda.is_available() else "cpu"
-        pipe = pipeline(
-            task="automatic-speech-recognition",
-            model=real_name,
-            chunk_length_s=30,
-            device=device,
-        )
-        time_start = time.time()
-        pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=item.lang, task="transcribe")
-        text = pipe(item.audio_file)["text"]
-        time_end = time.time()
-        item_response = {
-            "statue": 200,
-            "result": text,
-            "start": str(time_start),
-            "end": str(time_end)
-        }
-        json_compatible_item_data = jsonable_encoder(item_response)
-        return JSONResponse(content=json_compatible_item_data)
     class TranslateAiPostItem(BaseModel):
         model: str = Field(
             default="t5-base",
@@ -300,11 +256,6 @@ class ChatAPIApp:
             prefix + "/translate/ai",
             summary="translate text with ai",
         )(self.translate_ai_completions)
-
-        self.app.post(
-            prefix + "/transcribe",
-            summary="transcribe audio to text",
-        )(self.whisper_transcribe)
 
         self.app.post(
             prefix + "/detect",
@@ -356,7 +307,12 @@ app.add_middleware(
     allow_methods=["*"],
     allow_headers=["*"],
 )
-
+@app.post("/uploadfile/")
+async def create_upload_file(
+    file: UploadFile = File(description="A file read as UploadFile"),
+):
+    return {"filename": file.filename}
+
 if __name__ == "__main__":
     args = ArgParser().args
     if args.dev:
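The new /uploadfile/ route only echoes back the name of the uploaded file. A minimal client sketch for trying it out, assuming the API is served locally on uvicorn's default port 8000 and that a file named sample.wav exists; neither the host/port nor the file name comes from this diff:

import requests  # assumed to be available in the client environment

# Send a multipart upload to the new endpoint; the form field name must be
# "file" to match the create_upload_file parameter.
with open("sample.wav", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:8000/uploadfile/",
        files={"file": ("sample.wav", f)},
    )

print(resp.json())  # expected: {"filename": "sample.wav"}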