Spaces:
Runtime error
Runtime error
upload app files
Browse files- Dockerfile.txt +28 -0
- main.py +88 -0
- requirements.txt +10 -0
Dockerfile.txt
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Use the official Python 3.10 image
FROM python:3.10

# Set the working directory to /code
WORKDIR /code

# Copy only requirements.txt first, so the pip-install layer below is cached
# and only rebuilt when dependencies change (not on every source edit)
COPY ./requirements.txt /code/requirements.txt

# Install requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt


# Set up a new user named "user" with user ID 1000
# (Hugging Face Spaces requires the container to run as UID 1000)
RUN useradd -m -u 1000 user
# Switch to the "user" user
USER user
# Set home to the user's home directory and put user-level pip scripts on PATH
ENV HOME=/home/user \
	PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Copy the current directory contents into the container at $HOME/app setting the owner to the user
COPY --chown=user . $HOME/app

# Port 7860 is the port Hugging Face Spaces expects the app to listen on
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Local dev run command (note: module here is main, app789 was an older name):
#uvicorn app789:app --host 0.0.0.0 --port 8000 --reload

from fastapi import FastAPI, UploadFile, Form
from fastapi.responses import HTMLResponse
import librosa
import io
import json
import requests
import textwrap3

import whisper
# Load the Whisper "medium" checkpoint once at import time; the first run
# downloads the model weights, so container startup can be slow.
model = whisper.load_model("medium")

app = FastAPI()

# NOTE(review): earlier experiment with faster-whisper (CPU int8) kept for
# reference; the /transcribe handler below uses the openai-whisper model above.
# from faster_whisper import WhisperModel
# model_size = "medium"
# ts_model = WhisperModel(model_size, device="cpu", compute_type="int8")
@app.get("/")
def read_root():
    """Serve the HTML upload form for audio transcription/translation.

    The form POSTs the audio file and the chosen target language code
    (mBART-50 language tags such as "fr_XX") to the /transcribe endpoint.

    Returns:
        HTMLResponse: the form page with status 200.
    """
    # Fixes over the previous version:
    #  - the <script> block sat AFTER </html> (invalid HTML);
    #  - a hidden <input name="tgt_lang"> duplicated the <select name="tgt_lang">,
    #    so every submit carried two tgt_lang fields synced only by that script.
    # The <select> already carries name="tgt_lang", so the hidden input and the
    # syncing script are removed entirely.
    html_form = """
    <html>
    <body>
        <h2>Audio Transcription</h2>
        <form action="/transcribe" method="post" enctype="multipart/form-data">
            <label for="audio_file">Upload an audio file (MP3 or WAV):</label>
            <input type="file" id="audio_file" name="audio_file" accept=".mp3, .wav" required><br><br>
            <label for="language_select">Select Target Language:</label>
            <select id="language_select" name="tgt_lang">
                <option value="fr_XX">French</option>
                <option value="es_XX">Spanish</option>
                <option value="de_DE">German</option>
                <option value="hi_IN">Hindi</option>
                <option value="en_XX">English</option>
                <option value="ja_XX">Japanese</option>
                <option value="ne_NP">Nepali</option>
                <option value="zh_CN">Chinese</option>
                <option value="pt_XX">Portuguese</option>
                <!-- Add more language options here -->
            </select><br><br>
            <input type="submit" value="Transcribe">
        </form>
    </body>
    </html>
    """
    return HTMLResponse(content=html_form, status_code=200)
55 |
+
|
56 |
+
@app.post("/transcribe")
async def transcribe_audio(audio_file: UploadFile, tgt_lang: str = Form(...)):
    """Transcribe an uploaded audio file to English, then translate if needed.

    Args:
        audio_file: uploaded MP3/WAV file.
        tgt_lang: mBART-50 target-language tag (e.g. "fr_XX", "en_XX").

    Returns:
        str: the English transcription when tgt_lang is "en_XX", otherwise
        the text translated to tgt_lang via the hosted mBART-50 model.
    """
    import os  # local import so the module-level import block stays untouched

    audio_bytes = await audio_file.read()

    # Decode/resample to 16 kHz mono, the sample rate Whisper expects.
    audio_data, _ = librosa.load(io.BytesIO(audio_bytes), sr=16000)
    # task="translate" makes Whisper emit English regardless of input language.
    result = model.transcribe(audio_data, task="translate")
    transcribed_text = result['text']

    if tgt_lang == 'en_XX':
        return transcribed_text

    # Split into <=100-char pieces so each request stays within the hosted
    # translation model's input limits.
    chunks = textwrap3.wrap(transcribed_text, 100)
    #segments, _ = ts_model.transcribe(audio_data, task="translate")
    # lst = []
    # for segment in segments:
    #     lst.append(segment.text)

    # SECURITY FIX: the Hugging Face API token was hard-coded in source
    # (leaked credential). Read it from the environment instead — set
    # HF_API_TOKEN in the Space's secrets, and revoke the old token.
    hf_token = os.environ.get("HF_API_TOKEN", "")
    headers = {"Authorization": f"Bearer {hf_token}"}
    API_URL = "https://api-inference.huggingface.co/pipeline/translation/facebook/mbart-large-50-many-to-many-mmt"

    def query(payload):
        # POST the JSON payload to the HF Inference API and decode the reply.
        data = json.dumps(payload)
        response = requests.request("POST", API_URL, headers=headers, data=data)
        return json.loads(response.content.decode("utf-8"))

    translated_text = ''

    for chunk in chunks:
        result = query({"inputs": chunk, "parameters": {"src_lang": "en_XX", "tgt_lang": tgt_lang}})
        translated_text = translated_text + result[0]['translation_text']

    return translated_text
requirements.txt
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
librosa
|
2 |
+
soundfile
|
3 |
+
fastapi
|
4 |
+
uvicorn
|
5 |
+
transformers
|
6 |
+
torch
|
7 |
+
python-multipart
|
8 |
+
sentencepiece
|
9 |
+
textwrap3
|
10 |
+
git+https://github.com/openai/whisper.git
|