ghuman7 committed
Commit 5517e3a
1 Parent(s): 6caff85

Upload 5 files

Files changed (5)
  1. .gitignore +5 -0
  2. .vercelignore +4 -0
  3. app.py +25 -0
  4. requirements.txt +8 -0
  5. vercel.json +14 -0
.gitignore ADDED
@@ -0,0 +1,5 @@
+ /env
+ env
+ /__pycache__
+ /outputs
+ .vercel
.vercelignore ADDED
@@ -0,0 +1,4 @@
+ /env
+ env
+ /__pycache__
+ /outputs
app.py ADDED
@@ -0,0 +1,25 @@
+ from fastapi import FastAPI, File, UploadFile
+ from modelscope.pipelines import pipeline
+ from modelscope.utils.constant import Tasks
+ import numpy as np
+
+ app = FastAPI()
+
+ mapper = ["angry", "disgust", "fear", "happy",
+           "neutral", "other", "sad", "surprised", "unknown"]
+
+ inference_pipeline = pipeline(
+     task=Tasks.emotion_recognition,
+     model="iic/emotion2vec_base_finetuned", model_revision="v2.0.4")
+
+
+ @app.post("/emotion_recognition")
+ async def emotion_recognition(audio_file: UploadFile = File(...)):
+     audio_bytes = await audio_file.read()
+     rec_result = inference_pipeline(
+         audio_bytes, output_dir="./outputs", granularity="utterance", extract_embedding=False)
+     max_emotion_score = np.argmax(rec_result[0]["scores"])
+     return {
+         "emotion": mapper[max_emotion_score],
+         "confidence": rec_result[0]["scores"][max_emotion_score]
+     }
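A quick way to exercise this endpoint is the client sketch below. It assumes the app is served locally (for example with uvicorn app:app) on port 8000, that a file named sample.wav exists, and that the requests package is installed; none of these are part of the commit.

# Hypothetical client call; "sample.wav", the host, and the port are assumptions.
# The multipart field name must be "audio_file" to match the UploadFile parameter in app.py.
import requests

with open("sample.wav", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/emotion_recognition",
        files={"audio_file": ("sample.wav", f, "audio/wav")},
    )
print(resp.json())  # e.g. {"emotion": "happy", "confidence": 0.87}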
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ funasr
+ modelscope
+ fastapi
+ nest-asyncio
+ uvicorn
+ python-multipart
+ torchaudio
+ datasets==2.18.0
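Since uvicorn is listed here, the app can also be started outside Vercel with a small launcher like the sketch below; the file name run_local.py, the host, and the port are assumptions, not part of this commit.

# run_local.py (hypothetical): start the FastAPI app from app.py for local testing.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app:app", host="0.0.0.0", port=8000)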
vercel.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "builds": [
+     {
+       "src": "app.py",
+       "use": "@vercel/python"
+     }
+   ],
+   "routes": [
+     {
+       "src": "/(.*)",
+       "dest": "app.py"
+     }
+   ]
+ }