yasserrmd committed on
Commit
85e4503
1 Parent(s): 08b1403

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -8
app.py CHANGED
@@ -4,24 +4,18 @@ from wenet.cli.model import load_model
4
  from huggingface_hub import hf_hub_download
5
  import spaces
6
 
7
-
8
-
9
-
10
-
11
-
12
  REPO_ID = "Revai/reverb-asr"
13
  files = ['reverb_asr_v1.jit.zip', 'tk.units.txt']
14
  downloaded_files = [hf_hub_download(repo_id=REPO_ID, filename=f) for f in files]
15
  model = load_model(downloaded_files[0], downloaded_files[1])
16
 
17
-
18
-
19
def process_cat_embs(cat_embs):
    """Parse a comma-separated string of floats into a 1-D tensor on the best device.

    Args:
        cat_embs: Comma-separated embedding weights, e.g. "1.0,0.0".

    Returns:
        A 1-D float tensor moved to CUDA when available, otherwise CPU.
    """
    # BUG FIX: "gpu" is not a valid torch device string — Tensor.to("gpu")
    # raises RuntimeError. Use "cuda", falling back to CPU when no GPU exists.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.tensor([float(c) for c in cat_embs.split(',')]).to(device)
23
 
24
 
 
25
  def recognition(audio, style=0):
26
  if not audio:
27
  return "Input Error! Please enter one audio!"
@@ -38,7 +32,7 @@ def recognition(audio, style=0):
38
 
39
  # Gradio UI Components
40
  inputs = [
41
- gr.Audio(source="microphone", type="filepath", label='Input audio'),
42
  gr.Slider(0, 1, value=0, label="Transcription Style", info="Adjust between non-verbatim (0) and verbatim (1) transcription")
43
  ]
44
 
 
4
  from huggingface_hub import hf_hub_download
5
  import spaces
6
 
 
 
 
 
 
7
  REPO_ID = "Revai/reverb-asr"
8
  files = ['reverb_asr_v1.jit.zip', 'tk.units.txt']
9
  downloaded_files = [hf_hub_download(repo_id=REPO_ID, filename=f) for f in files]
10
  model = load_model(downloaded_files[0], downloaded_files[1])
11
 
 
 
12
def process_cat_embs(cat_embs):
    """Parse a comma-separated string of floats into a 1-D tensor on the best device.

    Args:
        cat_embs: Comma-separated embedding weights, e.g. "1.0,0.0".

    Returns:
        A 1-D float tensor moved to CUDA when available, otherwise CPU.
    """
    # BUG FIX: "gpu" is not a valid torch device string — Tensor.to("gpu")
    # raises RuntimeError. Use "cuda", falling back to CPU when no GPU exists.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.tensor([float(c) for c in cat_embs.split(',')]).to(device)
16
 
17
 
18
+ @spaces.GPU
19
  def recognition(audio, style=0):
20
  if not audio:
21
  return "Input Error! Please enter one audio!"
 
32
 
33
  # Gradio UI Components
34
  inputs = [
35
+ gr.Audio(type="filepath", label='Input audio'),
36
  gr.Slider(0, 1, value=0, label="Transcription Style", info="Adjust between non-verbatim (0) and verbatim (1) transcription")
37
  ]
38