gpt-omni committed on
Commit 6683bb4
1 Parent(s): 24fa4e7

Update app.py

Files changed (1)
app.py +1 -8
app.py CHANGED
@@ -13,20 +13,13 @@ from inference import OmniInference
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 omni_client = OmniInference('./checkpoint', device)
-
+omni_client.warm_up()
 
 OUT_CHUNK = 4096
 OUT_RATE = 24000
 OUT_CHANNELS = 1
 
 
-@spaces.GPU
-def warmup():
-    omni_client.warm_up()
-
-warmup()
-
-@spaces.GPU
 def process_audio(audio):
     filepath = audio
     print(f"filepath: {filepath}")