jspr committed
Commit 511c7b4
1 Parent(s): c049bdf

Update app.py

Files changed (1)
  1. app.py +7 -8
app.py CHANGED
@@ -4,10 +4,11 @@ from t2a import text_to_audio
 import joblib
 from sentence_transformers import SentenceTransformer
 import numpy as np
+import os
 
 reg = joblib.load('text_reg.joblib')
 model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
-finetune = "davinci:ft-personal:autodrummer-v4-2022-11-01-22-44-58"
+finetune = os.environ['finetune']
 
 def get_note_text(prompt):
     prompt = prompt + " ->"
@@ -24,24 +25,22 @@ def get_note_text(prompt):
     )
     return response.choices[0].text.strip()
 
-def get_drummer_output(prompt, openai_api_key):
-    openai.api_key = openai_api_key
+def get_drummer_output(prompt):
+    openai.api_key = os.environ['key']
     note_text = get_note_text(prompt)
     # note_text = note_text + " " + note_text
-    # note_text = "k n k n k n k n s n h n k n s n k n k n k n k n k n k n h n k n n"
     prompt_enc = model.encode([prompt])
     bpm = int(reg.predict(prompt_enc)[0]) + 20
     print(bpm, "bpm", "notes are", note_text)
     audio = text_to_audio(note_text, bpm)
-    # audio to numpy
     audio = np.array(audio.get_array_of_samples(), dtype=np.float32)
     return (96000, audio)
 
 iface = gr.Interface(
     fn=get_drummer_output,
-    inputs=["text", "text"],
+    inputs="text",
     outputs="audio",
-    title='Auto-drummer',
-    description='Stable Diffusion for drum beats. Type in a genre and some descriptors to the prompt box, enter your OpenAI API key, and get a drum beat in that genre'
+    title='Autodrummer',
+    description="Stable Diffusion for drum beats. Type in a genre and some descriptors (e.g., 'hiphop groove 808') to the prompt box and get a drum beat in that genre"
 )
 iface.launch()
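
After this commit, app.py reads both secrets from the environment instead of hard-coding the fine-tuned model name and accepting the OpenAI API key as a second text input. A minimal local smoke test might look like the sketch below; the environment variable names "finetune" and "key" come from the os.environ lookups introduced here, while the values shown are placeholders (on a Hugging Face Space these would typically be configured as repository secrets rather than set in code).

# Sketch of a local run of the updated app.py (not part of this commit; values are placeholders).
# 'finetune' is read at module level when app.py is imported, and 'key' is read
# inside get_drummer_output on each request, so set both before importing.
import os

os.environ['finetune'] = '<your-finetuned-davinci-model-id>'  # placeholder
os.environ['key'] = '<your-openai-api-key>'                   # placeholder

import app  # importing app.py builds the Gradio interface and calls iface.launch()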