Pendrokar committed
Commit f70eab2 • 1 Parent(s): 8871135

fastpitch synthesis fix

Files changed (1):
1. app.py +3 -3
app.py CHANGED
@@ -16,14 +16,13 @@ commits = model_repo.list_repo_commits(repo_id=hf_model_name)
 latest_commit_sha = commits[0].commit_id
 hf_cache_models_path = f'/home/user/.cache/huggingface/hub/models--Pendrokar--xvapitch_nvidia/snapshots/{latest_commit_sha}/'
 
-print(hf_cache_models_path)
 commits = model_repo.list_repo_commits(repo_id='Pendrokar/xvasynth_lojban')
 latest_commit_sha = commits[0].commit_id
 hf_cache_lojban_models_path = f'/home/user/.cache/huggingface/hub/models--Pendrokar--xvasynth_lojban/snapshots/{latest_commit_sha}/'
-print(hf_cache_lojban_models_path)
 models_path = hf_cache_models_path
 
 current_voice_model = None
+current_voice_type = None
 base_speaker_emb = ''
 
 def load_model(voice_model_name):
@@ -52,6 +51,7 @@ def load_model(voice_model_name):
     try:
         json_data = xvaserver.loadModel(data)
         current_voice_model = voice_model_name
+        current_voice_type = model_type
 
         with open(model_path + '.json', 'r', encoding='utf-8') as f:
             voice_model_json = json.load(f)
@@ -88,7 +88,7 @@ class LocalBlocksDemo(BlocksDemo):
         if (current_voice_model != voice):
             base_speaker_emb = load_model(voice)
 
-        model_type = 'xVAPitch'
+        model_type = current_voice_type
         pace = pacing if pacing else 1.0
         save_path = '/tmp/xvapitch_audio_sample.wav'
         language = lang
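
Why this fixes FastPitch synthesis: the synthesis handler previously hardcoded model_type = 'xVAPitch', so a voice loaded as a FastPitch model was sent to the xVASynth server with the wrong type. The new current_voice_type global remembers the type chosen when the voice was loaded and is reused at synthesis time. Below is a minimal sketch of that caching pattern only; the synthesize() helper, the 'FastPitch1.1' string, and the lojban check are illustrative assumptions, not the Space's actual app.py.

# Minimal sketch of the pattern introduced by this commit.
# NOTE: synthesize(), the 'FastPitch1.1' string, and the lojban check are
# hypothetical stand-ins for illustration, not the Space's real code.

current_voice_model = None
current_voice_type = None

def load_model(voice_model_name):
    global current_voice_model, current_voice_type
    # In the real app the type would come from the voice model's metadata;
    # a hardcoded branch here only keeps the sketch self-contained.
    model_type = 'FastPitch1.1' if 'lojban' in voice_model_name else 'xVAPitch'
    current_voice_model = voice_model_name
    current_voice_type = model_type   # remember the type for later synthesis calls
    return model_type

def synthesize(voice):
    if current_voice_model != voice:
        load_model(voice)
    # Before the fix this was always 'xVAPitch'; now it follows the loaded model.
    model_type = current_voice_type
    return model_type

assert synthesize('xvasynth_lojban_voice') == 'FastPitch1.1'
assert synthesize('xvapitch_nvidia_voice') == 'xVAPitch'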