import os
import sys
import time
import requests
from subprocess import Popen, PIPE
import threading
# from huggingface_hub import hf_hub_download
import gradio as gr
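
# Overall flow of this Space:
#  1. run_xvaserver() launches the local xVASynth server (server.py) as a subprocess
#  2. load_model() asks the server to load the ccby_nvidia_hifi_6670_M voice model over its HTTP API
#  3. predict() sends text to the server's /synthesize endpoint and returns the audio for Gradio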
def run_xvaserver():
    try:
        # start the process without waiting for a response
        print('Running xVAServer subprocess...\n')
        xvaserver = Popen(['python', 'server.py'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
    except Exception as e:
        print(f'Could not run xVASynth: {e}')
        sys.exit(1)

    # Wait for a moment to ensure the server starts up
    time.sleep(10)

    # Check if the server is running
    if xvaserver.poll() is not None:
        print("Web server failed to start.")
        sys.exit(1)

    # contact local xVASynth server; ~2 second timeout
    print('Attempting to connect to xVASynth...')
    response = requests.get('http://0.0.0.0:8008', timeout=2)
    response.raise_for_status()  # If the response contains an HTTP error status code, raise an exception
    print('xVAServer running on port 8008')

    # Read and print stdout and stderr of the subprocess
    while True:
        output = xvaserver.stdout.readline()
        if output == '' and xvaserver.poll() is not None:
            break
        if output:
            print(output.strip())

        error = xvaserver.stderr.readline()
        if error == '' and xvaserver.poll() is not None:
            break
        if error:
            print(error.strip(), file=sys.stderr)

    # Wait for the process to exit
    xvaserver.wait()
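
# Ask the running xVASynth server to load the ccby_nvidia_hifi_6670_M voice model
# through its /loadModel endpoint.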
def load_model():
    model_name = "Pendrokar/TorchMoji"
    # model_path = hf_hub_download(repo_id=model_name, filename="ccby_nvidia_hifi_6670_M.pt")
    # model_json_path = hf_hub_download(repo_id=model_name, filename="ccby_nvidia_hifi_6670_M.json")
    model_path = '/tmp/hfcache/models--Pendrokar--xvapitch_nvidia_6670/snapshots/2e138a7c459fb1cb1182dd7bc66813f5325d30fd/ccby_nvidia_hifi_6670_M.pt'
    model_json_path = '/tmp/hfcache/models--Pendrokar--xvapitch_nvidia_6670/snapshots/2e138a7c459fb1cb1182dd7bc66813f5325d30fd/ccby_nvidia_hifi_6670_M.json'

    # expose the cached model files where the server expects to find them
    os.symlink(model_path, os.path.join('./models/ccby/', os.path.basename(model_path)))
    os.symlink(model_json_path, os.path.join('./models/ccby/', os.path.basename(model_json_path)))

    model_type = 'xVAPitch'
    language = 'en'

    data = {
        'outputs': None,
        'version': '3.0',
        'model': 'ccby/ccby_nvidia_hifi_6670_M',
        'modelType': model_type,
        'base_lang': language,
        'pluginsContext': '{}',
    }
    response = requests.post('http://0.0.0.0:8008/loadModel', json=data)
    response.raise_for_status()  # If the response contains an HTTP error status code, raise an exception
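
# Send a single line of text to the server's /synthesize endpoint and return the
# path of the resulting WAV file for the Gradio audio output.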
def predict(input_text, pacing):
    model_type = 'xVAPitch'
    line = input_text if input_text else 'Test'
    pace = pacing if pacing else 1.0
    save_path = 'test.wav'
    language = 'en'
    base_speaker_emb = []
    use_sr = 0
    use_cleanup = 0

    data = {
        'modelType': model_type,
        'sequence': line,
        'pace': pace,
        'outfile': save_path,
        'vocoder': 'n/a',
        'base_lang': language,
        'base_emb': base_speaker_emb,
        'useSR': use_sr,
        'useCleanup': use_cleanup,
    }
    response = requests.post('http://0.0.0.0:8008/synthesize', json=data)
    # response.raise_for_status()  # If the response contains an HTTP error status code, raise an exception

    print('server.log contents:')
    with open('server.log', 'r') as f:
        print(f.read())

    # the Gradio audio output accepts a path to the synthesized WAV file
    return save_path
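
# Gradio UI: a single-line text box and a pacing slider wired to predict(), with an audio output.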
input_textbox = gr.Textbox(
    label="Input Text",
    lines=1,
    autofocus=True,
)

slider = gr.Slider(0.0, 2.0, value=1.0, step=0.1, label="Pacing")

gradio_app = gr.Interface(
    predict,
    [
        input_textbox,
        slider,
    ],
    outputs="audio",
    title="xVASynth (WIP)",
)
if __name__ == "__main__":
# Run the web server in a separate thread
web_server_thread = threading.Thread(target=run_xvaserver)
print('Starting xVAServer thread')
web_server_thread.start()
# load default voice model
# load_model()
# predicted = predict('test', 1.0)
# print(predicted)
print('running Gradio interface')
gradio_app.launch()
# Wait for the web server thread to finish (shouldn't be reached in normal execution)
web_server_thread.join()