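# Speech-to-image Gradio app: transcribe spoken English with wav2vec2, then feed
# the transcription to Stable Diffusion 2.1 (with the DPM-Solver++ scheduler) to
# generate an image. Assumed dependencies (not listed in the original file):
# gradio, huggingsound, diffusers, transformers, torch, and pydub (pydub also
# needs ffmpeg available on the system).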
import gradio as gr
from huggingsound import SpeechRecognitionModel
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
from pydub import AudioSegment
import torch
# Convert the sampling rate of the input audio file in place.
# The wav2vec2-xlsr models are trained on 16 kHz mono audio, so resample to
# 16 kHz (not 48 kHz) before transcription.
def convert_sampling_rate(audio_path, target_sr=16000):
    audio = AudioSegment.from_file(audio_path)
    audio = audio.set_frame_rate(target_sr).set_channels(1)
    audio.export(audio_path, format="wav")
    return audio_path
def modelo1(audio_path):
    # Resample the recording, then transcribe it to English text
    audio_path = convert_sampling_rate(audio_path)
    model = SpeechRecognitionModel("jonatasgrosman/wav2vec2-large-xlsr-53-english")
    # transcribe() takes a list of file paths and returns one result dict per file
    transcriptions = model.transcribe([audio_path])
    return transcriptions[0]["transcription"]
def modelo2(text):
    model_id = "stabilityai/stable-diffusion-2-1"
    # Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler here instead
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to("cuda")
    image = pipe(text).images[0]
    return image
def execution(audio_path):
    # Chain the two models: speech -> text -> image
    text = modelo1(audio_path)
    return modelo2(text)
if __name__ == "__main__":
    # type="filepath" hands the recorded/uploaded audio to the function as a WAV path
    demo = gr.Interface(fn=execution, inputs=gr.Audio(type="filepath"), outputs="image")
    demo.launch()
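
# Minimal local smoke test, assuming this file is saved as app.py, a CUDA GPU is
# available, and sample.wav is a short English recording (both names are
# hypothetical placeholders):
#   from app import execution
#   execution("sample.wav").save("out.png")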