import gradio as gr
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
from transformers import pipeline
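
# This Space chains two models: Whisper transcribes the uploaded audio to
# text, and Stable Diffusion 2.1 turns that text into an image.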
# Model 1: transcribe the input audio to text with Whisper
def modelo1(audio):
    whisper = pipeline('automatic-speech-recognition', model='openai/whisper-medium', device=0)  # set device=-1 to use the CPU
    text = whisper(audio)  # transcribe the uploaded file, not a hard-coded path
    return text["text"]
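
# Note: the Whisper pipeline above is rebuilt on every call; creating it once
# at module level would avoid repeated model loads.

# Model 2: generate an image from the transcribed text with Stable Diffusion 2.1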
def modelo2(text):
    model_id = "stabilityai/stable-diffusion-2-1"
    # Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler here instead
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to("cuda")
    image = pipe(text).images[0]
    return image
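
# Note: the Stable Diffusion weights are likewise reloaded on each call; for a
# long-running Space they could be loaded once and reused.

# Chain the two models: audio -> text -> image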
def execution(audio):
    modelo1res = modelo1(audio)
    modelo2res = modelo2(modelo1res)
    return modelo2res
if __name__ == "__main__":
    # type="filepath" passes the recording as a file path, which the
    # transformers ASR pipeline accepts directly
    demo = gr.Interface(fn=execution, inputs=gr.Audio(type="filepath"), outputs="image")
    demo.launch()