import gradio as gr
import numpy as np
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
from transformers import pipeline

# Transcribe the input audio to text with Whisper
def modelo1(audio):
    # Gradio's "audio" input arrives as a (sample_rate, numpy_array) tuple
    sample_rate, data = audio
    whisper = pipeline('automatic-speech-recognition', model='openai/whisper-medium', device=-1)  # device=-1 runs on the CPU
    # Whisper expects mono float audio plus its sampling rate; Gradio delivers int16 PCM
    if data.ndim > 1:
        data = data.mean(axis=1)
    data = data.astype(np.float32) / 32768.0
    text = whisper({"sampling_rate": sample_rate, "raw": data})
    print(text["text"])
    return text["text"]

# Generate an image from the transcribed text with Stable Diffusion 2.1
def modelo2(text):
    model_id = "stabilityai/stable-diffusion-2-1"
    # Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler instead of the default
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    image = pipe(text).images[0]
    return image
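
# Reloading the Stable Diffusion weights on every request dominates latency. A
# minimal sketch of lazy caching, assuming the Space has enough RAM to keep the
# pipeline resident; modelo2_cached is a hypothetical alternative, not wired in.
_SD_PIPE = None

def modelo2_cached(text):
    global _SD_PIPE
    if _SD_PIPE is None:
        _SD_PIPE = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float32
        )
        _SD_PIPE.scheduler = DPMSolverMultistepScheduler.from_config(_SD_PIPE.scheduler.config)
    return _SD_PIPE(text).images[0]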

# Chain the two models: speech -> text -> image
def execution(audio):
    modelo1res = modelo1(audio)
    modelo2res = modelo2(modelo1res)
    return modelo2res

if __name__ == "__main__":
    demo = gr.Interface(fn=execution, inputs="audio", outputs="image")
    demo.launch()
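
# The "audio"/"image" string shortcuts above work, but explicit components make
# the expected formats visible; a minimal equivalent sketch (component arguments
# assume a reasonably recent Gradio release):
#
#     demo = gr.Interface(
#         fn=execution,
#         inputs=gr.Audio(type="numpy"),
#         outputs=gr.Image(type="pil"),
#     )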