import os

import streamlit as st
import torch
from transformers import pipeline
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_gif
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from gtts import gTTS
from moviepy.editor import VideoFileClip, AudioFileClip

print("Torch version:", torch.__version__)

# Load the text generation model
generator = pipeline('text-generation', model='distilgpt2')

def generate_text(prompt):
    # Expand the user's prompt into a short narration script.
    response = generator(prompt, max_length=150, num_return_sequences=1)
    return response[0]['generated_text']

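# Convert the narration text into an MP3 voice-over with gTTS.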
def text_to_speech(text, filename='output_audio.mp3'):
    tts = gTTS(text)
    tts.save(filename)
    return filename

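# Render a short GIF animation for the prompt with AnimateDiff-Lightning.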
def create_animation(prompt, output_file='animation.gif'):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    # AnimateDiff-Lightning is distilled to run in a handful of steps; use the 4-step checkpoint.
    step = 4
    repo = "ByteDance/AnimateDiff-Lightning"
    ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
    base = "emilianJR/epiCRealism"
    # Load the motion adapter weights and build the AnimateDiff pipeline on top of the base model.
    adapter = MotionAdapter().to(device, dtype)
    adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))
    pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
    # The Lightning checkpoints expect trailing timestep spacing and a linear beta schedule.
    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
    output = pipe(prompt=prompt, guidance_scale=1.0, num_inference_steps=step)
    export_to_gif(output.frames[0], output_file)
    return output_file

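# Combine the GIF animation and the voice-over into a single MP4 with moviepy.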
def create_video(animation_file, audio_file, output_file='output_video.mp4'):
    clip = VideoFileClip(animation_file)
    audio = AudioFileClip(audio_file)
    clip = clip.set_audio(audio)
    clip.write_videofile(output_file, fps=24)
    return output_file

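# End-to-end pipeline: script -> voice-over -> animation -> final video.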
def generate_educational_video(prompt):
    generated_text = generate_text(prompt)
    audio_file = text_to_speech(generated_text)
    animation_file = create_animation(prompt)
    video_file = create_video(animation_file, audio_file)
    return video_file

# Streamlit UI
st.title("Educational Video Generator")
prompt = st.text_input("Enter your prompt here:")

if st.button("Generate Video"):
    if prompt:
        st.write("Generating video, please wait...")
        video_path = generate_educational_video(prompt)
        if os.path.exists(video_path):
            st.video(video_path)
        else:
            st.write("Video generation failed.")
    else:
        st.warning("Please enter a prompt.")