# NOTE: removed non-Python residue (Hugging Face Space page header, commit
# hashes, and line-number gutter) that was scraped in above the code.
import gradio as gr
from langchain.llms import OpenAI as LangChainOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from openai import OpenAI
import os
import re
from pathlib import Path
from moviepy.editor import *
import requests
import tempfile
# Initialize OpenAI client
# Picks up credentials from the OPENAI_API_KEY environment variable (SDK default).
client = OpenAI()
# Create a prompt template
# NOTE: the 3-paragraph instruction is load-bearing — generate_story_with_video
# splits the story on blank lines and pads/truncates to exactly 3 paragraphs.
template = """
You are a creative story writer. Given a topic, write a short story of about 150 words.
Divide the story into 3 paragraphs. Each paragraph should be a distinct part of the story.
Topic: {topic}
Story:
"""
prompt = PromptTemplate(template=template, input_variables=["topic"])
# Create an LLMChain
llm = LangChainOpenAI(temperature=0.7)  # 0.7: some creative variation per run
story_chain = LLMChain(llm=llm, prompt=prompt)
def generate_image(prompt):
    """Ask DALL-E 3 for one 1024x1024 image of *prompt*.

    Returns the hosted image URL, or None if the API call fails
    (the error is printed, not raised — best-effort by design).
    """
    try:
        result = client.images.generate(
            model="dall-e-3",
            prompt=prompt,
            size="1024x1024",
            quality="standard",
            n=1,
        )
        return result.data[0].url
    except Exception as e:
        print(f"Error generating image: {e}")
        return None
def generate_speech(text, filename):
    """Synthesize *text* with the tts-1 "alloy" voice and write it to *filename*.

    Returns *filename* on success, or None if synthesis or saving fails
    (the error is printed, not raised — best-effort by design).
    """
    try:
        speech = client.audio.speech.create(
            model="tts-1",
            voice="alloy",
            input=text,
        )
        # NOTE(review): stream_to_file is deprecated in newer openai SDKs in
        # favour of with_streaming_response — confirm against the pinned version.
        speech.stream_to_file(filename)
    except Exception as e:
        print(f"Error generating speech: {e}")
        return None
    return filename
def download_image(url, filename):
    """Download *url* to *filename*; return *filename*, or None on any failure.

    Fixes two defects in the original:
    - no HTTP status check, so a 4xx/5xx HTML error page was silently
      written to disk as the "image" — raise_for_status() now routes that
      to the except branch;
    - no timeout, so a stalled server could hang the whole pipeline.
    """
    try:
        response = requests.get(url, timeout=60)
        response.raise_for_status()  # treat HTTP errors as download failures
        with open(filename, 'wb') as f:
            f.write(response.content)
        return filename
    except Exception as e:
        print(f"Error downloading image: {e}")
        return None
def create_video(paragraphs, image_files, audio_files, output_file):
    """Build one captioned segment per paragraph and concatenate them to *output_file*.

    Each segment shows the paragraph's image (or a black frame when the image
    is missing) with the text captioned at the bottom, timed to the narration
    audio (5 s fallback when audio is missing). Segments that fail to build
    are skipped. Returns *output_file*, or None when no segment succeeded.
    """
    segments = []
    for text, img_path, aud_path in zip(paragraphs, image_files, audio_files):
        try:
            narration = AudioFileClip(aud_path) if aud_path else None
            seconds = narration.duration if narration else 5  # default length without audio
            if img_path:
                backdrop = ImageClip(img_path).set_duration(seconds)
            else:
                backdrop = ColorClip(size=(1024, 1024), color=(0,0,0)).set_duration(seconds)
            caption = (
                TextClip(text, fontsize=30, color='white', bg_color='black',
                         size=(1024, 200), method='caption')
                .set_position(('center', 'bottom'))
                .set_duration(seconds)
            )
            segment = CompositeVideoClip([backdrop, caption])
            if narration:
                segment = segment.set_audio(narration)
            segments.append(segment)
        except Exception as e:
            print(f"Error creating clip: {e}")
    if not segments:
        return None
    concatenate_videoclips(segments).write_videofile(output_file, fps=24)
    return output_file
def generate_story_with_video(topic):
    """Generate a 3-paragraph story for *topic* plus per-paragraph media.

    Returns (story, video_data, image_files, audio_files) where video_data
    is the rendered MP4 as bytes (or None on failure) and the file lists may
    contain None entries for paragraphs whose media generation failed.
    Temp files are intentionally left on disk: Gradio serves them by path.
    """
    try:
        # Generate the story
        story = story_chain.run(topic)
    except Exception as e:
        print(f"Error generating story: {e}")
        story = "Failed to generate story."
    # Split on blank lines. FIX: the old r'\n\n' split left stray
    # "\n"-prefixed fragments on triple newlines and kept empty fragments;
    # r'\n\s*\n' collapses blank-line runs and empties are dropped.
    paragraphs = [p.strip() for p in re.split(r'\n\s*\n', story.strip()) if p.strip()]
    # Normalize to exactly 3 paragraphs — the UI has 3 fixed media slots.
    paragraphs = paragraphs[:3]
    while len(paragraphs) < 3:
        paragraphs.append("...")
    temp_dir = tempfile.mkdtemp()
    image_files = []
    audio_files = []
    for i, paragraph in enumerate(paragraphs):
        # Generate and download the illustration for this paragraph.
        image_url = generate_image(paragraph)
        if image_url:
            image_file = os.path.join(temp_dir, f"image_{i}.png")
            image_files.append(download_image(image_url, image_file))
        else:
            image_files.append(None)
        # Generate the narration audio (entry may be None on failure).
        audio_file = os.path.join(temp_dir, f"audio_{i}.mp3")
        audio_files.append(generate_speech(paragraph, audio_file))
    # Stitch all paragraphs into one video.
    output_file = os.path.join(temp_dir, "story_video.mp4")
    video_file = create_video(paragraphs, image_files, audio_files, output_file)
    # Read the rendered video back as bytes if it was created.
    video_data = None
    if video_file and os.path.exists(video_file):
        with open(video_file, "rb") as f:
            video_data = f.read()
    return story, video_data, image_files, audio_files
# Create the Gradio interface
def gradio_interface(topic):
    """Adapt generate_story_with_video() to the Interface's fixed output list.

    Always returns exactly 8 values, in declared order: story, video, then
    (image, audio) for each of the 3 paragraphs.

    BUG FIX: on video failure the original appended BOTH None and a
    "Failed to generate video." string, producing 9 outputs against the 8
    declared components (shifting every later output into the wrong slot).
    The failure note now goes into the story textbox instead.
    """
    story, video_data, image_files, audio_files = generate_story_with_video(topic)
    # NOTE(review): video_data is raw bytes; gr.Video commonly expects a
    # filepath — confirm this renders in the target Gradio version.
    if video_data:
        outputs = [story, video_data]
    else:
        outputs = [f"{story}\n\nFailed to generate video.", None]
    for i in range(3):
        outputs.append(image_files[i] if i < len(image_files) and image_files[i] else None)
        outputs.append(audio_files[i] if i < len(audio_files) and audio_files[i] else None)
    return outputs
# Wire the single topic textbox to the 8 fixed output components.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Textbox(lines=2, placeholder="Enter a topic for the story..."),
    # Output order must match the list returned by gradio_interface:
    # story, video, then (image, audio) for each of the 3 paragraphs.
    outputs=[
        gr.Textbox(label="Generated Story"),
        gr.Video(label="Story Video"),
        gr.Image(label="Image 1", type="filepath"),
        gr.Audio(label="Audio 1", type="filepath"),
        gr.Image(label="Image 2", type="filepath"),
        gr.Audio(label="Audio 2", type="filepath"),
        gr.Image(label="Image 3", type="filepath"),
        gr.Audio(label="Audio 3", type="filepath"),
    ],
    title="Story Generator with Video, Images, and Audio",
    description="Enter a topic, and the AI will generate a short story with a video, images, and audio narration. If any part fails, you'll still see the successful parts."
)
# Launch the Gradio app only when executed as a script, not on import.
# (Also strips the " |" scrape artifact that made the original line a syntax error.)
if __name__ == "__main__":
    iface.launch()