import gradio as gr
import json
import os
from datetime import datetime
from PIL import Image
import re
import asyncio
import edge_tts
import shutil
from gradio_client import Client
import subprocess
import uuid
import logging

# Set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Ensure necessary directories exist
os.makedirs("images", exist_ok=True)
os.makedirs("temp", exist_ok=True)
os.makedirs("output", exist_ok=True)

def generate_conversation(message):
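    """Ask the Groq tool-use demo Space (via gradio_client) for a short ad
    script, instructing it to answer as a JSON object {"script": "..."}."""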
    client = Client("Groq/demo-groq-tool-use")
    result = client.predict(
        message=message,
        system_message="""always give response in this json format:
        {
            "script": "short script here"
        }
        """,
        api_name="/chat"
    )
    return result

def extract_json_content(response):
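    """Extract the assistant's JSON payload from the raw gradio_client
    response string and parse it; returns None if nothing usable is found."""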
    try:
        match = re.search(r"'content': '(.+?)'(?=, 'metadata')", response, re.DOTALL)
        if match:
            json_str = match.group(1)
            json_str = json_str.replace("\\'", "'")
            content_json = json.loads(json_str)
            return content_json
    except Exception as e:
        logger.error(f"Error extracting JSON content: {str(e)}")
    return None

async def generate_voiceover(text, output_filename):
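    """Synthesize an MP3 voiceover for the script text with edge-tts
    (en-US-AvaNeural voice)."""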
    try:
        tts = edge_tts.Communicate(text, voice="en-US-AvaNeural")
        await tts.save(output_filename)
    except Exception as e:
        logger.error(f"Error generating voiceover: {str(e)}")
        raise

def get_duration(file_path):
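    """Return the duration of a media file in seconds, read via ffprobe."""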
    try:
        cmd = ['ffprobe', '-v', 'quiet', '-print_format', 'json', '-show_format', '-show_streams', file_path]
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        data = json.loads(result.stdout)
        return float(data['format']['duration'])
    except Exception as e:
        logger.error(f"Error getting duration: {str(e)}")
        raise

def combine_audio_video(video_file, audio_file, output_file):
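    """Mux the voiceover onto the base video, trimming or looping the video
    first so its length matches the audio."""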
    try:
        video_duration = get_duration(video_file)
        audio_duration = get_duration(audio_file)
        if video_duration > audio_duration:
            trim_cmd = [
                'ffmpeg', '-y', '-i', video_file, '-t', str(audio_duration),
                '-c:v', 'libx264', '-c:a', 'aac', f'{output_file}_trimmed.mp4'
            ]
            subprocess.run(trim_cmd, check=True)
            video_file = f'{output_file}_trimmed.mp4'
        elif video_duration < audio_duration:
            loop_count = int(audio_duration / video_duration) + 1
            loop_cmd = [
                'ffmpeg', '-y', '-stream_loop', str(loop_count), '-i', video_file,
                '-c', 'copy', f'{output_file}_looped.mp4'
            ]
            subprocess.run(loop_cmd, check=True)
            video_file = f'{output_file}_looped.mp4'
        combine_cmd = [
            'ffmpeg', '-y', '-i', video_file, '-i', audio_file,
            '-c:v', 'copy', '-c:a', 'aac', '-map', '0:v:0', '-map', '1:a:0',
            '-shortest', output_file
        ]
        subprocess.run(combine_cmd, check=True)
    except Exception as e:
        logger.error(f"Error combining audio and video: {str(e)}")
        raise

def add_background_to_image(image_path, output_path, video_width, video_height, background_color=(240, 240, 240)):
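    """Center the image on a solid-color canvas matching the video frame,
    scaled to 80% of the video width."""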
    try:
        with Image.open(image_path) as img:
            # Create a new image with the background color
            bg = Image.new('RGB', (video_width, video_height), color=background_color)
            # Calculate the size for the original image (80% of the video width)
            new_width = int(video_width * 0.8)
            ratio = new_width / img.width
            new_height = int(img.height * ratio)
            # Resize the original image
            img_resized = img.resize((new_width, new_height), Image.LANCZOS)
            # Calculate position to paste the image (center)
            position = ((video_width - new_width) // 2, (video_height - new_height) // 2)
            # Paste the original image onto the background
            bg.paste(img_resized, position, img_resized if img_resized.mode == 'RGBA' else None)
            # Save the result
            bg.save(output_path)
    except Exception as e:
        logger.error(f"Error adding background to image: {str(e)}")
        raise

def create_ad_video(video_path, images_folder, output_path, num_images=5, image_duration=2, image_interval=3):
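    """Overlay the processed product images onto the video at fixed intervals
    using a single ffmpeg filter_complex chain."""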
    try:
        # Get video dimensions
        dimension_cmd = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
                         '-show_entries', 'stream=width,height', '-of', 'csv=s=x:p=0', video_path]
        result = subprocess.run(dimension_cmd, capture_output=True, text=True, check=True)
        video_width, video_height = map(int, result.stdout.strip().split('x'))
        # Get video duration
        duration_cmd = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration',
                        '-of', 'default=noprint_wrappers=1:nokey=1', video_path]
        result = subprocess.run(duration_cmd, capture_output=True, text=True, check=True)
        video_duration = float(result.stdout.strip())
        # Prepare filter complex for overlaying images
        filter_complex = []
        inputs = ['-i', video_path]
        image_files = sorted([f for f in os.listdir(images_folder) if f.startswith('processed_image_')])[:num_images]
        logger.debug(f"Found image files: {image_files}")
        total_image_time = len(image_files) * (image_duration + image_interval)
        if total_image_time > video_duration:
            num_images = int(video_duration / (image_duration + image_interval))
            image_files = image_files[:num_images]
        # Add input files and create overlays
        for i, image_file in enumerate(image_files):
            image_path = os.path.join(images_folder, image_file)
            inputs.extend(['-i', image_path])
            start_time = 3 + i * (image_duration + image_interval)
            if i == 0:
                filter_complex.append(f"[0:v][{i+1}:v]overlay=enable='between(t,{start_time},{start_time + image_duration})'[v{i}];")
            else:
                filter_complex.append(f"[v{i-1}][{i+1}:v]overlay=enable='between(t,{start_time},{start_time + image_duration})'[v{i}];")
        # Construct final filter complex string
        filter_complex_str = ''.join(filter_complex)
        if not filter_complex_str:
            # If no images to overlay, just copy the video
            cmd = ['ffmpeg', '-y', '-i', video_path, '-c', 'copy', output_path]
        else:
            # Remove the last semicolon
            filter_complex_str = filter_complex_str.rstrip(';')
            # Construct FFmpeg command with all inputs and filter complex
            cmd = ['ffmpeg', '-y'] + inputs + [
                '-filter_complex', filter_complex_str,
                '-map', f'[v{len(image_files)-1}]' if image_files else '[0:v]',
                '-map', '0:a',
                '-c:a', 'copy',
                output_path
            ]
        logger.debug(f"FFmpeg command: {' '.join(cmd)}")
        # Execute FFmpeg command
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        logger.info("FFmpeg command executed successfully")
        return True
    except subprocess.CalledProcessError as e:
        logger.error(f"FFmpeg error: {e.stderr}")
        raise
    except Exception as e:
        logger.error(f"Error in create_ad_video: {str(e)}")
        raise

def create_video_ad(title, description, images, base_video, progress=gr.Progress()):
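    """End-to-end pipeline: save the inputs, process the images, generate the
    ad script and voiceover, merge audio with the base video, overlay the
    images, and return the path of the final video."""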
    session_id = str(uuid.uuid4())
    session_folder = os.path.join("temp", session_id)
    try:
        os.makedirs(session_folder, exist_ok=True)
        progress(0, desc="Saving data and processing images...")
        # Step 1: Save data and process images
        data = {
            "title": title,
            "description": description,
        }
        with open(os.path.join(session_folder, "scraped_data.json"), "w") as f:
            json.dump(data, f, indent=4)
        # Process video file
        temp_video_path = os.path.join(session_folder, "temp_video.mp4")
        # gr.File passes a filepath string or a tempfile-like object with a
        # .name attribute, never a gr.File component instance
        if isinstance(base_video, str):
            shutil.copy(base_video, temp_video_path)
        elif hasattr(base_video, "name"):
            shutil.copy(base_video.name, temp_video_path)
        else:
            raise Exception("Unexpected type for base_video")
        # Get video dimensions
        dimension_cmd = ['ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=width,height', '-of', 'csv=s=x:p=0', temp_video_path]
        video_width, video_height = map(int, subprocess.check_output(dimension_cmd).decode().strip().split('x'))
        saved_image_paths = []
        for i, image_path in enumerate(images[:5]):  # Limit to 5 images
            original_image_path = os.path.join(session_folder, f"original_image_{i+1}.png")
            Image.open(image_path).save(original_image_path)
            processed_image_path = os.path.join(session_folder, f"processed_image_{i+1}.png")
            add_background_to_image(original_image_path, processed_image_path, video_width, video_height)
            saved_image_paths.append(processed_image_path)
        progress(0.2, desc="Generating ad script...")
        # Step 2: Generate script
        user_message = f"Create a very short simple text ad script for a product with title: '{title}' and description: '{description}'"
        response_data = generate_conversation(user_message)
        simplified_data = extract_json_content(str(response_data))
        if not simplified_data:
            raise Exception("Failed to generate ad script")
        if isinstance(simplified_data, dict):
            script_content = simplified_data.get("script", "")
        elif isinstance(simplified_data, str):
            script_content = simplified_data
        else:
            raise Exception("Unexpected data type for script content")
        if not script_content:
            raise Exception("Generated script is empty")
        with open(os.path.join(session_folder, "script.json"), "w") as outfile:
            json.dump({"script": script_content}, outfile, indent=2)
        progress(0.4, desc="Generating voiceover...")
        # Step 3: Generate voiceover
        voiceover_path = os.path.join(session_folder, "voiceover.mp3")
        asyncio.run(generate_voiceover(script_content, voiceover_path))
        progress(0.6, desc="Processing video...")
        # Step 4: Handle video file (already done when getting dimensions)
        progress(0.8, desc="Creating final video...")
        # Step 5: Combine audio and video, add images
        combined_output = os.path.join(session_folder, "combined_video.mp4")
        final_output = os.path.join("output", f"{title.replace(' ', '_')}_ad_{session_id}.mp4")
        # Combine base video with audio
        combine_audio_video(temp_video_path, voiceover_path, combined_output)
        # Overlay images on the combined video
        create_ad_video(combined_output, session_folder, final_output, num_images=len(saved_image_paths))
        progress(1.0, desc="Cleaning up...")
        # Clean up temporary files
        shutil.rmtree(session_folder)
        return final_output
    except Exception as e:
        logger.error(f"Error in create_video_ad: {str(e)}")
        if os.path.exists(session_folder):
            shutil.rmtree(session_folder)
        # Surface the error in the UI instead of returning a string to a Video output
        raise gr.Error(f"Error occurred: {str(e)}")

# Define the Gradio interface
iface = gr.Interface(
    fn=create_video_ad,
    inputs=[
        gr.Textbox(label="Title"),
        gr.Textbox(label="Description", lines=3),
        gr.File(label="Upload Product Images (max 5)", file_count="multiple"),
        gr.File(label="Upload Base Video")
    ],
    outputs=gr.Video(label="Generated Video Ad"),
    title="AI Video Ad Creator",
    description="Enter a product title and description, then upload up to 5 product images and a base video to generate a video ad."
)

# Launch the interface
if __name__ == "__main__":
    iface.launch()