# -*- coding: utf-8 -*-
"""ConfusedAutoShortVideoGen.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1qGRLgmJahs6-cNBhO_SIsz_yXKz2OqEW
"""

!pip install gradio
!pip install gradio_client
!pip install whisperx
!pip install pydub

## Menu
## script_writing.py

mscript_input = "what is depression"
mscript_music_input = "What is depression"
final_video_output = "final_video_output.mp4"
musicownpath = '/content/tmp1mbn3d3s.mp4'



import csv
import re
from datetime import datetime
from gradio_client import Client

# Initialize the client with the correct Hugging Face Space
client = Client("Abu1998/Meme_finder")

# Define the system message and input sentence
system_message = """Task: Act as a YouTube Shorts content writer.

Objective: Create engaging, catchy, and trendy scripts for YouTube Shorts videos that are brief, attention-grabbing, and optimized for viral potential.

Guidelines:

Each script should be 15-30 seconds long.
Use a hook in the first few seconds to capture viewers' attention.
Ensure the content is aligned with trending topics, challenges, or popular culture.
Incorporate humor, relatable scenarios, or strong emotions to resonate with the audience.
End with a clear call-to-action (CTA) like “Follow for more!” or a cliffhanger.
Example Flow:

User Input: “Write a script about the Monday blues.”
AI Output:
Script: "POV: It’s Monday morning, and you’re already done with the week. [Clip shows someone groggily hitting the snooze button, dragging themselves out of bed]. But wait… there’s coffee. And suddenly, everything’s okay! ☕✨ [Cut to a quick burst of energy with upbeat music]. If you’re just surviving till the weekend, hit that follow button for more relatable vibes!"
"""

# Define the user input (the sentence for which you want to find the main keyword)
user_input = mscript_input

# Make the API call with the specified parameters
result = client.predict(
    message=user_input,
    system_message=system_message,
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
    api_name="/chat"
)

# Extract the script from the result
script = result.strip()

# Function to split script into words
def split_into_words(script_text):
    words = re.findall(r'\w+', script_text)  # Find all words
    return words

# Convert the script to a list of words
words = split_into_words(script)


# Define the output file names
csv_file = 'updates.csv'
txt_file = 'script_output'

# Save to CSV
with open(csv_file, mode='w', newline='', encoding='utf-8') as file:
    writer = csv.writer(file)
    writer.writerow(['Content', 'Word'])  # Headers
    for word in words:
        writer.writerow([user_input, word])  # Write each word as a separate row

print(f"Script generated, split into words, and saved to {csv_file}.")

# Save to TXT
with open(txt_file, mode='w', encoding='utf-8') as file:
    file.write(script)

print(f"Script saved to {txt_file}.")

"""### audio_gen.py"""

# gradio_client was already installed at the top of the notebook


from gradio_client import Client
from google.colab import files
import shutil

# Initialize the client with the correct Hugging Face Space
client = Client("innoai/Edge-TTS-Text-to-Speech")

# Path to the script file written by the previous cell
file_path = "/content/script_output"

# Read the content from the uploaded script file
with open(file_path, 'r', encoding='utf-8') as file:
    text_input = file.read().strip()  # Read and strip any extra whitespace

# Make the API call with the file content as input
result = client.predict(
    text=text_input,
    voice="en-US-AvaMultilingualNeural - en-US (Female)",  # You can change the voice as needed
    rate=0,  # You can adjust the speech rate if needed
    pitch=0,  # You can adjust the pitch if needed
    api_name="/predict"
)

# Check the result type and content
print(result)

# Extract the local file path from the result
audio_file_path = result[0]  # Assuming the audio file path is the first element

# Define the output file name and path
output_file_path = "/content/audio_output.mp3"

# Copy the file to the desired location
shutil.copy(audio_file_path, output_file_path)

# Provide download link for the generated audio file
#files.download(output_file_path)
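
# Optional sanity check (not in the original notebook): confirm the TTS output
# was copied and print its length, since the later steps build the video around
# the narration. pydub is already installed above.
from pydub import AudioSegment

narration = AudioSegment.from_file(output_file_path)
print(f"Narration length: {narration.duration_seconds:.2f} s")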

"""###Music Gen"""


"""### Time Stamp"""

!pip install whisperx

import whisperx
import torch
import pandas as pd

# Initialize the WhisperX model
device = "cuda" if torch.cuda.is_available() else "cpu"
compute_type = "float32" if device == "cpu" else "float16"
model = whisperx.load_model("large-v2", device, compute_type=compute_type)

def transcribe_and_align(audio_file):
    # Load audio
    audio = whisperx.load_audio(audio_file)
    print("Audio loaded successfully.")

    # Transcribe
    result = model.transcribe(audio, batch_size=16)
    print("Transcription result:", result)

    # Align transcription
    model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
    result = whisperx.align(result["segments"], model_a, metadata, audio, device, return_char_alignments=True)
    print("Alignment result:", result)

    # Process segments to get word-level timestamps
    word_segments = []
    for segment in result["segments"]:
        for word_info in segment.get("words", []):  # Ensure 'words' is used
            if "word" in word_info and "start" in word_info and "end" in word_info:
                word_segments.append({
                    "word": word_info["word"],
                    "start": word_info["start"],
                    "end": word_info["end"],
                    "duration": word_info["end"] - word_info["start"]
                })

    # Debug: Print word segments to check if they are being populated
    print("Word segments:", word_segments)

    # Convert the word segments to a DataFrame
    df = pd.DataFrame(word_segments)

    # Save the result to a CSV file
    output_file = "/content/transcription_with_word_timestamps.csv"  # Ensure correct file path
    df.to_csv(output_file, index=False)

    return output_file

# Provide the path to your audio file
audio_file_path = "/content/audio_output.mp3"
# Transcribe and align the audio file
output_file = transcribe_and_align(audio_file_path)

# Print the path to the output file
print(f"Word-level transcription with timestamps saved to: {output_file}")

"""### common_words_remover"""

# prompt: write a code to drop these common words from output_file word column , COMMON_WORDS = {"the", "and", "is", "in", "to", "of", "a", "with", "for", "on", "it", "as", "at", "by", "an","this", "that", "which", "or", "be", "are", "was", "were", "has", "have", "had", "why", "such","here", "some", "so", "easy"}

import pandas as pd
def drop_common_words(input_file, output_file, common_words):
  """
  Drops rows containing common words in the 'word' column and saves the result to a new CSV file.

  Args:
    input_file (str): The path to the input CSV file.
    output_file (str): The path to the output CSV file.
    common_words (set): A set of common words to be removed.
  """
  df = pd.read_csv(input_file)
  df['word'] = df['word'].str.lower()  # Convert words to lowercase for comparison
  df = df[~df['word'].isin(common_words)]  # Filter out rows with common words
  df.to_csv(output_file, index=False)

# Set of common words to drop
COMMON_WORDS = {"the", "and", "is", "in", "to", "of", "a", "with", "for", "on", "it", "as", "at", "by", "an","this", "that", "which", "or", "be", "are", "was", "were", "has", "have", "had", "why", "such","here", "some", "so", "easy"}

# Input and output file paths
input_file = "/content/transcription_with_word_timestamps.csv"
output_file = "/content/filtered_transcription.csv"

# Call the function to drop common words
drop_common_words(input_file, output_file, COMMON_WORDS)

print(f"Rows with common words dropped and saved to {output_file}")

"""### common_words_remover 2nd step"""

import pandas as pd
from pydub import AudioSegment

def update_dataframe_with_audio_duration(csv_file, audio_file):
    # Load the CSV file into a DataFrame
    df = pd.read_csv(csv_file)

    # Calculate the total duration of the audio
    audio = AudioSegment.from_file(audio_file)
    total_duration = audio.duration_seconds

    # Drop existing 'end' and 'duration' columns
    df = df.drop(columns=['end', 'duration'], errors='ignore')

    # Create a new 'end' column with the next 'start' value
    df['end'] = df['start'].shift(-1)

    # Force the first word's 'start' to 0.01 s
    df.loc[0, 'start'] = 0.01

    # The last row's 'end' should be the total audio duration
    df.loc[df.index[-1], 'end'] = total_duration

    # Create a new 'duration' column based on the difference between 'start' and 'end'
    df['duration'] = df['end'] - df['start']

    # Save the updated DataFrame back to CSV, extracting filename and prepending 'updated_'
    updated_csv_file = 'updated_' + csv_file.split('/')[-1] # Extract filename and prepend 'updated_'
    df.to_csv(updated_csv_file, index=False)

    print(f"Updated DataFrame saved to: {updated_csv_file}")
    return updated_csv_file

# Example usage
csv_file = '/content/filtered_transcription.csv'
audio_file = musicownpath
update_dataframe_with_audio_duration(csv_file, audio_file)
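
# Quick check (not part of the original notebook): after the adjustment above,
# the per-word durations should sum to roughly the length of the clip passed as
# audio_file (here musicownpath), since that is what the concatenated GIF video
# is later built from.
check_df = pd.read_csv('/content/updated_filtered_transcription.csv')
print(f"Sum of word durations: {check_df['duration'].sum():.2f} s")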

"""### **Giphy Gif Download**"""

# prompt: write a code for "/content/dropped_2024-08-21_18-58-34.csv" to use Word column search in giphy api (API_KEY = "KzPlVn6nz6czmjWpPEy6reL52r1H5gs7") search and download in /content/memes this folder name as the word name

import requests
import csv
import os

# Giphy API details
API_KEY = "KzPlVn6nz6czmjWpPEy6reL52r1H5gs7"
SEARCH_URL = "https://api.giphy.com/v1/gifs/search"

# CSV and download directory
CSV_FILE = "/content/updated_filtered_transcription.csv"
DOWNLOAD_DIR = '/content/memes2'

# Create download directory if it doesn't exist
os.makedirs(DOWNLOAD_DIR, exist_ok=True)

def download_giphy_gif(search_term, filename):
  """Downloads a GIF from Giphy based on the search term."""
  params = {
      'api_key': API_KEY,
      'q': search_term,
      'limit': 1
  }
  response = requests.get(SEARCH_URL, params=params)
  data = response.json()

  if data['data']:
    gif_url = data['data'][0]['images']['original']['url']
    gif_response = requests.get(gif_url)

    with open(os.path.join(DOWNLOAD_DIR, filename), 'wb') as f:
      f.write(gif_response.content)
    print(f"Downloaded GIF for '{search_term}' as '{filename}'")
  else:
    print(f"No GIF found for '{search_term}'")

# Process the CSV file
with open(CSV_FILE, 'r', encoding='utf-8') as file:
  reader = csv.DictReader(file)
  for row in reader:
    word = row['word']
    filename = f"{word}.gif"
    download_giphy_gif(word, filename)
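
# Optional check (not in the original notebook): list any words whose GIF could
# not be downloaded, so the concatenation step below is not silently shorter
# than expected.
missing = []
with open(CSV_FILE, 'r', encoding='utf-8') as file:
    for row in csv.DictReader(file):
        if not os.path.exists(os.path.join(DOWNLOAD_DIR, f"{row['word']}.gif")):
            missing.append(row['word'])
if missing:
    print(f"No GIF downloaded for: {missing}")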

import moviepy.editor as mpe
import os
import csv

# CSV and download directory paths
CSV_FILE = '/content/updated_filtered_transcription.csv'
DOWNLOAD_DIR = '/content/memes2'
OUTPUT_VIDEO = 'updated_concatenated_memes.mp4'

# Get the GIF order and durations from the CSV file
gif_order = []
durations = {}
with open(CSV_FILE, 'r', encoding='utf-8') as file:
    reader = csv.DictReader(file)
    for row in reader:
        gif_filename = row['word'] + '.gif'
        duration = float(row['duration'])  # Ensure this matches the column name in your CSV
        gif_order.append(gif_filename)
        durations[gif_filename] = duration

# Load, crop, and concatenate GIFs
clips = []
for gif_filename in gif_order:
    gif_path = os.path.join(DOWNLOAD_DIR, gif_filename)
    if os.path.exists(gif_path):
        clip = mpe.VideoFileClip(gif_path).resize(height=480)  # Resize to the same height
        clip = clip.set_fps(24)  # Match the frame rate for consistency

        # Crop each GIF to the specified duration from the new CSV
        max_duration = durations.get(gif_filename, clip.duration)  # Use the duration from the CSV or the full clip duration if not found
        if clip.duration > max_duration:
            clip = clip.subclip(0, max_duration)  # Keep up to the specified duration

        clips.append(clip)
    else:
        print(f"Warning: GIF not found: {gif_filename}")

# Concatenate and save the video
if clips:
    final_clip = mpe.concatenate_videoclips(clips, method="compose")
    final_clip.write_videofile(OUTPUT_VIDEO, fps=24)  # Set fps to match the GIFs
    print(f"Concatenated video saved as {OUTPUT_VIDEO}")
else:
    print("No GIFs found to concatenate.")

"""### concate_audio_gif_music"""

import moviepy.editor as mpe
import os

# File paths
video_file = '/content/updated_concatenated_memes.mp4'
music_file = musicownpath
audio_file = "/content/audio_output.mp3"
output_file = '/content/final_output.mp4'

# Load the video, music, and audio files
video_clip = mpe.VideoFileClip(video_file)
music_clip = mpe.VideoFileClip(music_file)
audio_clip = mpe.AudioFileClip(audio_file)

# Duration of the video
video_duration = video_clip.duration

# Ensure the music duration matches the video duration
if music_clip.duration < video_duration:
    # Repeat the music to match the video duration
    n_repeats = int(video_duration // music_clip.duration) + 1
    music_clip = mpe.concatenate_videoclips([music_clip] * n_repeats).subclip(0, video_duration)
elif music_clip.duration > video_duration:
    music_clip = music_clip.subclip(0, video_duration)

# Reduce the music volume to 30% and keep the narration at full volume
music_clip = music_clip.volumex(0.3)  # Reduce music volume to 30%

# Ensure the audio duration matches the video duration
if audio_clip.duration < video_duration:
    # Repeat the audio to match the video duration
    n_repeats = int(video_duration // audio_clip.duration) + 1
    audio_clip = mpe.concatenate_audioclips([audio_clip] * n_repeats).subclip(0, video_duration)
elif audio_clip.duration > video_duration:
    audio_clip = audio_clip.subclip(0, video_duration)

# Mix the narration with the quieter background music and attach it to the video
final_audio = mpe.CompositeAudioClip([audio_clip, music_clip.audio])
final_clip = video_clip.set_audio(final_audio)

# Write the final output video with the adjusted music and narration
final_clip.write_videofile(output_file, codec='libx264', audio_codec='aac')

print(f"Final video saved as {output_file}")

import moviepy.editor as mpe
import os

# File paths
video_file = '/content/updated_concatenated_memes.mp4'
music_file = musicownpath
audio_file = "/content/audio_output.mp3"
output_file = '/content/final_output2.mp4'

# Load the video, music, and audio files
video_clip = mpe.VideoFileClip(video_file)
music_clip = mpe.VideoFileClip(music_file)
audio_clip = mpe.AudioFileClip(audio_file)

# Duration of the video
video_duration = video_clip.duration

# Ensure the music duration matches the video duration
if music_clip.duration < video_duration:
    # Repeat the music to match the video duration
    n_repeats = int(video_duration // music_clip.duration) + 1
    music_clip = mpe.concatenate_videoclips([music_clip] * n_repeats).subclip(0, video_duration)
elif music_clip.duration > video_duration:
    music_clip = music_clip.subclip(0, video_duration)

# Ensure the audio duration matches the video duration
if audio_clip.duration < video_duration:
    # Repeat the audio to match the video duration
    n_repeats = int(video_duration // audio_clip.duration) + 1
    audio_clip = mpe.concatenate_audioclips([audio_clip] * n_repeats).subclip(0, video_duration)
elif audio_clip.duration > video_duration:
    audio_clip = audio_clip.subclip(0, video_duration)

# Reduce the music volume to 20% and keep the narration at full volume
music_clip = music_clip.volumex(0.2)  # Reduce music volume to 20%

# Mix the background music (at reduced volume) with the narration and attach it to the video
final_audio = mpe.CompositeAudioClip([music_clip.audio, audio_clip])
final_clip = video_clip.set_audio(final_audio)

# Write the final output video
final_clip.write_videofile(output_file, codec='libx264', audio_codec='aac')

print(f"Final video saved as {output_file}")