capradeepgujaran commited on
Commit
c08083d
1 Parent(s): dd791f7

Update openai_tts_tool.py

Browse files
Files changed (1) hide show
  1. openai_tts_tool.py +23 -36
openai_tts_tool.py CHANGED
@@ -1,42 +1,29 @@
1
  import openai
2
- import os
3
 
4
- # Ensure you load your API key from the environment or a .env file
5
- openai_api_key = os.getenv("OPENAI_API_KEY")
 
 
6
 
7
- # Ensure the OpenAI client is set up correctly
8
- client = openai.Client(api_key=openai_api_key)
 
9
 
10
- def generate_audio_and_text(api_key, text, model_name, voice_type, voice_speed, language, output_option, summary_length, additional_prompt):
11
- """
12
- Generate both text and audio based on the input parameters.
13
- - text: The text input for processing.
14
- - output_option: Either 'Text', 'Audio', or 'Both' for output.
15
- """
 
 
 
16
 
17
- # Placeholder for returning text (summary or full)
18
- generated_text = text
19
-
20
- # Generate audio only if audio is requested
21
- audio_path = None
 
22
 
23
- if output_option in ['Audio', 'Both']:
24
- audio_path = "output_audio.wav" # Define the output path for the audio
25
- try:
26
- response = client.audio.speech.create(
27
- model="tts-1-hd", # Use the updated model for TTS
28
- voice=voice_type,
29
- input=text
30
- )
31
- response.stream_to_file(audio_path)
32
- except Exception as e:
33
- return f"Error generating audio: {str(e)}", None, None
34
-
35
- # If 'Text' or 'Both' is selected, return the text as well
36
- if output_option in ['Text', 'Both']:
37
- if summary_length: # If the user specifies a summary, we would summarize the text here
38
- generated_text = f"Summarized text: {text[:summary_length]}..." # Placeholder for summary logic
39
- else:
40
- generated_text = text
41
-
42
- return generated_text, audio_path
 
1
  import openai
 
2
 
3
def generate_audio_and_text(api_key, input_text, model_name, voice_type, voice_speed, language, output_option, summary_length, additional_prompt):
    """Generate a text summary and/or spoken audio for *input_text*.

    Parameters:
        api_key: OpenAI API key used to build the client for this call.
        input_text: The text to summarize and/or synthesize.
        model_name: TTS model identifier (e.g. "tts-1", "tts-1-hd").
        voice_type: Voice preset passed to the TTS endpoint.
        voice_speed: Playback speed multiplier for the generated audio.
        language: Currently unused — the OpenAI TTS endpoint has no
            `language` parameter; the voice/model infer language from input.
        output_option: One of "summary_text", "audio", or "both".
        summary_length: Currently unused placeholder (summary logic is a stub).
        additional_prompt: Currently unused placeholder.

    Returns:
        (text_summary, audio_path) tuple. Either element is None when that
        output was not requested (or on an unrecognized output_option).
    """
    # Build a per-call client; no need to also mutate the global
    # openai.api_key (the old module-level assignment was redundant).
    client = openai.OpenAI(api_key=api_key)

    # Initialize both outputs so every return path is defined even for an
    # unexpected output_option value (avoids UnboundLocalError).
    text_summary = None
    audio_output = None

    if output_option in ("summary_text", "both"):
        # Placeholder summary: truncate to the first 100 characters.
        # TODO: replace with a real summarization call honoring summary_length
        # and additional_prompt.
        text_summary = f"Generated summary for: {input_text[:100]}..."

    if output_option in ("audio", "both"):
        audio_output = "output_audio.mp3"
        response = client.audio.speech.create(
            model=model_name,
            voice=voice_type,
            input=input_text,   # BUGFIX: the SDK parameter is `input`, not `text`
            speed=voice_speed,  # `language` removed — not a supported TTS parameter
        )
        # BUGFIX: the speech response is not subscriptable; write the audio
        # bytes to disk and return the file path (as the prior version did).
        response.stream_to_file(audio_output)

    return text_summary, audio_output