import gradio as gr
import openai, os, time, requests, io, config
import pandas as pd
import tempfile
import json

openai.api_key = config.OPENAI_API_KEY
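# The `config` module imported above is not part of this file; as a rough sketch, it is
# assumed to be a small config.py exposing the attribute referenced on the line above,
# for example (placeholder value, not a real key):
#
#     OPENAI_API_KEY = "sk-..."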
### Prompt setup
name = 'Zac'
email = '[email protected]'
company = "CallConnect"
previous_notes = "this is the first call"
prompt = f"""Who you are: you are a highly skilled cold-calling sales professional working for Call Connect, a sales company that helps companies make cold calls with AI. Your name is John Candy (only say your first name unless they ask).
Who you are speaking to: {name}, their email {email}, their company {company}, last call notes: {previous_notes}. If the user gives a different name, ask for the person you're calling.
Your competitors: currently no real competitors, but if the user brings up Sameday, let them know that all Sameday does is play pre-recorded voice clips, while we use AI to generate responses.
Your goal: you are trying to gather a lot of information about the user in roughly a 5-10 minute phone call. Do not ask for everything all at once; get to know the customer. You are trying to get the customer to see a demo of the AI cold-calling software your company sells. The software costs 2,500 a month plus a 10,000 onboarding fee. If they balk at this pricing, let them know that the average SDR salary is 50,000 a year before commission, health insurance, and whatever else reps get paid. The software can currently do the following: make cold calls, pitch the company or product to the customer, find the customer's pain, get the best times for a demo, send a summary of the call to the sales rep who will handle the next part of the sales process, and have a conversation outside the scope of the product (make small talk if needed).
Your goal is to push the person to a demo, but you also need to find their pain points. Some common pain points: cold calling is hard and takes a lot of time and training for a rep to become good at it; sales reps spend too much time calling leads; sales reps more than likely won't hit 100 calls a day; if they get a lot of "not interested" in a row they start to question their job choice and churn quickly; they might become flustered and forget the sales playbook and plan of action. At the end of the call, ask the customer for the three best times to book a demo later this week; if they cannot do it this week, push for early next week. If you do not know the answer to a question, let the caller know it is a great question but you will have to get back to them on that answer via email. If they start asking a lot of questions you do not know, say something along the lines of: these are great questions that can all be answered on the demo. DO NOT DO: talk about pricing too much, get too into the weeds about features you are not sure about, or make up dates. The current date is March 9th, 2023. DO NOT GET OFF THE PHONE WITHOUT THE PAIN POINTS, HOW THEY CURRENTLY HANDLE COLD CALLING, HOW MANY SDRS THEY CURRENTLY HAVE, THEIR CURRENT EMAIL, AND A DEMO TIME. EVERY CALL MUST END WITH 'GOODBYE' OR 'BYE' AS THE LAST WORD WITH NO PUNCTUATION"""
messages = [
    {"role": "system", "content": prompt},
    {"role": "system", "content": "Only respond as John; wait for the user's response before going further"},
    {"role": "system", "content": "DO NOT use hyphens or symbols in responses; replace them all with appropriate meaningful words"},
]
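# `messages` holds the running conversation for the whole call: the system prompts above,
# plus every user utterance and assistant reply that transcribe() appends as the call goes on.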
def respond(message):
    # The conversation history (including the user message the caller just appended)
    # lives in the module-level `messages` list, so the argument is not used directly here.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0,
        presence_penalty=2,
        frequency_penalty=2
    )
    print('Response: ', completion.choices[0].message.content)
    return completion.choices[0].message.content
def create_audio(text):
    """Generate speech for `text` with the ElevenLabs API and save it under audios/."""
    url = "https://api.elevenlabs.io/v1/text-to-speech/UhmVh8U8kVILeMXoBzCb/stream"
    voice_settings = {
        "stability": 0.4,
        "similarity_boost": 0.8
    }
    # Create the request headers and body
    headers = {
        'xi-api-key': 'cef25a99905abc1ce8ddfa9c7c2a7e07',
        'Content-Type': 'application/json'
    }
    body = {
        'text': text,
        'voice_settings': voice_settings
    }
    json_body = json.dumps(body)
    # Make the POST request to the API
    print("making post request to 11 labs to make an audio")
    posting = requests.post(url, headers=headers, data=json_body)
    aid = 0  # default so the function still returns a value if the request fails
    if posting.status_code == 200:
        print('audio created, saving...')
        # Look up the history item id so the saved file name matches the ElevenLabs record
        aid = find_audio(text)
        print(aid, "<---This is audio id after creation")
        os.makedirs('audios', exist_ok=True)
        with open(f'audios/{aid}.wav', 'wb') as f:
            f.write(posting.content)
        print('audio saved')
    else:
        # Print the error message returned by the API
        print(posting.text)
    print(aid, "<---This is audio id after creation before returning")
    return aid
def find_audio(text):
    """Return the ElevenLabs history item id for `text`, or 0 if no audio exists yet."""
    url = "https://api.elevenlabs.io/v1/history"
    headers = {
        "accept": "*/*",
        "xi-api-key": 'cef25a99905abc1ce8ddfa9c7c2a7e07',
        "Content-Type": "application/json",
    }
    history = requests.get(url, headers=headers)
    data = json.loads(history.content)
    # Iterate over the history items and return the id of the first exact text match
    result = 0
    for item in data['history']:
        if item["text"] == text:
            result = item["history_item_id"]
            break
    print(result)
    return result
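# Taken together, find_audio() and create_audio() act as a simple cache keyed on the response
# text: the history lookup is tried first, and speech is only generated when no matching
# ElevenLabs history item exists. A rough usage sketch (the sample text is hypothetical):
#
#     aid = find_audio("Hello, this is John.")
#     if aid == 0:
#         aid = create_audio("Hello, this is John.")
#     audio_path = f"audios/{aid}.wav"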
def summarize_call(chat_transcript):
    prompt = (
        "Analyze the following text and build a summary in a CRM format with key fields: "
        "lead name, their email, pain points, current solutions used, whether the demo was "
        "scheduled and at what time. At the end, include a short summary of the call from "
        f"this transcript:\n{chat_transcript}"
    )
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        temperature=0,
        max_tokens=1024,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    summary = response.choices[0].text.strip()
    return summary
chat_transcript = ""

## MAIN
def transcribe(audio_file_path):
    global chat_transcript
    # Copy the recorded audio into a temp .wav file and transcribe it with Whisper
    with open(audio_file_path, "rb") as audio_file:
        with tempfile.NamedTemporaryFile(mode='w+b', suffix='.wav') as temp_file:
            temp_file.write(audio_file.read())
            temp_file.seek(0)
            transcript = openai.Audio.transcribe("whisper-1", temp_file)
    message = transcript['text']
    print(f"Transcribed message: {message}")
    messages.append({"role": "user", "content": message})
    response = respond(message)
    messages.append({"role": "assistant", "content": response})
    ### Check for existing audio for this response
    audio_id = find_audio(response)
    print('Audio ID:', audio_id)
    if audio_id == 0:
        print("Creating new audio...")
        ### Generate new audio
        audio_id = create_audio(response)
    to_play = f'audios/{audio_id}.wav'
    chat_transcript += f'user: {message}\nassistant: {response}\n'
    # Detect the end of the conversation and write the call summary
    last_words = response.lower().rstrip('.! ').split()
    if last_words and last_words[-1] in ('bye', 'goodbye'):
        summary = summarize_call(chat_transcript)
        # Save the chat transcript and summary to a file
        folder_name = "Calls Made Today"
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)
        file_name = os.path.join(folder_name, "call_summary.txt")
        with open(file_name, "w") as f:
            f.write(chat_transcript)
            f.write(f"\n\nAnd the summary:\n{summary}")
    return chat_transcript, to_play
ui = gr.Interface(
    fn=transcribe,
    live=True,
    inputs=[gr.Audio(source="microphone", type="filepath")],
    outputs=["text", "audio"],
    title="Call Connect Automated Cold Calling AI",
    description="""This is a demo of what Call Connect's Cold Calling AI can do. Ask it a question and watch it respond.""",
)
ui.launch(debug=True, share=True)
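# To try this Space locally (assuming the file is saved as app.py, gradio/openai/requests are
# installed, and config.py provides the key noted at the top): run `python app.py`, open the
# Gradio URL it prints, record a question with the microphone, and the running transcript plus
# the spoken reply come back as the two outputs.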