import streamlit as st
from streamlit_option_menu import option_menu
from streamlit_chat import message
import openai
# from openai import OpenAI
import requests
import json
import streamlit.components.v1 as components
import webbrowser
import pickle
import random
from streamlit_pills import pills
from pathlib import Path
from streamlit_login_auth_ui.widgets import __login__
from streamlit_lottie import st_lottie
from typing import Optional, Any, Dict, List
from PIL import Image, ImageEnhance
from rembg import remove
from datetime import datetime, timedelta
import os
import gtts
from gtts import gTTS
from googletrans import Translator
import urllib.request
import time
import warnings
warnings.filterwarnings("ignore")
# PPT Imports
# import plotly.express as px
from pptx import Presentation
from pptx.util import Inches
from datetime import date
from io import BytesIO
import glob
import base64
# import codecs
import re
import string
import subprocess
from pptx.dml.color import RGBColor
import yaml
from yaml.loader import SafeLoader
# End of PPT Imports
# langchain methods and imports
from langchain import LLMChain
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import RetrievalQA
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import VectorStore
from langchain.vectorstores.faiss import FAISS
from pypdf import PdfReader
# Define a function to parse a PDF file and extract its text content
@st.cache_data
def parse_pdf(file: BytesIO) -> List[str]:
pdf = PdfReader(file)
output = []
for page in pdf.pages:
text = page.extract_text()
# Merge hyphenated words
text = re.sub(r"(\w+)-\n(\w+)", r"\1\2", text)
# Fix newlines in the middle of sentences
        text = re.sub(r"(?<!\n\s)\n(?!\s\n)", " ", text.strip())
        # Remove multiple newlines
        text = re.sub(r"\n\s*\n", "\n\n", text)
        output.append(text)
    return output

# Define a function to convert text (or a list of page texts) into Documents
@st.cache_data
def text_to_docs(text: str) -> List[Document]:
"""Converts a string or list of strings to a list of Documents
with metadata."""
if isinstance(text, str):
# Take a single string as one page
text = [text]
page_docs = [Document(page_content=page) for page in text]
# Add page numbers as metadata
for i, doc in enumerate(page_docs):
doc.metadata["page"] = i + 1
# Split pages into chunks
doc_chunks = []
for doc in page_docs:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=2000,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=0,
)
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
)
            # Add sources as metadata
doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
doc_chunks.append(doc)
return doc_chunks
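# A minimal (commented-out) sketch of wiring the two helpers above to a Streamlit
# upload widget; "uploaded_pdf" is a hypothetical name, not part of this app:
# uploaded_pdf = st.file_uploader("Upload a PDF", type="pdf")
# if uploaded_pdf is not None:
#     pages = text_to_docs(parse_pdf(BytesIO(uploaded_pdf.read())))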
# Define a function for the embeddings
@st.cache_data
def test_embed():
embeddings = OpenAIEmbeddings(openai_api_key=api)
# Indexing
# Save in a Vector DB
with st.spinner("It's indexing..."):
index = FAISS.from_documents(pages, embeddings)
    st.success("Embeddings done.", icon="✅")
return index
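# Illustrative (commented-out) retrieval against the FAISS index built above;
# the query text and k value are placeholders:
# index = test_embed()
# top_docs = index.similarity_search("What is this document about?", k=4)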
# End of langchain methods
import pandas as pd
df = pd.read_csv('files.csv')
# Load OpenAI key
# openai.api_key = open_api_key
# from sd2.generate import PIPELINE_NAMES, generate
# DEFAULT_PROMPT = "border collie puppy"
# DEFAULT_WIDTH, DEFAULT_HEIGHT = 512, 512
# OUTPUT_IMAGE_KEY = "output_img"
# LOADED_IMAGE_KEY = "loaded_image"
# For simple chat module
def get_initial_message():
messages=[
{"role": "system", "content": "You are a helpful AI Assistant created by Alpha AI. You can do anything."},
{"role": "user", "content": "I want to know a lot of things."},
        {"role": "assistant", "content": "That's awesome! What do you want to know about?"}
]
return messages
def get_chatgpt_response(messages, model="gpt-4o"):
print("model: ", model)
response = openai.ChatCompletion.create(
model=model,
messages=messages
)
return response['choices'][0]['message']['content']
def update_chat(messages, role, content):
messages.append({"role": role, "content": content})
return messages
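# Example (commented-out) round trip using the simple chat helpers above; the
# user message is illustrative only:
# msgs = get_initial_message()
# msgs = update_chat(msgs, "user", "Give me a fun fact about space.")
# reply = get_chatgpt_response(msgs)
# msgs = update_chat(msgs, "assistant", reply)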
# ------
# Define image sizes
image_sizes = {
"256x256": "256x256",
"512x512": "512x512",
"1024x1024": "1024x1024"
}
# For making AD copies
# Define function to edit an image with DALL-E using the provided mask
def generate_image_edit_dalle(prompt, size,input_img,mask_img):
outline_img = "A high resolution portrait of " + prompt
img_response = openai.Image.create_edit(
image=open(input_img, "rb"),
mask=open(mask_img, "rb"),
prompt=outline_img,
n=1,
size=size
)
image_url = img_response['data'][0]['url']
urllib.request.urlretrieve(image_url, 'img_dalle_inp.png')
img = Image.open("img_dalle_inp.png")
return img
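# Hypothetical (commented-out) call, assuming "input.png" and "mask.png" exist locally:
# edited = generate_image_edit_dalle("a smiling astronaut", "512x512", "input.png", "mask.png")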
# Define function to generate image
def generate_image(prompt, size):
img_response = openai.Image.create(
prompt=prompt,
n=1,
size=size)
img_url = img_response['data'][0]['url']
urllib.request.urlretrieve(img_url, 'img.png')
img = Image.open("img.png")
return img
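# Hypothetical (commented-out) call using one of the sizes defined above:
# img = generate_image("a watercolor painting of the Himalayas", image_sizes["512x512"])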
# Text to Speech Avatars
def text_to_speech_avatar(text):
tts = gTTS(text, lang='en', tld='co.uk', slow=False)
# try:
# my_file_name = text[0:20]
# except:
# my_file_name = "audio"
addition_name = st.session_state['name'][:5]
my_file_name = "dummy_" + addition_name
tts.save(f"temp/{my_file_name}.mp3")
# print(my_file_name)
return True
# PPT Methods
bad_coding_practice = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in
range(16))
def refresh_bad_coding_practice():
global bad_coding_practice
bad_coding_practice = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)
for _ in range(16))
return
def generate_content_blog(user_input):
completion = openai.ChatCompletion.create(model="gpt-4o", messages=[
{"role": "system", "content": "You are an expert blog writer and can produce perfect grammar, sentence formation and SEO capable blogs."},
{"role": "user", "content": user_input}
],max_tokens=2500, temperature = 0.6,presence_penalty = 0.1,frequency_penalty = 0.1,top_p=1,)
result = completion.choices[0].message.content
return result
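# Illustrative (commented-out) call; the topic is a placeholder:
# blog_text = generate_content_blog("Write a blog post about healthy breakfast ideas.")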
def PrefixNameDownloader(image_description):
outline_img = "A high resolution image of "
img_response = openai.Image.create(
prompt = outline_img + image_description,
n=1,
size="512x512")
img_url = img_response['data'][0]['url']
img_name = 'prefix_' + bad_coding_practice + "img001.jpg"
urllib.request.urlretrieve(img_url, img_name)
# img = Image.open("img.png")
return img_name
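# Illustrative (commented-out) call; returns the local filename of the downloaded image:
# img_file = PrefixNameDownloader("a city skyline at night")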
def generate_ppt(topic, slide_length,color, font_colors):
root = Presentation("theme0.pptx")
message = f"""Create a slideshow presentation on the topic of {topic} which is {slide_length} slides
long. Add images on every slide except the title slide.
You are allowed to use the following slide types:
Slide types:
Title Slide - (Title, Subtitle)
Content Slide - (Title, Content)
Image Slide - (Title, Content, Image)
Thanks Slide - (Title)
Put this tag before the Title Slide: [L_TS]
Put this tag before the Content Slide: [L_CS]
Put this tag before the Image Slide: [L_IS]
Put this tag before the Thanks Slide: [L_THS]
Put "[SLIDEBREAK]" after each slide
For example:
[L_TS]
[TITLE]Mental Health[/TITLE]
[SLIDEBREAK]
[L_CS]
[TITLE]Mental Health Definition[/TITLE]
[CONTENT]
1. Definition: A person's condition with regard to their psychological and emotional well-being
2. Can impact one's physical health
3. Stigmatized too often.
[/CONTENT]
[SLIDEBREAK]
Put this tag before the Title: [TITLE]
Put this tag after the Title: [/TITLE]
Put this tag before the Subtitle: [SUBTITLE]
Put this tag after the Subtitle: [/SUBTITLE]
Put this tag before the Content: [CONTENT]
Put this tag after the Content: [/CONTENT]
Put this tag before the Image: [IMAGE]
Put this tag after the Image: [/IMAGE]
Elaborate on the Content, provide as much information as possible.
You put a [/CONTENT] at the end of the Content.
Do not reply as if you are talking about the slideshow itself. (ex. "Include pictures here about...")
Do not include any special characters (?, !, ., :, ) in the Title.
Do not include any additional information in your response and stick to the format."""
response = openai.ChatCompletion.create(
model="gpt-4o",
messages=[
            {"role": "system", "content": "Act as an expert presentation creator. You know all about designs, creativity, layouts and how to make amazing presentations."},
            {"role": "user", "content": message}
]
)
# """ Ref for slide types:
# 0 -> title and subtitle
# 1 -> title and content
# 2 -> section header
# 3 -> two content
# 4 -> Comparison
# 5 -> Title only
# 6 -> Blank
# 7 -> Content with caption
# 8 -> Pic with caption
# """
def delete_all_slides():
for i in range(len(root.slides)-1, -1, -1):
r_id = root.slides._sldIdLst[i].rId
root.part.drop_rel(r_id)
del root.slides._sldIdLst[i]
def create_title_slide(title, subtitle,slide_bg_color, font_colors):
layout = root.slide_layouts[0]
slide = root.slides.add_slide(layout)
background = slide.background
if slide_bg_color == "white":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 255, 255)
elif slide_bg_color == "black":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 0, 0)
elif slide_bg_color == "red":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 0, 0)
elif slide_bg_color == "green":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 255, 0)
elif slide_bg_color == "blue":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 0, 255)
else:
# If an invalid color is provided, default to white
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 255, 255)
slide.shapes.title.text = title
slide.placeholders[1].text = subtitle
if font_colors == "white":
# slide.shapes.title.font.color.rgb = RGBColor(255, 255, 255)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
slide.placeholders[1].text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
elif font_colors == "black":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 0)
slide.placeholders[1].text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 0)
elif font_colors == "red":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 0, 0)
slide.placeholders[1].text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 0, 0)
elif font_colors == "green":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 255, 0)
slide.placeholders[1].text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 255, 0)
elif font_colors == "blue":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 255)
slide.placeholders[1].text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 255)
else:
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
slide.placeholders[1].text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
def create_section_header_slide(title, slide_bg_color, font_colors):
layout = root.slide_layouts[2]
slide = root.slides.add_slide(layout)
background = slide.background
if slide_bg_color == "white":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 255, 255)
elif slide_bg_color == "black":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 0, 0)
elif slide_bg_color == "red":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 0, 0)
elif slide_bg_color == "green":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 255, 0)
elif slide_bg_color == "blue":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 0, 255)
else:
# If an invalid color is provided, default to white
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 255, 255)
slide.shapes.title.text = title
if font_colors == "white":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
elif font_colors == "black":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 0)
elif font_colors == "red":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 0, 0)
elif font_colors == "green":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 255, 0)
elif font_colors == "blue":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 255)
else:
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
def create_title_and_content_slide(title, content, slide_bg_color,font_colors):
layout = root.slide_layouts[1]
slide = root.slides.add_slide(layout)
background = slide.background
if slide_bg_color == "white":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 255, 255)
elif slide_bg_color == "black":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 0, 0)
elif slide_bg_color == "red":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 0, 0)
elif slide_bg_color == "green":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 255, 0)
elif slide_bg_color == "blue":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 0, 255)
else:
# If an invalid color is provided, default to white
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 255, 255)
slide.shapes.title.text = title
# slide.placeholders[1].text = content
content_placeholder = slide.placeholders[1]
content_placeholder.text = content
# set font color of bullet points in content
# for paragraph in content_placeholder.text_frame.paragraphs:
# paragraph.font.color.rgb = RGBColor(255, 255, 255)
if font_colors == "white":
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(255, 255, 255)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
elif font_colors == "black":
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 0)
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(0, 0, 0)
elif font_colors == "red":
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(255, 0, 0)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 0, 0)
elif font_colors == "green":
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(0, 255, 0)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 255, 0)
elif font_colors == "blue":
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(0, 0, 255)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 255)
else:
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(255, 255, 255)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
def create_title_and_content_and_image_slide(title, content, image_query, slide_bg_color,font_colors):
layout = root.slide_layouts[8]
slide = root.slides.add_slide(layout)
background = slide.background
if slide_bg_color == "white":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 255, 255)
elif slide_bg_color == "black":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 0, 0)
elif slide_bg_color == "red":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 0, 0)
elif slide_bg_color == "green":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 255, 0)
elif slide_bg_color == "blue":
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(0, 0, 255)
else:
# If an invalid color is provided, default to white
background.fill.solid()
background.fill.fore_color.rgb = RGBColor(255, 255, 255)
slide.shapes.title.text = title
# slide.placeholders[2].text = content
content_placeholder = slide.placeholders[2]
content_placeholder.text = content
if font_colors == "white":
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(255, 255, 255)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
elif font_colors == "black":
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(0, 0, 0)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 0)
elif font_colors == "red":
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(255, 0, 0)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 0, 0)
elif font_colors == "green":
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(0, 255, 0)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 255, 0)
elif font_colors == "blue":
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(0, 0, 255)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(0, 0, 255)
else:
for paragraph in content_placeholder.text_frame.paragraphs:
paragraph.font.color.rgb = RGBColor(255, 255, 255)
slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
refresh_bad_coding_practice()
        generated_image = PrefixNameDownloader(image_query)
        dir_path = os.path.dirname(os.path.realpath(generated_image))
file_name = glob.glob(f"prefix_{bad_coding_practice}*")
# print(file_name)
img_path = os.path.join(dir_path, file_name[0])
slide.shapes.add_picture(img_path, slide.placeholders[1].left, slide.placeholders[1].top,
slide.placeholders[1].width, slide.placeholders[1].height)
def find_text_in_between_tags(text, start_tag, end_tag):
start_pos = text.find(start_tag)
end_pos = text.find(end_tag)
result = []
while start_pos > -1 and end_pos > -1:
text_between_tags = text[start_pos+len(start_tag):end_pos]
result.append(text_between_tags)
start_pos = text.find(start_tag, end_pos+len(end_tag))
end_pos = text.find(end_tag, start_pos)
res1 = "".join(result)
res2 = re.sub(r"\[IMAGE\].*?\[/IMAGE\]", '', res1)
if len(result) > 0:
return res2
else:
return ""
def search_for_slide_type(text):
tags = ["[L_TS]", "[L_CS]", "[L_IS]", "[L_THS]"]
found_text = next((s for s in tags if s in text), None)
return found_text
def parse_response(reply,color,font_colors):
slide_bg_color = color.lower()
list_of_slides = reply.split("[SLIDEBREAK]")
for slide in list_of_slides:
slide_type = search_for_slide_type(slide)
if slide_type == "[L_TS]":
create_title_slide(find_text_in_between_tags(str(slide), "[TITLE]", "[/TITLE]"),
find_text_in_between_tags(str(slide), "[SUBTITLE]", "[/SUBTITLE]"),slide_bg_color,font_colors)
elif slide_type == "[L_CS]":
create_title_and_content_slide("".join(find_text_in_between_tags(str(slide), "[TITLE]", "[/TITLE]")),
"".join(find_text_in_between_tags(str(slide), "[CONTENT]",
"[/CONTENT]")),slide_bg_color,font_colors)
elif slide_type == "[L_IS]":
create_title_and_content_and_image_slide("".join(find_text_in_between_tags(str(slide), "[TITLE]",
"[/TITLE]")),
"".join(find_text_in_between_tags(str(slide), "[CONTENT]",
"[/CONTENT]")),
"".join(find_text_in_between_tags(str(slide), "[IMAGE]",
"[/IMAGE]")),slide_bg_color,font_colors)
elif slide_type == "[L_THS]":
create_section_header_slide("".join(find_text_in_between_tags(str(slide), "[TITLE]", "[/TITLE]")),slide_bg_color,font_colors)
def find_title():
# res = ''.join(random.choices(string.ascii_uppercase + string.digits, k=7))
# val = str(res)
# return val
return root.slides[0].shapes.title.text
delete_all_slides()
# print(response)
parse_response(response['choices'][0]['message']['content'],color,font_colors)
path_new = "files"
root.save(f"{path_new}/{find_title()}.pptx")
binary_output = BytesIO()
root.save(binary_output)
# print("done")
# return rf"Done! {find_title()} is ready! You can find it at {os.getcwd()}\{find_title()}.pptx"
return f"{path_new}/{find_title()}.pptx",binary_output
# End of PPT Methods
# GTTS
def text_to_speech(text,ext):
tts = gTTS(text, lang='en', tld='co.uk', slow=False)
try:
my_file_name = text[0:20]
except:
my_file_name = "audio"
tts.save(f"temp/{my_file_name}.mp3")
# print(my_file_name)
return my_file_name
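# Illustrative (commented-out) call; the second argument is currently unused by the function:
# audio_name = text_to_speech("Hello from the Generative AI Toolkit", datetime.now())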
now_date = datetime.now()
# round to nearest 15 minutes
now_date = now_date.replace(minute=now_date.minute // 15 * 15, second=0, microsecond=0)
# split into date and time objects
now_time = now_date.time()
now_date = now_date.date() + timedelta(days=1)
# List of methods
def generateBlogTopics(prompt1):
response = openai.Completion.create(
engine="text-davinci-003",
# engine="gpt-3.5-turbo",
prompt="Generate blog topics on: {}. \n \n 1. ".format(prompt1),
temperature=0.7,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['text']
def generateBlogSections(prompt1):
response = openai.Completion.create(
engine="text-davinci-003",
# engine="gpt-3.5-turbo",
        prompt="Expand the blog title into high-level blog sections: {} \n\n- Introduction: ".format(prompt1),
temperature=0.6,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['text']
def blogSectionExpander(prompt1):
response = openai.Completion.create(
engine="text-davinci-003",
# engine="gpt-3.5-turbo",
        prompt="Expand the blog section into a detailed, professional, witty and clever explanation.\n\n {}".format(prompt1),
temperature=0.7,
max_tokens=750,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['text']
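# Example (commented-out) pipeline chaining the three blog helpers above; all inputs are placeholders:
# topics = generateBlogTopics("sustainable travel")
# sections = generateBlogSections("Sustainable Travel on a Budget")
# body = blogSectionExpander("Introduction: why sustainable travel matters")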
def generate_legal_content(user_input):
try:
completion = openai.ChatCompletion.create(model="gpt-4o", messages=[
            {"role": "system", "content": "You are alphaGPT, an AI assistant custom trained and created by Alpha AI to work within the legal industry of India. You are proficient at every task when it comes to law, legal processes, legal resources, etc."},
            {"role": "user", "content": user_input}
],max_tokens=3000, temperature = 0.7,presence_penalty = 0.1,frequency_penalty = 0.1)
# print(type(completion))
result = completion.choices[0].message.content
return result
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
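# Illustrative (commented-out) call; the question is a placeholder:
# answer = generate_legal_content("Summarise the key steps to register a trademark in India.")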
def load_lottiefile(filepath: str):
with open(filepath, "r") as f:
return json.load(f)
im = Image.open("favicon.ico")
st.set_page_config(
page_title="Generative AI Toolkit",
page_icon=im,
layout="centered",
initial_sidebar_state="expanded",
)
# Define the metadata
image_url = "aai_edu.png"
hide_st_style = """
Alpha AI - Among India's leading AI Research and Development Startups
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
def load_lottieurl(url: str):
r = requests.get(url)
if r.status_code != 200:
return None
return r.json()
# lottie_coding = load_lottiefile("comp_anim.json") # replace link to local lottie file
lottie_hello1 = load_lottieurl("https://assets6.lottiefiles.com/packages/lf20_AHptq1.json")
# lottie_hello1 = load_lottieurl("https://assets5.lottiefiles.com/packages/lf20_hlvOdjjxTF.json")
place1 = st.empty()
with place1.container():
anima1 , anima2 = st.columns([2,1])
with anima1:
# st.image("aai_black.png", width = 350, use_column_width=True)
st.image("aai_white.png", width = 350, use_column_width=True)
with anima2:
st_lottie(
lottie_hello1,
speed=1,
reverse=False,
loop=True,
quality="high", # medium ; high
height=220,
width=220,
key=None,
)
__login__obj = __login__(auth_token = "courier_auth_token",
company_name = "Alpha AI",
width = 200, height = 250,
logout_button_name = 'Logout', hide_menu_bool = False,
hide_footer_bool = True) #,
# lottie_url = 'https://assets2.lottiefiles.com/packages/lf20_jcikwtux.json')
LOGGED_IN = __login__obj.build_login_ui()
if 'openai.api_key' not in st.session_state:
st.session_state['openai.api_key'] = openai.api_key
place2 = st.empty()
with place2.container():
st.header('Welcome to our Generative AI Toolkit :sunglasses:')
    st.subheader("The results of our toolkit are backed by a large-scale unsupervised language model that can generate paragraphs of text. This transformer-based language model, built on the GPT-3 model architecture initially proposed by OpenAI, takes a sentence or partial sentence as input and predicts the text that follows.")
    st.subheader("There is currently a token limit on various parts of the tool, which means you can create content of up to 750 words.")
    st.caption('For any feedback, or to get the toolkit personalized for your use cases, contact us at alphaaiofficial@gmail.com :sunglasses:')
    st.info("ChatGPT, GPT-3 models, and LangChain have been integrated in the latest update...")
# with open('creds.yaml') as file:
# config = yaml.load(file, Loader=SafeLoader)
# # load hashed passwords
# file_path = Path(__file__).parent / "hashed_pw.pkl"
# with file_path.open("rb") as file:
# hashed_passwords = pickle.load(file)
# with st.container():
# tab1, tab2, tab3, tab4 = st.tabs(["Login", "Reset", "Register","Forgot Password"])
# with tab1:
# authenticator = stauth.Authenticate(
# config['credentials'],
# config['cookie']['name'],
# config['cookie']['key'],
# config['cookie']['expiry_days'],
# config['preauthorized']
# )
# name, authentication_status, username = authenticator.login("Login", "main")
# with tab2:
# try:
# if authenticator.reset_password(username, 'Reset password'):
# st.success('Password modified successfully')
# with open('creds.yaml', 'w') as file:
# yaml.dump(config, file, default_flow_style=False)
# except Exception as e:
# st.error(e)
# with tab3:
# try:
# if authenticator.register_user('Register user', preauthorization=False):
# st.success('User registered successfully')
# with open('creds.yaml', 'w') as file:
# yaml.dump(config, file, default_flow_style=False)
# except Exception as e:
# st.error(e)
# with tab4:
# try:
# username_forgot_pw, email_forgot_password, random_password = authenticator.forgot_password('Forgot password')
# if username_forgot_pw:
# st.success('New password sent securely')
# # Random password to be transferred to user securely
# with open('creds.yaml', 'w') as file:
# yaml.dump(config, file, default_flow_style=False)
# else:
# st.error('Username not found')
# except Exception as e:
# st.error(e)
# if authentication_status == False:
# st.error("Username/password is incorrect")
# if authentication_status == None:
# st.info("Please enter your username and password")
# if authentication_status:
if st.session_state['LOGGED_IN']:
place1.empty()
place2.empty()
# horizontal menu
with st.sidebar:
# st.write(f"Hello user!")
# Store and display user's OpenAI API key
api_key = st.text_input("Enter your OpenAI API key:", value = st.session_state['openai.api_key'], type="password", key='openai_session_key')
openai.api_key = api_key
st.session_state["openai.api_key"] = api_key
st.write(f"Your OpenAI API key is: {api_key}")
selected = option_menu(
menu_title="Generative AI Toolkit", # required
options=[
"Home",
"---",
"Chat Mode",
"---" ,
"alphaGPT",
"---" ,
"Act-Prompts",
"---" ,
"Food Blogger",
"---" ,
"Travel Blogger",
"---" ,
"E-Commerce",
"---" ,
"Business Brief Generator",
"---" ,
"Keyword Extraction",
"---" ,
"Summarization",
"---" ,
"Grammar Correction",
"---" ,
"Restaurant Reviews",
"---" ,
"Image Generator",
"---" ,
"Blog Generator",
"---" ,
"Content Paraphraser",
"---" ,
"Story Teller",
"---" ,
"Social Media Copywriting",
"---" ,
"Marketing Campaign",
"---" ,
"PPT Generator",
"---" ,
"AD Generator",
"---" ,
"Legal Aid",
"---" ,
"Travel and Tourism",
"---",
"Document Chat"
# ,
# "---",
# "GPT-LipSync"
],# required
icons=[
"house-fill",
"---",
"robot",
"---" ,
"bi bi-chat-left-dots-fill",
"---" ,
"person-video",
"---" ,
"cup-straw",
"---" ,
"compass",
"---" ,
"badge-ad",
"---" ,
"file-earmark-richtext",
"---" ,
"card-text",
"---" ,
"justify",
"---" ,
"fonts",
"---" ,
"stars",
"---" ,
"images",
"---" ,
"bi bi-envelope",
"---" ,
"cursor-text",
"---" ,
"card-text",
"---" ,
"telegram",
"---" ,
"people",
"---" ,
"file-slides-fill",
"---" ,
"pencil-square",
"---" ,
"book",
"---" ,
"geo-alt-fill",
"---",
"file-pdf-fill"
# ,
# "---",
# "stop-circle"
], # optional
menu_icon="cast", # optional
default_index=0, # optional
orientation="vertical",
)
if selected == "Home":
st.header('Welcome to our Generative AI Toolkit :sunglasses:')
        st.subheader("The results of our toolkit are backed by a large-scale unsupervised language model that can generate paragraphs of text. This transformer-based language model, built on the GPT-3 model architecture initially proposed by OpenAI, takes a sentence or partial sentence as input and predicts the text that follows.")
        st.subheader("There is currently a token limit on various parts of the tool, which means you can create content of up to 750 words.")
        st.caption('For any feedback, or to get the toolkit personalized for your use cases, contact us at alphaaiofficial@gmail.com :sunglasses:')
        st.info("ChatGPT, GPT-3 models, and LangChain have been integrated in the latest update...")
#For Chat GPT
elif selected == "Chat Mode":
# Initialize model selection and session state
model = st.selectbox(
"Select a model",
("gpt-4o", "gpt-3.5-turbo", "gpt-3.5-turbo-0301")
)
# client = OpenAI(openai_api_key=api_key)
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = model
if "messages" not in st.session_state:
st.session_state.messages = []
# Display existing chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# User input and assistant response handling
if prompt := st.chat_input("What is up?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
# Call OpenAI API to generate response
response = openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
)
# Extract and display assistant's response
assistant_response = response['choices'][0]['message']['content']
st.session_state.messages.append({"role": "assistant", "content": assistant_response})
with st.chat_message("assistant"):
st.markdown(assistant_response)
# Clear session state
st.info("Click on 'Clear' (at times, twice) to clear data!")
if st.button("Clear", type="primary", use_container_width=True):
st.session_state.clear()
#For Chat GPT
elif selected == "alphaGPT":
st.subheader("alphaGPT: An AI-powered chatbot")
# You can also use radio buttons instead
selected_alpha = st.radio("Stream the result in realtime or view it in one go!",("NO Streaming","Streaming"))
# selected = pills("", ["NO Streaming", "Streaming"], ["π", "π‘"])
user_input = st.text_area("You: ",placeholder = "Ask me anything ...", key="input", height=300)
if st.button("Submit", type="primary"):
st.markdown("----")
res_box = st.empty()
if selected_alpha == "Streaming":
report = []
temp_var = ""
# Looping over the response
model_v3 = "gpt-4o"
# model_v4 = "gpt-4-0314"
try:
for resp in openai.ChatCompletion.create(model=model_v3,
messages=[
                    {"role": "system", "content": "You are an AI language model custom trained and created by Alpha AI. You are proficient at every task."},
{"role": "user", "content": user_input}
],
max_tokens=2500,
temperature = 0.6,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
# join method to concatenate the elements of the list
# into a single string,
# then strip out any empty strings
# print(resp)
if "content" in resp["choices"][0]["delta"]:
report.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report).strip()
# result = result.replace("\n", "")
res_box.markdown(f'{result}')
temp_var = f'{result}'
else:
pass
# print(temp_var)
# For TTS
st.markdown(f"Wait for your audio to render")
ext = dt = datetime.now()
result_aud = text_to_speech(temp_var, ext)
audio_file = open(f"temp/{result_aud}.mp3", "rb")
audio_bytes = audio_file.read()
st.markdown(f"Your audio:")
st.audio(audio_bytes, format="audio/mp3", start_time=0)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
else:
try:
completion = openai.ChatCompletion.create(model="gpt-4o", messages=[
                    {"role": "system", "content": "You are an AI assistant custom trained and created by Alpha AI. You are proficient at every task."},
{"role": "user", "content": user_input}
],max_tokens=2500, temperature = 0.6,presence_penalty = 0.1,frequency_penalty = 0.1)
# print(type(completion))
result = completion.choices[0].message.content
res_box.write(result)
# For TTS
st.markdown(f"Wait for your audio to render")
ext = dt = datetime.now()
result_aud = text_to_speech(result,ext)
audio_file = open(f"temp/{result_aud}.mp3", "rb")
audio_bytes = audio_file.read()
st.markdown(f"Your audio:")
st.audio(audio_bytes, format="audio/mp3", start_time=0)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
# pyttsx3.speak(result)
st.markdown("----")
        st.warning("Below are some examples one can refer to. To use one, simply copy-paste it and edit the content.")
st.info("""Write a product page for my company called HealthyBites that provides healthy organic meal delivery.
The goal is to persuade potential customers to choose HealthyBites as their partner in achieving better health and wellness
The target audience is health-conscious individuals who want to improve their eating habits but struggle with limited time or cooking skills.
Ensure the text is an ideal length based on the industry standard.
Write using the English language in a fun style and creative writing tone.
Use a first-person narrative.
Format the text in a table with the 5 rows: [hero section - add persuasive power words], [description - include emotional appeal], [benefits - use benefit-focused language], [FAQs - include relevant facts and data] and [call to action - express urgency].""")
st.info("""
Write a blog post that explains the difficulties and challenges implementing SEO.
The goal is to explain the challenges and provide some simple tips and guidelines to overcome these challenges.
The target audience is marketers and business owners.
Ensure the text is an ideal length based on the industry standard.
Write using British English in an informal style and an excited writing tone.
Use a first-person narrative.
Provide a compelling and catchy title in H1 format.
Format the text as follows using HTML code and H2 subheadings: [introduction - add persuasive power words], [main body - include emotional appeal and break out into sub-sections] and [conclusion - express urgency and include a CTA].
""")
st.info("""
Generate a table summarising the keywords used in the above blog. Include 4 columns: [keyword], [intent], [keyword density] and [user - include typical job titles that may be interested in the keywords]. Order by frequency of use.
""")
st.info("""
Generate a table summarising keywords that are likely to be popular with people searching Google specifically for information on [topic/ideas]. Include 3 columns: [keyword], [intent] and [user - typical job titles that may be interested]. Order by relevancy to the topic.
""")
st.info("""
Write a blog post that [provide the title, topic or detail about what's needed].
The goal is [explain the desired outcome].
The target audience is [state the audience].
Ensure the text is an ideal length based on the industry standard.
Write using the [state your country] language in a [include the style - see examples below] style and [include the tone - see examples below] writing tone.
Use a [first-person/third-person] narrative.
Optimise the text for the following keywords [insert researched keywords] in an SEO-friendly manner.
Provide a compelling and catchy title in H1 format.
Format the text as follows using HTML code and H2 subheadings: [introduction - add persuasive power words], [main body - include emotional appeal and break out into sub-sections] and [conclusion - express urgency and include a CTA].
""")
st.info("""
Write a short explanation of [topic] in the [country] language that includes specific bullet points relevant to a [country] audience
""")
st.info("""
I want you to act as a [state role]. You will come up with [explain what's expected and the audience]. Your words should have [provide additional context and detail, including style and tone]. My first request is [state the topic/request].
""")
elif selected == "Act-Prompts":
st.subheader("Acts & Prompts: Edit prompts and generate content based on acts dynamically.")
act_options = df['act'].unique().tolist()
selected_act = st.selectbox("Select an act", act_options)
# Get corresponding prompt for selected 'act'
prompt = df.loc[df['act'] == selected_act, 'prompt'].values[0]
# Display prompt
st.write("Prompt:")
# Allow user to choose and edit prompt
edited_prompt = st.text_area("Edit prompt", prompt,height=350)
# Generate response using GPT-3.5 API on submit button click
if st.button("Submit", type="primary"):
res_box = st.empty()
report = []
temp_var = ""
# Looping over the response
model_v3 = "gpt-4o"
# model_v4 = "gpt-4-0314"
try:
for resp in openai.ChatCompletion.create(model=model_v3,
messages=[
                {"role": "system", "content": "You are an AI language model custom trained and created by Alpha AI. You are proficient at every task."},
{"role": "user", "content": edited_prompt}
],
max_tokens=2500,
temperature = 0.6,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
# join method to concatenate the elements of the list
# into a single string,
# then strip out any empty strings
# print(resp)
if "content" in resp["choices"][0]["delta"]:
report.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report).strip()
# result = result.replace("\n", "")
res_box.markdown(f'{result}')
temp_var = f'{result}'
else:
pass
# print(temp_var)
# For TTS
# st.markdown(f"Wait for your audio to render")
# ext = dt = datetime.now()
# result_aud = text_to_speech(temp_var, ext)
# audio_file = open(f"temp/{result_aud}.mp3", "rb")
# audio_bytes = audio_file.read()
# st.markdown(f"Your audio:")
# st.audio(audio_bytes, format="audio/mp3", start_time=0)
# st.markdown("----")
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
elif selected == "Food Blogger":
st.subheader("AI powered 'Food Blogger'")
# You can also use radio buttons instead
selected_food = st.radio("Stream the result in realtime or view it in one go!",("NO Streaming","Streaming"))
# selected = pills("", ["NO Streaming", "Streaming"], ["π", "π‘"])
user_input = st.text_input("You: ",placeholder = "Ask me anything ...", key="input")
if st.button("Submit", type="primary"):
st.markdown("----")
res_box = st.empty()
            content_sys = "Act as an amazing food blogger who works with Tripsero and likes to talk only about food, restaurants, cafes, meals, and everything about food and drinks."
if selected_food == "Streaming":
report = []
temp_var2 = ""
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
{"role": "system", "content":content_sys},
{"role": "user", "content": user_input}
],
max_tokens=2500,
temperature = 0.6,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
# join method to concatenate the elements of the list
# into a single string,
# then strip out any empty strings
# print(resp)
if "content" in resp["choices"][0]["delta"]:
report.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report).strip()
# result = result.replace("\n", "")
res_box.markdown(f'{result}')
temp_var2 = f'{result}'
else:
pass
# For TTS
st.markdown(f"Wait for your audio to render")
ext = dt = datetime.now()
result_aud2 = text_to_speech(temp_var2,ext)
audio_file = open(f"temp/{result_aud2}.mp3", "rb")
audio_bytes = audio_file.read()
st.markdown(f"Your audio:")
st.audio(audio_bytes, format="audio/mp3", start_time=0)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
else:
try:
completion = openai.ChatCompletion.create(model="gpt-4o", messages=[
{"role": "system", "content":content_sys},
{"role": "user", "content": user_input}
],max_tokens=2500, temperature = 0.6,presence_penalty = 0.1,frequency_penalty = 0.1)
# print(type(completion))
result = completion.choices[0].message.content
res_box.write(result)
# For TTS
st.markdown(f"Wait for your audio to render")
ext = dt = datetime.now()
result_aud2 = text_to_speech(result,ext)
audio_file = open(f"temp/{result_aud2}.mp3", "rb")
audio_bytes = audio_file.read()
st.markdown(f"Your audio:")
st.audio(audio_bytes, format="audio/mp3", start_time=0)
# pyttsx3.speak(result)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
st.markdown("----")
elif selected == "Travel Blogger":
st.subheader("AI powered 'Travel Blogger'")
# You can also use radio buttons instead
selected_travel = st.radio("Stream the result in realtime or view it in one go!",("NO Streaming","Streaming"))
# selected = pills("", ["NO Streaming", "Streaming"], ["π", "π‘"])
user_input = st.text_input("You: ",placeholder = "Ask me anything ...", key="input")
if st.button("Submit", type="primary"):
st.markdown("----")
res_box = st.empty()
            content_sys = "Act as a famous Indian travel blogger who works with Tripsero, travels to different places, and writes about his personal experiences and more. You love adventures, random trips, personalized experience-based travel and, in general, the travel, tourism and hospitality industry."
if selected_travel == "Streaming":
report = []
temp_var3 = ""
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
{"role": "system", "content":content_sys},
{"role": "user", "content": user_input}
],
max_tokens=2500,
temperature = 0.6,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
# join method to concatenate the elements of the list
# into a single string,
# then strip out any empty strings
# print(resp)
if "content" in resp["choices"][0]["delta"]:
report.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report).strip()
# result = result.replace("\n", "")
res_box.markdown(f'{result}')
temp_var3 = f'{result}'
else:
pass
# For TTS
st.markdown(f"Wait for your audio to render")
ext = dt = datetime.now()
result_aud3 = text_to_speech(temp_var3,ext)
audio_file = open(f"temp/{result_aud3}.mp3", "rb")
audio_bytes = audio_file.read()
st.markdown(f"Your audio:")
st.audio(audio_bytes, format="audio/mp3", start_time=0)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
else:
try:
completion = openai.ChatCompletion.create(model="gpt-4o", messages=[
{"role": "system", "content":content_sys},
{"role": "user", "content": user_input}
],max_tokens=2500, temperature = 0.6,presence_penalty = 0.1,frequency_penalty = 0.1)
# print(type(completion))
result = completion.choices[0].message.content
res_box.write(result)
# For TTS
st.markdown(f"Wait for your audio to render")
ext = dt = datetime.now()
result_aud3 = text_to_speech(result,ext)
audio_file = open(f"temp/{result_aud3}.mp3", "rb")
audio_bytes = audio_file.read()
st.markdown(f"Your audio:")
st.audio(audio_bytes, format="audio/mp3", start_time=0)
# pyttsx3.speak(result)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
st.markdown("----")
# For E-Commerce
elif selected == "E-Commerce":
st.subheader('Generate content for various e-Commerce scenarios.')
        option = st.selectbox('What would you like to do today?',
(
'Product Collection Description',
'Product Title Generator',
'Collection Title Generator',
'Product Descriptions',
'Ad-caption Generator',
'Ad-copy Generator',
'Article Generator',
'Social media page description',
'Email Content - Outreach',
            'Homepage Content',
            'Homepage Headline'
))
if option =="Product Collection Description":
product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
final_pmt = "Act as an ecommerce merchandising expert and create a product collection description for the " + product + " from the brand " + brand
elif option =="Product Title Generator":
product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
industry = st.text_input("Enter the industry of the brand.")
final_pmt = "Behave like an ecommerce merchandising specialist and draft a product title for " + product + " from " + brand + ", a " + industry + " brand."
elif option =="Collection Title Generator":
product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
final_pmt = "Act as an ecommerce merchandising expert and create a product collection title for " + product + " from " + brand + "."
elif option =="Product Descriptions":
product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
ideal_customer_persona = st.text_input("Enter the ideal customer persona.")
final_pmt = "Think like an ecommerce merchandising specialist and write a product description to list " +product+ " on an ecommerce store " + brand + " for a customer who is a "+ ideal_customer_persona
elif option =="Ad-caption Generator":
product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
channel = st.selectbox('Choose the social media channel.',
(
'Instagram',
'Facebook',
'LinkedIn',
'Twitter',
))
final_pmt = "Think like an ecommerce digital advertiser and create an ad caption for the product "+ product+ " from the brand " + brand + ". Optimize the caption for " + channel + " ads and relevant character limits."
elif option =="Ad-copy Generator":
product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
channel = st.selectbox('Choose the social media channel.',
(
'Instagram',
'Facebook',
'LinkedIn',
'Twitter',
))
final_pmt = "Think like an ecommerce digital advertising copywriter and create ad copy for the product " + product + " from the brand '"+brand+"'. Optimize the copy for "+channel+" ads."
elif option =="Article Generator":
product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
final_pmt = "Act as an ecommerce content writer and write an article on the product '"+product+"' from the brand '"+brand+"'"
elif option =="Social media page description":
product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
channel = st.selectbox('Choose the social media channel.',
(
'Instagram',
'Facebook',
'LinkedIn',
'Twitter',
))
industry = st.text_input("Enter the industry of the brand.")
final_pmt = "Think like an ecommerce social media specialist and write a 90 character "+channel+" page description for a "+industry+" brand named "+brand+" which sells "+product
elif option =="Email Content - Outreach":
product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
ideal_customer_persona = st.text_input("Enter the ideal customer persona.")
industry = st.text_input("Enter the industry of the brand.")
final_pmt = "Act like an email marketing expert for ecommerce and draft an email campaign for the product '"+product+"' from the brand '"+brand+"', belonging to the "+industry+" industry. The campaign is intended for a "+ideal_customer_persona+"."
elif option =="Homepage Content":
# product = st.text_input("What type of product is it?")
brand = st.text_input("Enter the brand name.")
industry = st.text_input("Enter the industry of the brand.")
final_pmt = "Act like an ecommerce content writer and create homepage content for a "+industry+" product by the brand '"+brand+"'"
elif option =="Homepage Headline":
product = st.text_input("What type of product is it, what is it called?")
brand = st.text_input("Enter the brand name.")
industry = st.text_input("Enter the industry of the brand.")
final_pmt = "Behave like an ecommerce expert and create a homepage headline for a "+industry+" product called "+product+" from the brand "+brand
else:
st.text("Please select an option!")
if st.button("Submit", type="primary"):
res_box6 = st.empty()
report6 = []
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
                {"role": "system", "content": "Act as an AI advertiser custom trained and created by Alpha AI. You are proficient at every task."},
                {"role": "user", "content": final_pmt}
],
max_tokens=4000,
temperature = 0.7,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
if "content" in resp["choices"][0]["delta"]:
report6.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report6).strip()
# result = result.replace("\n", "")
res_box6.markdown(f'{result}')
else:
pass
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
# For Keyword Extraction
elif selected == "Business Brief Generator":
st.subheader("Business Brief Generator.")
business_name = st.text_input("Enter the name of your business.", max_chars=15)
business_type = st.selectbox('What is the type of your business entity?',(
'Private Limited',
'Limited Liability Partnership',
'One Person Company',
'Partnership',
'Sole Proprietorship'
))
country = "India"
product_service = st.text_area("Please describe your product / service.", max_chars = 230)
short_description = st.text_area("Please provide a short description for your business.", max_chars = 300)
        years = st.text_input("For how many years or months has your business been active?", placeholder="Eg. 2 years / 1 year / 1 month / 4 months.", max_chars=20)
progress = st.text_area("Please tell us a little about your progress so far.", max_chars = 200)
        prompt_business = "Generate an elaborate Business Plan for the following business, using the guidelines provided:\nBusiness Name: " + business_name + "\nBusiness Type: "+business_type+"\nCountry: "+country+"\nProduct or Service: "+product_service+"\nShort Business Description: "+short_description+"\nYears in operation: "+years+"\nBusiness progress to date: "+ progress+"\n\nGuidelines: Start the company description by listing the business name and company structure, if one is provided. Write a detailed business description for the short description provided, in a professional business tone. Describe the industry the business will be operating in and re-write the business progress to date. Finally, provide a numbered list of five suitable business objectives and a list of 5 plan of action deliverables for this business. For each objective and plan of action, describe how it fits the business needs and how it will benefit the stakeholders in the long run."
if st.button("Generate", type="primary"):
res_box_bpg = st.empty()
report_bpg = []
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
                {"role": "system", "content": "Act as a professional business plan writer who is very detailed and precise. You are proficient at every task."},
                {"role": "user", "content": prompt_business}
],
max_tokens=3500,
temperature = 0.7,
presence_penalty = 0.2,
frequency_penalty = 0.1,
stream = True):
if "content" in resp["choices"][0]["delta"]:
report_bpg.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report_bpg).strip()
# result = result.replace("\n", "")
res_box_bpg.markdown(f'{result}')
else:
pass
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
# For Keyword Extraction
elif selected == "Keyword Extraction":
st.subheader("Keyword Extraction")
# Create Text Area Widget to enable user to enter texts
        article_text = st.text_area("Enter your text corpus and press Enter; wait for the validation to happen!", max_chars = 500)
# Next, we'll add a check to make sure that the input text is long enough
# to summarize, and display a warning if it is not:
if len(article_text)>10:
if st.button("Extract Keywords",type='primary'):
# Use GPT-3 to generate a summary of the article
try:
response = openai.Completion.create(
engine="text-davinci-003",
# engine="gpt-3.5-turbo",
prompt="Extract keywords from this text: " + article_text,
max_tokens = 60,
temperature = 0.5,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.1
)
# Print the generated summary
res = response["choices"][0]["text"]
st.success(res)
st.download_button('Download result', res)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
else:
            st.warning("Not enough text to extract keywords from!")
# For "Summarization"
elif selected == "Summarization":
st.subheader("Summarization")
# Create Text Area Widget to enable user to enter texts
article_text = st.text_area("Enter your scientific texts to summarize", max_chars = 5000)
# Create Radio Button
output_size = st.radio( label = "What kind of output do you want?",
options= ["To-The-Point", "Concise", "Detailed"]
)
# First, we'll use an if statement to determine the desired output size
# and set the out_token variable accordingly:
if output_size == "To-The-Point":
out_token = 50
elif output_size == "Concise":
out_token = 128
else:
out_token = 516
# Next, we'll add a check to make sure that the input text is long enough
# to summarize, and display a warning if it is not:
if len(article_text)>100:
if st.button("Generate Summary",type='primary'):
# Use GPT-3 to generate a summary of the article
try:
response = openai.Completion.create(
engine="text-curie-001",
prompt="Please summarize this scientific article for me in a few sentences: " + article_text,
max_tokens = out_token,
temperature = 0.5,
)
# Print the generated summary
res = response["choices"][0]["text"]
st.success(res)
# pyttsx3.speak(res)
st.download_button('Download result', res)
# For TTS
st.markdown(f"Wait for your audio to render")
ext = datetime.now()
result_aud4 = text_to_speech(res,ext)
audio_file = open(f"temp/{result_aud4}.mp3", "rb")
audio_bytes = audio_file.read()
st.markdown(f"Your audio:")
st.audio(audio_bytes, format="audio/mp3", start_time=0)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
else:
st.warning("Not enough words to summarize!")
# For "Grammar Correction"
elif selected == "Grammar Correction":
st.subheader("Grammar Correction")
# Create Text Area Widget to enable user to enter texts
article_text = st.text_area("Enter the text you would like to correct", max_chars = 1000)
# Create Radio Buttons
output_size = st.radio( label = "What type of correction are you looking for?",
options= ["Standard English", "Sentence Formation"]
)
# First, we'll use an if statement to determine the desired output size
# and set the out_token variable accordingly:
Appending_desc = None
if output_size == "Standard English":
out_token = 600
Appending_desc = "Correct this to standard English: "
elif output_size == "Sentence Formation":
out_token = 600
Appending_desc = "Correct this to standard English sentence: "
# Next, we'll add a check to make sure that the input text is long enough
# to summarize, and display a warning if it is not:
if len(article_text)>1:
if st.button("Correct Grammar",type='primary'):
# Use GPT-3 to generate a summary of the article
try:
response = openai.Completion.create(
engine="text-davinci-003",
prompt= Appending_desc + article_text,
max_tokens = out_token,
temperature=0,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
# Print the generated summary
res = response["choices"][0]["text"]
st.success(res)
st.download_button('Download result', res)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
else:
st.warning("Not enough words to correct the description!")
# For "Restaurant Reviews"
elif selected == "Restaurant Reviews":
st.subheader("Generate reviews for Restaurant / Cafe")
# Create Text Area Widget to enable user to enter texts
article_text = "Write a restaurant review based on these notes:\n\n"
st.text("Write a restaurant review based on these notes: Name: The Blue Wharf Lobster great, noisy, service polite, prices good.")
article_text1 = st.text_area("Name of the restaurant?")
article_text2 = st.text_area("Enter your short feedback")
article_text3 = st.text_input("Enter your desired name", 'John Doe')
# First, we'll use an if statement to determine the desired output size
# and set the out_token variable accordingly:
# Next, we'll add a check to make sure that the input text is long enough
# to summarize, and display a warning if it is not:
if len(article_text1) > 1 and len(article_text2) > 1:
if st.button("Generate Review",type='primary'):
pomm = "Your name is "+ article_text3 + ". " + article_text + "Name: " + article_text1 + "\n" + article_text2 + "\n\nReview:"
report = []
res_box = st.empty()
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
{"role": "system", "content": "You are an AI assistant custom trained and created by Alpha AI. You are proficient at everytask."},
{"role": "user", "content": pomm}
],
max_tokens=64,
temperature = 0.6,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
# join method to concatenate the elements of the list
# into a single string,
# then strip out any empty strings
# print(resp)
if "content" in resp["choices"][0]["delta"]:
report.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report).strip()
# result = result.replace("\n", "")
# st.success(f'{result}')
res_box.markdown(f'{result}')
else:
pass
st.text("Review by: " + article_text3)
# st.download_button('Download result', report)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
else:
st.warning("Not enough clarity to generate reviews!")
# IMAGE GENERATOR
elif selected == "Image Generator":
genre = st.radio(
"What type of content do you want to generate?",
('Dalle', 'Stable Diffusion'))
if genre == 'Dalle':
# Set up Streamlit app
st.subheader("Image Generator - Generating 4 Variants")
# Prompt user for image prompt
prompt = st.text_input("Enter image prompt:")
# Prompt user for image size
size = st.radio("Select image size:", list(image_sizes.keys()))
# Generate and display images
if st.button('Generate Image', type="primary"):
with st.spinner(text="Work in Progress... please wait"):
col1, col2 = st.columns(2)
with col1:
image1 = generate_image(prompt, image_sizes[size])
st.image(image1, caption="Generated image 1", use_column_width=True)
image3 = generate_image(prompt, image_sizes[size])
st.image(image3, caption="Generated image 3", use_column_width=True)
with col2:
image2 = generate_image(prompt, image_sizes[size])
st.image(image2, caption="Generated image 2", use_column_width=True)
image4 = generate_image(prompt, image_sizes[size])
st.image(image4, caption="Generated image 4", use_column_width=True)
# # Display images as a gallery
# images = [image1, image2, image3, image4]
# st.image(images, caption=["Generated image 1", "Generated image 2", "Generated image 3", "Generated image 4"], width=200)
else:
st.warning("Please enter an image prompt.")
st.text("Save Image by Right Click")
elif genre == 'Stable Diffusion':
st.write("Go to www.sdui.alphaai.biz to access the Stable Diffusion Toolkit!")
# BLOG GENERATOR
elif selected == "Blog Generator":
st.subheader('AI Blog Generator')
# st.text("Note: Audio will be only generated in the non-streaming mode.")
topic = st.text_input('Enter a topic to generate blog Topic on: ')
button_blogtopics = st.button('Generate Blog Topics', type="primary")
res_box1 = st.container()
if button_blogtopics:
st.markdown("----")
topic_ins = "Generate quality blog titles on " + str(topic)
res_box1.write(generate_content_blog(topic_ins))
st.markdown("----")
topic_blog = st.text_input('Enter a topic to generate blog outline and content on: ')
# stream_check = st.radio("Stream the result in realtime or view it in one go!",("NO Streaming","Streaming"))
if st.button("Submit", type="primary"):
st.markdown("----")
res_box2 = st.empty()
outline = "Generate a detailed blog on the topic " + str(topic_blog) + "."
report = []
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
{
"role": "system", "content": """
Act as an expert blog writer. You write extremely well and long blogs. You take into account the SEO aspect of the content you write. Follow the context to generate blogs and keep in mind the number of characters specified in the context.
Context:
Add five keywords for each subheading.
Title
Write the title of the blog.
Write the Introduction to the blog in minimum 1500 characters.
Subheading
Generate content for subheading in minimum 1500 characters.
Subheading
Generate content for subheading in minimum 1500 characters.
Subheading
Generate content for subheading in minimum 1500 characters.
Subheading
Generate content for subheading in minimum 1500 characters.
Subheading
Generate content for subheading in minimum 1500 characters.
Conclusion
Write the conclusion to the blog in minimum 1000 characters.
"""},
{"role": "user", "content": outline}
],
max_tokens=3000,
temperature = 0.6,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
if "content" in resp["choices"][0]["delta"]:
report.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report).strip()
# result = result.replace("\n", "")
res_box2.markdown(f'{result}')
else:
pass
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
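# The blog-topic step above calls a generate_content_blog(prompt) helper
# defined earlier in this file. The function below is an assumed,
# non-streaming equivalent, given a hypothetical name so it does not shadow
# the real helper.
def generate_content_blog_sketch(prompt_text):
    response = openai.ChatCompletion.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt_text}],
        max_tokens=300,
        temperature=0.7,
    )
    return response["choices"][0]["message"]["content"].strip()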
# New tools 15th March 23
elif selected == "Content Paraphraser":
st.subheader('AI Content Paraphraser (Generation Limit - up to 5000 characters)')
# st.text("Note: Audio will be only generated in the non-streaming mode.")
topic = st.text_area('Enter the content you would like to paraphrase', max_chars = 5000)
if st.button("Submit", type="primary"):
res_box3 = st.empty()
outline = "Generate a detailed blog on the topic " + str(topic) + "."
report3 = []
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
{
"role": "system", "content": "You are now ParaGPT. Your purpose is to paraphrase text. I will provide you with text, and then you will change up the words, the sentence structure, add or remove figurative language, etc and change anything necessary in order to paraphrase the text. However, it is extremely important you do not change the original meaning/significance of the text. "},
{"role": "user", "content": outline}
],
max_tokens=3000,
temperature = 0.6,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
if "content" in resp["choices"][0]["delta"]:
report3.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report3).strip()
# result = result.replace("\n", "")
res_box3.markdown(f'{result}')
else:
pass
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
elif selected == "Story Teller":
st.subheader('AI Story Teller (Generation Limit - up to 5000 characters)')
# st.text("Note: Audio will be only generated in the non-streaming mode.")
topic = st.text_area('Enter the topic for generating the story.', max_chars = 500)
if st.button("Submit", type="primary"):
res_box4 = st.empty()
outline = "I want you to act as a storyteller. You will come up with entertaining stories that are engaging, imaginative and captivating for the audience. It can be fairy tales, educational stories or any other type of stories which has the potential to capture people's attention and imagination. Depending on the target audience, you may choose specific themes or topics for your storytelling session. Now i want you to write about " + str(topic) + "."
report4 = []
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
{"role": "user", "content": outline}
],
max_tokens=3000,
temperature = 0.6,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
if "content" in resp["choices"][0]["delta"]:
report4.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report4).strip()
# result = result.replace("\n", "")
res_box4.markdown(f'{result}')
else:
pass
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
elif selected == "Social Media Copywriting":
st.subheader('AI Social Media Copywriting (Generation Limit - up to 5000 characters)')
# st.text("Note: Audio will be only generated in the non-streaming mode.")
social_plat = st.radio("Which platform",('LinkedIn', 'Instagram', 'Facebook'))
purpose = st.text_area('Purpose of the post. Be explicit.')
keywords = st.text_input("Enter the required keywords in a comma separated form.", value = "Eg. Abc, Def, Ghi...")
rules = st.text_input("Enter the required rules to follow to make the post in a comma separated form.")
outline_smc = "Create a compelling, eye-catching " + social_plat + "post for " + purpose + "." + "The post should include " + keywords + ". " + "Also, it should follow these rules: " + rules + "."
if st.button("Submit", type="primary"):
res_box5 = st.empty()
report5 = []
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
{"role": "user", "content": outline_smc}
],
max_tokens=3000,
temperature = 0.6,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
if "content" in resp["choices"][0]["delta"]:
report5.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report5).strip()
# result = result.replace("\n", "")
res_box5.markdown(f'{result}')
else:
pass
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
elif selected == "Marketing Campaign":
st.subheader('Generate content for various marketing scenarios.')
option = st.selectbox('What would you like to do today?',
(
'Act As An Advertiser',
'Write AIDAs',
'Instagram Caption',
'Persuasive Texts',
'Influencer Marketing Campaign',
'Emotional Appeal Campaign',
'Instagram Story Ideas',
'Generate video script',
'Lean Startup Methodology'
))
if option =="Act As An Advertiser":
prompt_adv1 = "I want you to act as an advertiser. You will create a campaign to promote a product or service of your choice. You will choose a target audience, develop key messages and slogans, select the media channels for promotion, and decide on any additional activities needed to reach your goals. My first suggestion request is "
prompt_adv2 = st.text_input("What do you need an advertising campaign for?")
final_pmt = prompt_adv1 + prompt_adv2
elif option =="Write AIDAs":
st.text("AIDAS stands for Attention, Interest, Desire, Action, and Satisfaction.")
prompt_aidas = "Write an AIDA for "
input_aidas = st.text_input("Enter the topic to generate it AIDAs")
final_pmt = prompt_aidas + input_aidas
elif option =='Instagram Caption':
prompt_ic = "Write an attractive Instagram Caption "
input_ic = st.text_input("Insert Product description...")
final_pmt = prompt_ic + input_ic
elif option =='Persuasive Texts':
option_text = st.selectbox('Choose the type of text:',('Copy','Email','Blog','Newsletter','Article'))
# type_of_text = st.text_input("Type of persuasiveness, elaborate.")
ideal_customer_persona = st.text_input("Please elaborate on your ideal customer persona.")
type_of_prod = st.text_input("Elaborate on the type of product/program/subscription.")
prompt_ic = "I'm looking for a " + option_text + "that will convince " + ideal_customer_persona + " to sign up for my " + type_of_prod + "by explaining the value it brings and the benefits they'll receive."
final_pmt = prompt_ic
elif option =='Influencer Marketing Campaign':
type_of_content = st.text_input("Type of content needed from the influencer.")
ideal_customer_persona = st.text_input("Please elaborate on your ideal customer persona.")
type_of_prod = st.text_input("Elaborate on the type of product/program/subscription.")
influencer_type = st.text_input("Tell us a bit about your ideal type of influencer.")
final_pmt = "I need an influencer marketing campaign outline that will engage my "+ ideal_customer_persona+ " with " + type_of_content + " from " + influencer_type + " who can showcase the unique features and benefits of our " + type_of_prod + " in a fun and creative way."
elif option =='Emotional Appeal Campaign':
emotional_appeal = st.text_input("Describe the type of emotional appeal.")
ideal_customer_persona = st.text_input("Please elaborate on your ideal customer persona.")
type_of_prod = st.text_input("Elaborate on the type of product/program/subscription.")
type_of_emotion = st.text_input("List down ',' seperated emotions ideal for your campaign.")
prompt_ea = "Using the 'Emotional Appeal' framework, please write a marketing campaign outline that uses "+ emotional_appeal + " to persuade "+ ideal_customer_persona + "to take action and purchase our " + type_of_prod + ". Choose any of the emotions such as " + type_of_emotion + "."
final_pmt = prompt_ea
elif option =='Instagram Story Ideas':
ideal_customer_persona = st.text_input("Please elaborate on your ideal customer persona.")
prompt_is = "I need an Instagram story idea that will provide a sneak peek of upcoming products or services and create a sense of anticipation and excitement for my "+ ideal_customer_persona +" with a clear and compelling call to action."
final_pmt = prompt_is
elif option =='Generate video script':
duration = st.text_input("Enter the duration of the required video.", value = "Eg. 4 minutes or 30 second")
type_of_prod = st.text_input("Elaborate on the type of product/program/subscription.")
prompt_gvs = "Generate an " + duration + " video script for a YouTube and Instagram video about our newest " + type_of_prod
final_pmt = prompt_gvs
elif option =='Lean Startup Methodology':
rules = st.text_input("Please describe your product or service.")
ideal_customer = st.text_input("Please describe your ideal customer.")
outline_smc = "Outline a marketing campaign using the 'Lean Startup Methodology' framework that employs rapid experimentation and iteration to identify a scalable business model for our " + rules + "that appeals to our " + ideal_customer + ". Explain the steps taken to validate assumptions and obtain customer feedback to guide the marketing strategy."
final_pmt = outline_smc
else:
st.text("Please select an option!")
if st.button("Submit", type="primary"):
res_box6 = st.empty()
report6 = []
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
{"role": "system", "content": "Act as an AI advertiser custom trained and created by Alpha AI. You are proficient at every task."},
{"role": "user", "content": final_pmt}
],
max_tokens=4000,
temperature = 0.7,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
if "content" in resp["choices"][0]["delta"]:
report6.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report6).strip()
# result = result.replace("\n", "")
res_box6.markdown(f'{result}')
else:
pass
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
# New tools 15th March 23
elif selected == "PPT Generator":
st.subheader("PPT Generator")
topic = st.text_input('Enter a relevant topic')
# api_key = st.text_input('Enter your API key')
slides_sst = st.slider("Select the number of slides:", 1, 20, 5)
slide_color = st.selectbox("Select the background color of the presentation:", ["White", "Black", "Red", "Green", "Blue"], key = "sbox1")
# Convert the selected color to lowercase
slide_colors = slide_color.lower()
font_color = st.selectbox("Select the color of the font:", ["White", "Black", "Red", "Green", "Blue"], key = "sbox2")
# Convert the selected color to lowercase
font_colors = font_color.lower()
button_blogtopics = st.button('Generate', type="primary")
if button_blogtopics:
st.text("Please wait for your slides to be made and displayed")
b,binary_output = generate_ppt(topic, slides_sst,slide_colors,font_colors)
st.markdown(b)
string_path = b
st.markdown("----")
# f_path = Path(b)
# print(type(f_path))
path = os.path.normpath(b)
list_files = subprocess.run(["libreoffice","--headless","--convert-to","pdf","--outdir", "output/",path])
print("The exit code was: %d" % list_files.returncode)
# PDF Path
# Strip the ".pptx" extension and the leading directory prefix from the
# generated path, then point at the converted PDF inside output/.
string_path = string_path[:-5]
string_path = string_path[6:]
string_path = "output/" + string_path + ".pdf"
path_pdf = os.path.normpath(string_path)
def show_pdf(file_val = path_pdf):
with open(file_val,"rb") as f:
base64_pdf = base64.b64encode(f.read()).decode('utf-8')
pdf_display = f'<iframe src="data:application/pdf;base64,{base64_pdf}" width="700" height="950" type="application/pdf"></iframe>'
st.markdown(pdf_display, unsafe_allow_html=True)
show_pdf(path_pdf)
st.download_button(label='Click to download PowerPoint',data=binary_output.getvalue(),file_name=path)
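# The PPT branch above relies on a generate_ppt(topic, n_slides, slide_color,
# font_color) helper defined earlier in this file, which returns the saved
# .pptx path plus its in-memory bytes. The function below is a heavily
# simplified, assumed equivalent using python-pptx, given a hypothetical name;
# the real helper also generates slide text with the OpenAI API and applies
# the chosen background and font colors.
def generate_ppt_sketch(topic, n_slides):
    prs = Presentation()
    bullet_layout = prs.slide_layouts[1]  # title + content layout
    for i in range(n_slides):
        slide = prs.slides.add_slide(bullet_layout)
        slide.shapes.title.text = f"{topic} - slide {i + 1}"
        slide.placeholders[1].text = "Generated slide content goes here."
    os.makedirs("slides", exist_ok=True)
    out_path = f"slides/{topic.replace(' ', '_')}.pptx"
    prs.save(out_path)
    buffer = BytesIO()
    prs.save(buffer)
    return out_path, buffer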
# AD Generator
elif selected == "AD Generator":
st.subheader("Generate ADs using alphaGPT")
st.success("The uploaded image should be in 1:1 ratio else it will result in an error.")
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
# Read the uploaded image and display it
image = Image.open(uploaded_file)
# st.image(image, caption="Original Image", use_column_width=True)
output_path = 'output_dalle_image.png'
output_path2 = 'output_dalle_image2.png'
input_save = 'input_dalle_img.png'
image.save(input_save)
# input = Image.open(input_path),False,240,10,20,None,False,False
output2 = remove(image, only_mask=True)
# output = remove(image,alpha_matting=True)
output = remove(image)
output.save(output_path)
output2.save(output_path2)
# Display the original and processed images side by side
col1, col2, col3 = st.columns(3)
with col1:
st.image(image, caption="Original Image", use_column_width=True)
with col2:
st.image(output, caption="Masked Foreground", use_column_width=True)
with col3:
st.image(output2, caption="Masked Background", use_column_width=True)
# Create Radio Buttons
output_mask_selected = st.radio( label = "What type of mask",
options= ["Masked Foreground", "Masked Background"]
)
if output_mask_selected == "Masked Foreground":
mask_img = output_path
elif output_mask_selected == "Masked Background":
mask_img = output_path2
# Prompt user for image prompt
prompt = st.text_input("Enter image prompt:")
# Prompt user for image size
size = st.radio("Select image size:", list(image_sizes.keys()))
input_img = input_save
# mask_img = output_path
# Generate and display images
if st.button('Generate Image', type="primary"):
with st.spinner(text="Work in Progress... please wait"):
col1, col2 = st.columns(2)
with col1:
image1 = generate_image_edit_dalle(prompt, image_sizes[size],input_img,mask_img)
st.image(image1, caption="Generated image 1", use_column_width=True)
image3 = generate_image_edit_dalle(prompt, image_sizes[size],input_img,mask_img)
st.image(image3, caption="Generated image 2", use_column_width=True)
image5 = generate_image_edit_dalle(prompt, image_sizes[size],input_img,mask_img)
st.image(image5, caption="Generated image 3", use_column_width=True)
with col2:
image2 = generate_image_edit_dalle(prompt, image_sizes[size],input_img,mask_img)
st.image(image2, caption="Generated image 4", use_column_width=True)
image4 = generate_image_edit_dalle(prompt, image_sizes[size],input_img,mask_img)
st.image(image4, caption="Generated image 5", use_column_width=True)
image6 = generate_image_edit_dalle(prompt, image_sizes[size],input_img,mask_img)
st.image(image6, caption="Generated image 6", use_column_width=True)
# # Display images as a gallery
# images = [image1, image2, image3, image4]
# st.image(images, caption=["Generated image 1", "Generated image 2", "Generated image 3", "Generated image 4"], width=200)
else:
st.warning("Please enter an image prompt.")
st.warning("Here are some examples for the prompts!")
st.info("Beautiful pond surrounded by lavender and lilac, dappled sunbeams illuminating the scene, stunning photograph from lansdcaping magazine.")
st.info("A digital illustration of glowing toadstools beside a pond with lilypads, 4k, detailed, trending in artstation")
st.info("An oil painting of a mechanical clockwork flying machine from the renaissance, Gorgeous digital painting, amazing art, artstation 3, realistic")
st.info("Rubber Duck Aliens visiting the Earth for the first time, hyper-realistic, cinematic, detailed")
st.info("photo of an extremely cute alien fish swimming an alien habitable underwater planet, coral reefs, dream-like atmosphere, water, plants, peaceful, serenity, calm ocean, tansparent water, reefs, fish, coral, inner peace, awareness, silence, nature, evolution --version 3 --s 42000 --uplight --no text, blur")
st.info("2 medieval warriors ::0.4 travelling on a cliff to a background castle , view of a coast line landscape , English coastline, Irish coastline, scottish coastline, perspective, folklore, King Arthur, Lord of the Rings, Game of Thrones. Photographic, Photography, photorealistic, concept art, Artstation trending , cinematic lighting, cinematic composition, rule of thirds , ultra-detailed, dusk sky , low contrast, natural lighting, fog, realistic, light fogged, detailed, atmosphere hyperrealistic , volumetric light, ultra photoreal, | 35mm| , Matte painting, movie concept art, hyper-detailed, insanely detailed, corona render, octane render, 8k --no blur")
elif selected == "Legal Aid":
# Define the categories and options
categories = ['Legal Research', 'Drafting Legal Documents', 'Contract Drafting', 'Legal Forms and Documents', 'Legal Analysis','Legal Writing','Client Questions','Legal Procedures','Legal Terminology','Legal Citations']
options = {
'Legal Research': [
"Provide examples of [legal case/issue]",
"What are the latest developments in [legal area]?",
"What are the relevant laws or regulations regarding [legal issue]?",
"What is the history of [legal case/issue]?",
"What is the legal definition of [legal term or phrase]",
"What is the legal precedent for [legal case/issue]?",
"What are the pros and cons of [legal argument/position]?",
"What is the standard for [legal issue] in [jurisdiction]?",
"What are the key legal arguments in [legal case/issue]?",
"Provide a summary of [case name]",
"Summarize the following contract: [copy and paste contract]",
"What is the statute of limitations for [type of case] in [state or jurisdiction]?",
"Outline the steps involved in [legal process or procedure]",
"What is the significance of [case name]?"
],
'Drafting Legal Documents': [
"Draft a [legal document type] for [legal scenario]",
"Draft a [legal document type] for [party 1] and [party 2]",
"Write a [legal document type] for [legal issue]",
"Write a [legal document type] for [client name]",
"What should be included in a [legal document type]?",
"What are the standard clauses for [legal document type]?",
"What are the necessary elements for [legal document type]?",
"What are the typical terms for [legal document type]?",
"What are the recommended provisions for [legal document type]?",
"Provide a template for [legal document name]",
"What are the most common mistakes to avoid when drafting a [legal document name]?"
],
'Contract Drafting': [
"Draft a [contract type] between [party 1] and [party 2] for [consideration]",
"Draft a non-disclosure agreement (NDA) between [party 1] and [party 2]",
"Draft a confidentiality agreement between [party 1] and [party 2]",
"Draft an employment contract for [position] with [salary and benefits information]",
"Draft a service agreement between [party 1] and [party 2]",
"Draft a lease agreement for [property description]",
"Draft a purchase agreement for [item/property description]"
],
'Legal Forms and Documents': [
"Draft a power of attorney form",
"Draft a will",
"Draft a living trust",
"Draft a contract for [contract type]",
"Draft a non-disclosure agreement (NDA)",
"Draft a confidentiality agreement",
"Draft an employment contract",
"Draft a partnership agreement",
"Draft a prenuptial agreement",
"Draft a divorce agreement",
"Draft a property settlement agreement"
],
'Legal Analysis': [
"What are the strengths and weaknesses of [legal argument]?",
"What are the possible outcomes of [legal issue]?",
"What is the likelihood of [legal outcome]?",
"What is the impact of [legal issue] on [affected parties]?",
"What are the alternative solutions for [legal issue]?",
"What is the best course of action for [legal issue]?",
"What are the risks associated with [legal issue]?",
"What is the likelihood of success for [legal issue]?",
"What is the legal basis for [legal argument]?",
"What is the legal precedent for [legal argument]?",
"What are the legal arguments for and against [legal issue]?"
],
'Legal Writing':[
"Write a memo on [legal issue]",
"Write a brief on [legal issue]",
"Rephrase this clause: [clause]",
"Write an argument for [legal issue]",
"Write a legal opinion on [legal issue]",
"What is the appropriate tone for [legal writing type]?",
"What is the standard structure for [legal writing type]?",
"What are the key points to include in [legal writing type]?",
"What are the persuasive strategies for [legal writing type]?",
"What is the format for [legal writing type]?",
"Proofread the following: [copy and paste contract]",
"What are the best practices for [legal writing type]?"
],
'Client Questions':[
"What is the best way to [legal issue]?",
"What are the options for [legal issue]?",
"What is the process for [legal issue]?",
"What are the costs associated with [legal issue]?",
"What is the estimated time frame for [legal issue] resolution?",
"What is the likelihood of success for [legal issue]?",
"What are the potential consequences of [legal issue]?",
"What are the necessary steps to take for [legal issue]?",
"What are the legal requirements for [legal issue]?",
"What is the most common outcome for [legal issue]?"
],
'Legal Procedures':[
"What is the proper procedure for [legal action] in [jurisdiction]?",
"What are the necessary forms for [legal action] in [jurisdiction]?",
"What is the filing deadline for [legal action] in [jurisdiction]?",
"What is the fee for [legal action] in [jurisdiction]?",
"What is the estimated time frame for [legal action] in [jurisdiction]?",
"What is the expected outcome for [legal action] in [jurisdiction]?",
],
'Legal Terminology':[
"What is the context in which [legal term] is typically used?",
"What is the origin of [legal term]?",
"What are the synonyms of [legal term]?",
"What are the related terms to [legal term]?",
"What is the meaning of [legal term]?",
"What is the difference between [legal term 1] and [legal term 2]?",
"What is the definition of [legal term] in [jurisdiction]?",
"What is the explanation of [legal form or document]",
"What are the benefits of using [legal form or document]",
"What are the requirements for [legal form or document] in [jurisdiction]"
],
'Legal Citations':[
"What is the correct format for a [citation style] citation of [legal source]?",
"What is the [citation style] citation for [legal case]?",
"What is the [citation style] citation for [legal statute]?",
"What is the [citation style] citation for [legal regulation]?",
"What is the [citation style] citation for [legal treatise]?"
]
}
image_law = "law.png"
col1, col2 = st.columns([1,3])
with col2:
st.title('Legal Information Finder')
with col1:
st.image(image_law,width=120)
# Select a category
category = st.selectbox("Select a category", categories)
# Select an option
option = st.selectbox(f"Select an option for {category}", options[category])
# Get user input
prompt = st.text_area(f"Prompt editor", value = option)
# Generate text using OpenAI API
if st.button("Submit", type="primary"):
with st.spinner(text="Generating text..."):
text = generate_legal_content(prompt)
# st.write("Generated text:")
st.write(text)
ext = datetime.now()
result_aud6 = text_to_speech(f"{text}",ext)
audio_file = open(f"temp/{result_aud6}.mp3", "rb")
audio_bytes = audio_file.read()
st.markdown(f"Your audio:")
st.audio(audio_bytes, format="audio/mp3", start_time=0)
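# The Legal Aid branch above calls a generate_legal_content(prompt) helper
# defined earlier in this file. The function below is an assumed,
# non-streaming equivalent, given a hypothetical name so it does not shadow
# the real helper.
def generate_legal_content_sketch(prompt_text):
    response = openai.ChatCompletion.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are a helpful legal research assistant. You do not provide binding legal advice."},
            {"role": "user", "content": prompt_text},
        ],
        max_tokens=1500,
        temperature=0.3,
    )
    return response["choices"][0]["message"]["content"].strip()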
elif selected == "Travel and Tourism":
st.subheader('AI Toolkit - Travel & Tourism.')
st.text("travel-oriented things including planning trips, setting aside budgets, suggesting places, and a lot more.")
option = st.selectbox('What would you like to do today?',
(
'Travel Guide',
'Suggest Landmarks',
'Imagine your destination',
'Plan trips',
'Detailed location suggestions',
'General travel tips'
))
if option =="Travel Guide":
prompt_adv1 = "I want you to act as a travel guide. I will write you my location and you will suggest places to visit near my location. In some cases, I will also give you the type of places I will visit. You will also suggest me places of a similar type that are close to my first location. My first suggestion request is "
prompt_adv2 = st.text_input("Enter location and purpose and a nearby places would be recommended to you.", value = "I am in London and I want to visit only museums.")
final_pmt = prompt_adv1 + prompt_adv2
elif option =="Suggest Landmarks":
place_visit = st.text_input("Enter the place of visit.")
country_visit = st.text_input("Enter the country where that place exists.")
prompt_aidas = "I'm planning on visiting " + place_visit + " in "+ country_visit + ". What are some of the landmarks I must see?"
final_pmt = prompt_aidas
elif option =='Imagine your destination':
prompt_ic = "I want you to act as my time travel guide. I will provide you with the historical period or future time I want to visit and you will suggest the best events, sights, or people to experience. Do not write explanations, simply provide suggestions and any necessary information. My first request is "
input_ic = st.text_input("Quote a request based on the example value...", value = "I want to visit the Renaissance period, can you suggest some interesting events, sights, or people for me to experience?")
final_pmt = prompt_ic + input_ic
elif option =='Plan trips':
st.subheader('Let us plan your trip!')
# Form for User Input
st.subheader('Budget')
budget = st.text_input("Enter your travel budget.", value = "$2000 dollars")
st.subheader('Destination')
destination = st.text_input('Destination', value='Eg. Dubai')
st.subheader('Arriving from')
source = st.text_input('Source', value='Eg. Mumbai')
st.subheader('Duration')
duration = st.text_input("Duration of the travel.", value = "4 days")
add_info = st.text_area('Additional Information', height=200, value='I want to visit as many places as possible! (respect time)')
final_pmt = "I have " + budget + " and travelling for " + duration + ". Plan a trip for me to " + destination + " from " + source + ". Please take into account the additional information as well which is " + add_info
elif option =='Detailed location suggestions':
st.text("Simply follow the value inside the textbox as an example...")
type_of_content = st.text_input("Prompt", value = "I'm planning on visiting the UK for 20 days. Give me 10 cities I should visit while there.")
final_pmt = type_of_content
elif option =='General travel tips':
st.text("Simply follow the value inside the textbox as an example...")
type_of_content = st.text_input("Prompt", value = "What is the best time of year to visit Hawaii?")
final_pmt = type_of_content
else:
st.text("Please select an option!")
if st.button("Submit", type="primary"):
res_box6 = st.empty()
report6 = []
# Looping over the response
try:
for resp in openai.ChatCompletion.create(model="gpt-4o",
messages=[
{"role": "system", "content": "You are an AI language model custom trained and created by Alpha AI. You are proficient at every task."},
{"role": "user", "content": final_pmt}
],
max_tokens=4000,
temperature = 0.7,
presence_penalty = 0.1,
frequency_penalty = 0.1,
stream = True):
if "content" in resp["choices"][0]["delta"]:
report6.append(resp["choices"][0]["delta"]["content"])
# report.append(resp.choices[0].delta.content)
result = "".join(report6).strip()
# result = result.replace("\n", "")
res_box6.markdown(f'{result}')
else:
pass
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
pass
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
pass
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
pass
elif selected == "Document Chat":
st.subheader("Chat with your document!")
st.markdown(
"""
#### 💬 Chat with your PDF files 📄 with `Conversational Buffer Memory`
> *powered by [LangChain](https://langchain.readthedocs.io/en/latest/modules/memory.html#memory) +
[OpenAI](https://platform.openai.com/docs/models/gpt-3-5) + [DataButton](https://www.databutton.io/)*
----
"""
)
st.markdown(
"""
`openai`
`langchain`
`tiktoken`
`pypdf`
`faiss-cpu`
---------
"""
)
# # Set up the sidebar
# st.sidebar.markdown(
# """
# ### Steps:
# 1. Upload PDF File
# 2. Enter Your Secret Key for Embeddings
# 3. Perform Q&A
# **Note : File content and API key not stored in any form.**
# """
# )
# Allow the user to upload a PDF file
uploaded_file = st.file_uploader("**Upload Your PDF File**", type=["pdf"])
if uploaded_file:
name_of_file = uploaded_file.name
doc = parse_pdf(uploaded_file)
pages = text_to_docs(doc)
if pages:
# Allow the user to select a page and view its content
with st.expander("Show Page Content", expanded=False):
page_sel = st.number_input(
label="Select Page", min_value=1, max_value=len(pages), step=1
)
pages[page_sel - 1]
# Allow the user to enter an OpenAI API key
# api = st.text_input(
# "**Enter OpenAI API Key**",
# type="password",
# placeholder="sk-",
# help="https://platform.openai.com/account/api-keys",
# )
api = openai.api_key
if api:
# Test the embeddings and save the index in a vector database
index = test_embed()
# Set up the question-answering system
qa = RetrievalQA.from_chain_type(
llm=OpenAI(openai_api_key=api),
chain_type = "map_reduce",
retriever=index.as_retriever(),
)
# Set up the conversational agent
tools = [
Tool(
name="Indian Legal QA System",
func=qa.run,
description="Useful for when you need to answer questions about the aspects asked. Input may be a partial or fully formed question.",
)
]
prefix = """Have a conversation with a human, answering the following questions as best you can based on the context and memory available.
You have access to a single tool:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferMemory(
memory_key="chat_history"
)
# gpt-4o is a chat model, so use the ChatOpenAI wrapper rather than the
# completion-style OpenAI LLM, which would call the wrong endpoint.
from langchain.chat_models import ChatOpenAI
llm_chain = LLMChain(
llm=ChatOpenAI(
temperature=0, openai_api_key=api, model_name="gpt-4o"
),
prompt=prompt,
)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=st.session_state.memory
)
# Allow the user to enter a query and generate a response
query = st.text_input(
"**What's on your mind?**",
placeholder="Ask me anything from {}".format(name_of_file),
)
if query:
with st.spinner(
"Generating Answer to your Query : `{}` ".format(query)
):
res = agent_chain.run(query)
st.info(res, icon="🤖")
# Allow the user to view the conversation history and other information stored in the agent's memory
with st.expander("History/Memory"):
st.session_state.memory
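# Optional, hedged sketch: ConversationBufferMemory keeps the full chat history
# in st.session_state across reruns. If a "start over" control is wanted, the
# buffer can be cleared like this (not part of the original UI):
if "memory" in st.session_state and st.button("Clear conversation memory"):
    st.session_state.memory.clear()
    st.success("Conversation memory cleared.")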