"""Gradio app: zero-shot text summarization with the OpenAI GPT-3 completions API."""
import os
import re
import textwrap
from time import time, sleep

import gradio as gr
import openai

# Never hard-code API keys in source; read the secret from the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")

def gpt3_completion(prompt, engine='text-davinci-002', temp=0.6, top_p=1.0, tokens=2000, freq_pen=0.25, pres_pen=0.0, stop=['<<END>>']):
    # Call the OpenAI completions endpoint, retrying up to max_retry times
    # on transient errors before giving up.
    max_retry = 5
    retry = 0
    while True:
        try:
            response = openai.Completion.create(
                engine=engine,
                prompt=prompt,
                temperature=temp,
                max_tokens=tokens,
                top_p=top_p,
                frequency_penalty=freq_pen,
                presence_penalty=pres_pen,
                stop=stop)
            text = response['choices'][0]['text'].strip()
            # Collapse runs of whitespace into single spaces (raw string avoids
            # the invalid-escape warning that '\s+' triggers on newer Pythons).
            text = re.sub(r'\s+', ' ', text)
            # filename = '%s_gpt3.txt' % time()
            # with open('gpt3_logs/%s' % filename, 'w') as outfile:
            #     outfile.write('PROMPT:\n\n' + prompt + '\n\n==========\n\nRESPONSE:\n\n' + text)
            return text
        except Exception as oops:
            retry += 1
            if retry >= max_retry:
                return "GPT3 error: %s" % oops
            print('Error communicating with OpenAI:', oops)
            sleep(1)
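
# A minimal sketch of calling gpt3_completion directly; the prompt text below
# is hypothetical, and OPENAI_API_KEY must be set in the environment:
#
#   print(gpt3_completion('Write a one-sentence summary of the following:\n\nGPT-3 is a large language model.\n\nSUMMARY: '))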

def summarizer(input_text):
    # Split the input into ~4000-character chunks so each prompt stays within
    # the model's context window, then summarize each chunk in turn.
    # (Renamed the parameter from `input` to avoid shadowing the builtin.)
    chunks = textwrap.wrap(input_text, 4000)
    s = ""
    count = 0
    for chunk in chunks:
        prompt = f'''Write a concise summary of the following:

{chunk}

CONCISE SUMMARY: '''
        summary = gpt3_completion(prompt)
        count += 1
        s += '\nSummarization of chunk number : ' + str(count) + '\n'
        s += summary + '\n'
    return s
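
# Example (hypothetical input): each ~4000-character chunk yields one
# labelled summary in the returned string, e.g.
#
#   print(summarizer("Formed in 2015 as a nonprofit, OpenAI developed GPT-3 ..."))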

interface = gr.Interface(
    fn=summarizer,
    # gr.inputs.Textbox is deprecated in recent Gradio releases; gr.Textbox
    # is the current equivalent. live=True is deliberately not set, since it
    # would trigger a paid API call on every keystroke.
    inputs=gr.Textbox(lines=5, placeholder="Enter your Text"),
    outputs="text",
    description="The key benefit of zero-shot summarization is the ease with which you can get the system off the ground. If you understand prompt building at a high level, you can reach fairly good accuracy on many different use cases.",
    examples=[
        ['''Formed in 2015 as a nonprofit, OpenAI developed GPT-3 as one of its research projects. It aimed to tackle the larger goals of promoting and developing "friendly AI" in a way that benefits humanity as a whole. The first version of GPT was released in 2018 and contained 117 million parameters. The second version of the model, GPT-2, was released in 2019 with around 1.5 billion parameters. As the latest version, GPT-3 jumps over the last model by a huge margin with more than 175 billion parameters -- more than 100 times its predecessor and 10 times more than comparable programs. Earlier pre-trained models -- such as BERT -- demonstrated the viability of the text generator method and showed the power that neural networks have to generate long strings of text that previously seemed unachievable. OpenAI released access to the model incrementally to see how it would be used and to avoid potential problems. The model was released during a beta period that required users to apply to use the model, initially at no cost. However, the beta period ended in October 2020, and the company released a pricing model based on a tiered credit-based system that ranges from a free access level for 100,000 credits or three months of access to hundreds of dollars per month for larger-scale access. In 2020, Microsoft invested $1 billion in OpenAI to become the exclusive licensee of the GPT-3 model. This means that Microsoft has sole access to GPT-3's underlying model. ChatGPT launched in November 2022 and was free for public use during its research phase. This brought GPT-3 more mainstream attention than it previously had, giving many nontechnical users an opportunity to try the technology.'''],
        ['''GPT-3, or the third-generation Generative Pre-trained Transformer, is a neural network machine learning model trained using internet data to generate any type of text. Developed by OpenAI, it requires a small amount of input text to generate large volumes of relevant and sophisticated machine-generated text. GPT-3's deep learning neural network is a model with over 175 billion machine learning parameters. To put things into perspective, the largest trained language model before GPT-3 was Microsoft's Turing Natural Language Generation (NLG) model, which had 10 billion parameters. As of early 2021, GPT-3 is the largest neural network ever produced. As a result, GPT-3 is better than any prior model for producing text that is convincing enough to seem like a human could have written it. As with any automation, GPT-3 would be able to handle quick repetitive tasks, enabling humans to handle more complex tasks that require a higher degree of critical thinking. There are many situations where it is not practical or efficient to enlist a human to generate text output, or there might be a need for automatic text generation that seems human. For example, customer service centers can use GPT-3 to answer customer questions or support chatbots; sales teams can use it to connect with potential customers. Marketing teams can write copy using GPT-3. This type of content also requires fast production and is low risk, meaning that if there is a mistake in the copy, the consequences are relatively minor.''']
    ],
)

interface.launch(inline=False)