TitleDescGen / app.py
ravithejads's picture
Update app.py
346b8d4
raw
history blame contribute delete
No virus
4.1 kB
import numpy as np
import gradio as gr
import openai
def titledescriptiongenerator(text, openaikey, style_ = "viral", media = "Twitter"):
    """Generate a social-media post title and short description for *text*.

    Parameters
    ----------
    text : str
        Source text to turn into a post title/description.
    openaikey : str
        OpenAI API key; assigned to ``openai.api_key`` before the calls.
    style_ : str, optional
        Tone of the output, e.g. "viral" or "formal" (default "viral").
    media : str, optional
        Target platform, e.g. "Twitter", "Linkedin", "Blog" (default "Twitter").

    Returns
    -------
    tuple[str, str]
        ``(title, description)``. On invalid input the same error message
        fills both slots so the two Gradio output boxes stay consistent.
    """
    # Bug fix: the original returned a single string here, but the Submit
    # handler is wired to TWO outputs (title_output, description_output),
    # so a lone string broke the error path in the UI.
    if not text:
        msg = "Enter Some text to give facts"
        return msg, msg
    # Robustness: fail fast with a readable message instead of an opaque
    # OpenAI auth exception when no key was supplied.
    if not openaikey:
        msg = "Enter your OpenAI API key"
        return msg, msg

    openai.api_key = openaikey

    def _complete(prompt):
        # Single Completion call; both title and description share these
        # sampling settings (legacy text-davinci-003 Completions endpoint).
        response = openai.Completion.create(
            model="text-davinci-003",
            prompt=prompt,
            temperature=0.7,
            max_tokens=256,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0,
        )
        return response['choices'][0]['text']

    # Prompts are kept byte-compatible with the original string building.
    title = _complete(
        "Generate " + str(style_) + " " + str(media)
        + " post title for the text.\n" + text + "." + "\n\n"
    )
    description = _complete(
        "Generate " + str(style_) + " " + str(media)
        + " post short description for the text.\n" + text + "." + "\n\n"
    )
    return title, description
# CSV logger that persists flagged examples to disk; rows are appended when
# the user clicks the "Flag" button defined below.
callback = gr.CSVLogger()

# NOTE(review): the source paste had its indentation stripped; the nesting
# below is reconstructed from the Blocks/Row/Column context managers.
with gr.Blocks() as demo:
    # --- Static header ---
    gr.Markdown(
        """
<h1><center><b>TitleDescGen</center></h1>
""")
    gr.Markdown(
        """
This app generates a VIRAL/ FORMAL Title and Short description for your social media post. It uses OpenAI GPT3 in the backend, get your
<a href = "https://beta.openai.com/account/api-keys">Openai key here</a> \n
""")
    gr.Markdown(
        """
**To use this space effectively**
<br>- Enter your text in the input box or select one of the examples at the bottom.
<br>- Use dropdown to select the style of title and description - Viral/ Formal
<br>- Use dropdown to select the social media - Twitter/ Linkedin/ Blog post
<br>- Enter your openai key </br>
"""
    )
    # --- Left column: inputs; right column: generated outputs + flag button ---
    with gr.Row():
        with gr.Column():
            text = gr.Textbox(lines = 5, label = "Enter text")
            style_ = gr.Dropdown(["formal", "viral"],label="Click here to select style of the title/ description", value = "viral")
            media = gr.Dropdown(["Twitter", "Linkedin", "Blog"],label="Click here to select media", value = "Twitter")
            openaikey = gr.Textbox(lines = 1, label = "Enter OpenAI Key")
            text_button = gr.Button("Submit")
        with gr.Column():
            title_output = gr.Textbox(label = "Title")
            description_output = gr.Textbox(label = "Short Description")
            flag_btn = gr.Button("Flag")

    # This needs to be called at some point prior to the first call to callback.flag()
    callback.setup([text, openaikey, style_, media, title_output, description_output], "flagged_data_points")

    # Submit: the generator returns (title, description), one per output box.
    text_button.click(titledescriptiongenerator, inputs=[text, openaikey, style_, media], outputs= [title_output, description_output] )

    # We can choose which components to flag -- in this case, we'll flag all of them
    # preprocess=False hands the raw component values straight to the logger.
    flag_btn.click(lambda *args: callback.flag(args), [text, openaikey, style_, media, title_output, description_output], None, preprocess=False)

    # One-click example input (LinkBERT paper abstract) for the text box.
    gr.Examples([["Language model (LM) pretraining can learn various knowledge from text corpora, helping downstream tasks. However, existing methods such as BERT model a single document, and do not capture dependencies or knowledge that span across documents. In this work, we propose LinkBERT, an LM pretraining method that leverages links between documents, e.g., hyperlinks. Given a text corpus, we view it as a graph of documents and create LM inputs by placing linked documents in the same context. We then pretrain the LM with two joint self-supervised objectives: masked language modeling and our new proposal, document relation prediction. We show that LinkBERT outperforms BERT on various downstream tasks across two domains: the general domain (pretrained on Wikipedia with hyperlinks) and biomedical domain (pretrained on PubMed with citation links). LinkBERT is especially effective for multi-hop reasoning and few-shot QA (+5% absolute improvement on HotpotQA and TriviaQA), and our biomedical LinkBERT sets new states of the art on various BioNLP tasks (+7% on BioASQ and USMLE)."]],
    inputs = [text])

# Start the Gradio server (blocking call).
demo.launch()