Spaces: Runtime error
daniellefranca96 committed on
Commit • 3f12a86
1 Parent(s): 3bbe5fd
improves interface and prompt
Browse files
- app.py +60 -11
- style_scribble.py +80 -0
app.py CHANGED
@@ -1,35 +1,84 @@

Before:

   1  import os
   2
   3  import gradio as gr
-  4  from …
   5
   6
   7  def process(model, key, repo, example, prompt):
   8      set_key(key)
-  9      generate_text = …
  10      if model == "HugginFaceHub":
  11          model = repo
  12      generate_text.set_imp_llm(model)
  13      return generate_text.run()
  14
  15
- 16
  17  def set_key(key):
  18      os.environ['OPENAI_API_KEY'] = key
  19      os.environ['HUGGINGFACEHUB_API_TOKEN'] = key
  20      os.environ['ANTHROPIC_API_KEY'] = key
  21
  22
- 23  title = "StyleScribble Demo"
  24  description = "This is a demo of StyleScribble the AI powered app that generates text in your writing style, " \
  25      "you can learn more and sign-up for full launch here: https://stylescribble.fly.dev"
  26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
  34
  35  demo.launch()
After:

   1  import os
   2
   3  import gradio as gr
+  4  from style_scribble import StyleScribble
+  5
+  6  seed_text = "To serve the intricate and varied demands of image editing, precise and flexible manipulation " \
+  7              "of image content is indispensable. Recently, DragGAN has achieved impressive editing results" \
+  8              " through point-based manipulation. However, we have observed that DragGAN struggles with miss " \
+  9              "tracking, where DragGAN encounters difficulty in effectively tracking the desired handle points, " \
+ 10              "and ambiguous tracking, where the tracked points are situated within other regions that bear " \
+ 11              "resemblance to the handle points. To deal with the above issues, we propose FreeDrag, which adopts " \
+ 12              "a feature-oriented approach to free the burden on point tracking within the point-oriented methodology" \
+ 13              " of DragGAN. The FreeDrag incorporates adaptive template features, line search, and fuzzy localization " \
+ 14              "techniques to perform stable and efficient point-based image editing. Extensive experiments demonstrate" \
+ 15              " that our method is superior to the DragGAN and enables stable point-based editing in challenging " \
+ 16              "scenarios with similar structures, fine details, or under multi-point targets."
+ 17
+ 18  prompt_example = "Write a story about a girl participating in a dancing competition"
  19
  20
  21  def process(model, key, repo, example, prompt):
  22      set_key(key)
+ 23      generate_text = StyleScribble(example=example, prompt=prompt)
  24      if model == "HugginFaceHub":
  25          model = repo
  26      generate_text.set_imp_llm(model)
  27      return generate_text.run()
  28
  29
  30  def set_key(key):
  31      os.environ['OPENAI_API_KEY'] = key
  32      os.environ['HUGGINGFACEHUB_API_TOKEN'] = key
  33      os.environ['ANTHROPIC_API_KEY'] = key
  34
  35
  36  description = "This is a demo of StyleScribble the AI powered app that generates text in your writing style, " \
  37      "you can learn more and sign-up for full launch here: https://stylescribble.fly.dev"
  38
+ 39  with gr.Blocks(css=".no-border {border: none !important;} .center {justify-content: center;}") as demo:
+ 40      with gr.Box(elem_classes="no-border"):
+ 41          with gr.Row():
+ 42              with gr.Box(elem_classes="no-border"):
+ 43                  gr.Markdown(
+ 44                      f"""
+ 45                      # StyleScribble Demo
+ 46                      {description}
+ 47                      """, elem_classes="text")
+ 48          with gr.Row():
+ 49              with gr.Box(elem_classes="no-border"):
+ 50                  example = gr.Button(value="Example")
+ 51          with gr.Row():
+ 52              with gr.Box(elem_classes="no-border"):
+ 53                  model_c = gr.Dropdown(choices=["GPT3", "GPT4", "Claude", "HugginFaceHub"], label="Model",
+ 54                                        value="GPT3")
+ 55                  key_c = gr.Textbox(label="API Key")
+ 56                  repo_c = gr.Textbox(label="HF Repo", visible=False)
+ 57                  example_c = gr.Textbox(label="Paste here a style of text you want to imitate", lines=15)
+ 58                  prompt_c = gr.Textbox(label="Write here the description of the text you want to generate", lines=5)
+ 59              with gr.Box(elem_classes="no-border"):
+ 60                  output = gr.Textbox(label="Generated Text:", lines=34, show_copy_button=True)
+ 61          with gr.Row():
+ 62              with gr.Box(elem_classes="no-border center"):
+ 63                  with gr.Row():
+ 64                      with gr.Column():
+ 65                          with gr.Row():
+ 66                              gr.ClearButton([key_c, repo_c, example_c, prompt_c, output])
+ 67                              run = gr.Button(variant="primary")
+ 68                      with gr.Column():
+ 69                          pass
+ 70
+ 71
+ 72      def show_example():
+ 73          return [gr.update(value=seed_text), gr.update(value=prompt_example)]
+ 74
+ 75
+ 76      def show_repo_id(model):
+ 77          return gr.update(visible=model == "HugginFaceHub")
+ 78
+ 79
+ 80      run.click(fn=process, inputs=[model_c, key_c, repo_c, example_c, prompt_c], outputs=output)
+ 81      example.click(show_example, [], [example_c, prompt_c])
+ 82      model_c.select(show_repo_id, inputs=[model_c], outputs=[repo_c])
  83
  84  demo.launch()
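In the new layout, the HF Repo textbox appears only when the HugginFaceHub backend is selected, via show_repo_id wired to model_c.select. Below is a minimal, self-contained sketch of that same show/hide pattern; the component and function names are illustrative, not part of the commit.

import gradio as gr

# Sketch of the visibility toggle used above: selecting a dropdown value
# returns gr.update(visible=...) for a textbox that only one backend needs.
with gr.Blocks() as sketch:
    model = gr.Dropdown(choices=["GPT3", "GPT4", "Claude", "HugginFaceHub"],
                        value="GPT3", label="Model")
    repo = gr.Textbox(label="HF Repo", visible=False)

    def toggle_repo(choice):
        # Show the repo field only for the HuggingFaceHub choice.
        return gr.update(visible=choice == "HugginFaceHub")

    model.select(toggle_repo, inputs=[model], outputs=[repo])

sketch.launch()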
style_scribble.py ADDED
@@ -0,0 +1,80 @@
+  1  from langchain.base_language import BaseLanguageModel
+  2  from langchain.chains import LLMChain, SequentialChain
+  3  from langchain.chat_models import ChatAnthropic
+  4  from langchain.chat_models import ChatOpenAI
+  5  from langchain.llms import HuggingFaceHub
+  6  from langchain.prompts import (
+  7      PromptTemplate,
+  8      ChatPromptTemplate,
+  9      SystemMessagePromptTemplate,
+ 10      HumanMessagePromptTemplate,
+ 11  )
+ 12
+ 13
+ 14  class StyleScribble:
+ 15      example: str
+ 16      prompt: str
+ 17      llm: BaseLanguageModel
+ 18
+ 19      def __init__(self, example=None, prompt=None, llm=None):
+ 20          self.example = example
+ 21          self.prompt = prompt
+ 22          self.llm = llm
+ 23
+ 24      def set_imp_llm(self, model):
+ 25          if model == 'GPT3':
+ 26              self.llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k")
+ 27          elif model == "GPT4":
+ 28              self.llm = ChatOpenAI(model_name="gpt-4")
+ 29          elif model == "Claude":
+ 30              self.llm = ChatAnthropic()
+ 31          else:
+ 32              self.llm = HuggingFaceHub(repo_id=model)
+ 33
+ 34      def run(self):
+ 35          return self.process()
+ 36
+ 37      def process(self):
+ 38          seq_chain = SequentialChain(
+ 39              chains=[self.get_extract_tone_chain(), self.get_generate_text_chain(self.prompt),
+ 40                      self.get_apply_style_chain()],
+ 41              input_variables=["text"], verbose=True)
+ 42          result = seq_chain({'text': self.example, "style": ""})
+ 43          return str(result.get('result'))
+ 44
+ 45      def create_chain(self, chat_prompt, output_key):
+ 46          return LLMChain(llm=self.llm,
+ 47                          prompt=chat_prompt, output_key=output_key)
+ 48
+ 49      def get_extract_tone_chain(self):
+ 50          template = """Building upon the nuances and distinctive traits in the sample text, establish
+ 51          a comprehensive style guide that encapsulates the unique tone and writing style present in the sample.
+ 52          This guide should focus on compelling tactics that foster a sense of connection between readers and
+ 53          the content. Refrain from discussing the specific theme of the sample text or using it as a direct example.
+ 54          Instead, formulate your analysis in such a way that it remains abstract, facilitating its application to any
+ 55          other text that might be inspired by or originate from the sample. This abstract analysis will enable writers to adopt the essence of the style while allowing for versatility across various themes or topics.
+ 56          """
+ 57          system_message_prompt = SystemMessagePromptTemplate.from_template(template)
+ 58          human_template = "{text}"
+ 59          human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
+ 60          chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
+ 61
+ 62          return self.create_chain(chat_prompt, "style")
+ 63
+ 64      def get_generate_text_chain(self, prompt):
+ 65          template = """Generate a text following the user_request (use the same language as the request):
+ 66          {user_request}
+ 67          """.replace("{user_request}", prompt)
+ 68          return self.create_chain(PromptTemplate.from_template(template),
+ 69                                   "generated_text")
+ 70
+ 71      def get_apply_style_chain(self):
+ 72          template = """STYLE:
+ 73          {style}
+ 74          REWRITE THE TEXT BELOW, APPLYING THE STYLE GUIDE ABOVE (use the same language as the request):
+ 75          {generated_text}
+ 76          """
+ 77
+ 78          prompt = PromptTemplate.from_template(template=template)
+ 79          prompt.partial(style="")
+ 80          return self.create_chain(prompt, "result")
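Read together with app.py, the new class is driven roughly as in the sketch below: build it from a style sample and a request, pick a backend with set_imp_llm, and run the three-chain pipeline (extract tone, generate text, apply style). This is a minimal sketch mirroring process() in app.py; the key value and the example string are placeholders, and valid provider credentials plus network access are assumed.

import os

from style_scribble import StyleScribble

# Placeholder key: set_key() in app.py copies one user-supplied key into all
# three provider variables; only the one matching the chosen backend is used.
os.environ["OPENAI_API_KEY"] = "sk-..."

# Mirrors process() in app.py: construct, choose a backend, run.
scribble = StyleScribble(
    example="A paragraph written in the style to imitate...",  # placeholder style sample
    prompt="Write a story about a girl participating in a dancing competition",
)
scribble.set_imp_llm("GPT3")  # "GPT3" | "GPT4" | "Claude" | a Hugging Face repo id
print(scribble.run())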