# LLMLingua-2 prompt-compression demo (Hugging Face Space, CPU-only).
# Import the required libraries.
import json

import gradio as gr
import tiktoken
from llmlingua import PromptCompressor

# Load the two pre-trained LLMLingua-2 compressors on CPU, keyed by the
# short model names offered in the UI's "Base Model" radio button.
compressors = {
    "xlm-roberta-large": PromptCompressor(
        model_name="microsoft/llmlingua-2-xlm-roberta-large-meetingbank",
        use_llmlingua2=True,
        device_map="cpu",
    ),
    "mbert-base": PromptCompressor(
        model_name="microsoft/llmlingua-2-bert-base-multilingual-cased-meetingbank",
        use_llmlingua2=True,
        device_map="cpu",
    ),
}

# GPT-4 tokenizer, used only to report token counts before/after compression.
tokenizer = tiktoken.encoding_for_model("gpt-4")

# Each example holds an "original_prompt" (str) and "QA_pairs"
# (list of [question, answer] string pairs) from the MeetingBank dataset.
with open('data/examples_MeetingBank.json', 'r', encoding='utf-8') as f:
    examples = json.load(f)
original_prompt_list = [[s["original_prompt"]] for s in examples]
qa_list = [s["QA_pairs"] for s in examples]
def compress(original_prompt, compression_rate, base_model="xlm-roberta-large", force_tokens=['\n'], chunk_end_tokens=['.', '\n']):
    """Compress a prompt with LLMLingua-2 and report what was kept.

    Parameters
    ----------
    original_prompt : str
        The prompt to compress.
    compression_rate : float
        Target ratio of tokens after / before compression (0.1 - 1.0).
    base_model : str
        Key into the module-level ``compressors`` dict; unknown keys fall
        back to "mbert-base".
    force_tokens : list[str]
        Tokens the compressor must preserve. The UI passes the escaped
        string ``"\\n"`` for newline.
    chunk_end_tokens : list[str]
        Tokens treated as chunk boundaries.

    Returns
    -------
    tuple
        (compressed_prompt, preserved_tokens, n_word_compressed) where
        ``preserved_tokens`` is a list of (word, '+' or None) pairs suitable
        for ``gr.HighlightedText`` and ``n_word_compressed`` is the GPT-4
        token count of the compressed prompt.
    """
    # Map the UI's escaped "\n" entry to a real newline. Build a new list
    # rather than mutating the argument: the original code rewrote the list
    # in place, which corrupted the shared mutable default (and the Gradio
    # component value) across calls.
    force_tokens = ['\n' if t == '\\n' else t for t in force_tokens]

    compressor = compressors.get(base_model, compressors["mbert-base"])
    results = compressor.compress_prompt_llmlingua2(
        original_prompt,
        rate=compression_rate,
        force_tokens=force_tokens,
        chunk_end_tokens=chunk_end_tokens,
        return_word_label=True,
        drop_consecutive=True,
    )

    compressed_prompt = results["compressed_prompt"]
    n_word_compressed = len(tokenizer.encode(compressed_prompt))

    # "fn_labeled_original_prompt" is "word<label_sep>label" entries joined
    # by word_sep; label "1" means the word survived compression.
    word_sep = "\t\t|\t\t"
    label_sep = " "
    preserved_tokens = []
    for entry in results["fn_labeled_original_prompt"].split(word_sep):
        # rsplit(.., 1) guards against a word that itself contains a space,
        # which would make a plain split() raise on unpacking.
        word, label = entry.rsplit(label_sep, 1)
        preserved_tokens.append((word, '+') if label == '1' else (word, None))

    return compressed_prompt, preserved_tokens, n_word_compressed
title = "LLMLingua-2-cpu"

# Build the Gradio UI: prompt in/out on the left, compression controls on
# the right, and a diff view showing which words were preserved.
with gr.Blocks(title=title) as app:
    with gr.Row():
        with gr.Column(scale=3):
            original_prompt = gr.Textbox(label="Original Prompt", lines=10, max_lines=10, interactive=True)
            compressed_prompt = gr.Textbox(value='', label="Compressed Prompt", lines=10, max_lines=10, interactive=False)
        with gr.Column(scale=1):
            base_model = gr.Radio(["mbert-base", "xlm-roberta-large"], label="Base Model", value="mbert-base", interactive=True)
            force_tokens = gr.Dropdown(['\\n', '.', '!', '?', ','],
                                       label="Tokens to Preserve",
                                       value=['\\n', '.', '!', '?', ','],
                                       multiselect=True,
                                       interactive=True)
            compression_rate = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.7, label="Compression rate", info="after compr. / before compr.", interactive=True)
            # BUG FIX: the original passed len(tokenizer.encode(original_prompt_text))
            # here, but `original_prompt_text` was never defined, so the app
            # crashed with a NameError at startup. The prompt box starts
            # empty, so the initial token count is simply 0.
            n_word_original = gr.Textbox(lines=1, label="Original (GPT-4 Tokens)", interactive=False, value=0)
            n_word_compressed = gr.Textbox(lines=1, label="Compressed (GPT-4 Tokens)", interactive=False)

    button = gr.Button("⚡Click to Compress")
    with gr.Accordion(label="Compression Details", open=False):
        diff_text = gr.HighlightedText(label="Diff", combine_adjacent=False, show_legend=True, color_map={"+": "green"})

    # Keep the original-token count live, and clear stale outputs whenever
    # the user edits the prompt.
    original_prompt.change(lambda x: len(tokenizer.encode(x)), inputs=[original_prompt], outputs=[n_word_original])
    original_prompt.change(lambda x: ("", "", []), inputs=[original_prompt], outputs=[compressed_prompt, n_word_compressed, diff_text])

    button.click(fn=compress,
                 inputs=[original_prompt, compression_rate, base_model, force_tokens],
                 outputs=[compressed_prompt, diff_text, n_word_compressed])

app.queue(max_size=10, api_open=False).launch(show_api=False)