from llmlingua import PromptCompressor

# Load the LLMLingua-2 compressor (a BERT-base multilingual token-classification
# model fine-tuned on MeetingBank).
compressor = PromptCompressor(
    model_name="microsoft/llmlingua-2-bert-base-multilingual-cased-meetingbank",
    use_llmlingua2=True,
)

original_prompt = """John: So, um, I've been thinking about the project, you know, and I believe we need to, uh, make some changes. I mean, we want the project to succeed, right? So, like, I think we should consider maybe revising the timeline.

Sarah: I totally agree, John. I mean, we have to be realistic, you know. The timeline is, like, too tight. You know what I mean? We should definitely extend it.
"""

# Compress to roughly 60% of the original tokens. force_tokens preserves
# punctuation and newlines, chunk_end_tokens marks where chunks may end,
# drop_consecutive collapses repeated forced tokens, and return_word_label
# also returns per-word keep/drop labels.
results = compressor.compress_prompt_llmlingua2(
    original_prompt,
    rate=0.6,
    force_tokens=['\n', '.', '!', '?', ','],
    chunk_end_tokens=['.', '\n'],
    return_word_label=True,
    drop_consecutive=True,
)

print(results.keys())
print(f"Compressed prompt: {results['compressed_prompt']}")