|
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline |
|
from langchain.chains.summarize import load_summarize_chain |
|
from langchain.prompts import PromptTemplate |
|
|
|
|
|
def get_refine_chain(pipeline_or_llm, model_type):
    """Build a langchain "refine"-style summarization chain.

    Parameters
    ----------
    pipeline_or_llm:
        Either a ready-to-use LLM object (when ``model_type == "openai"``)
        or a raw HuggingFace ``pipeline`` object, which is wrapped in
        ``HuggingFacePipeline`` for any other ``model_type``.
    model_type:
        ``"openai"`` selects the verbose summary/refine prompt pair;
        any other value selects minimal pass-through prompts.

    Returns
    -------
    The chain produced by ``load_summarize_chain`` with
    ``chain_type="refine"``: it reads documents from the
    ``input_documents`` key and writes the summary to ``output_text``.
    """
    if model_type != "openai":
        # Local HuggingFace model: wrap the raw pipeline and pass the text
        # through with near-empty prompt scaffolding.
        llm = HuggingFacePipeline(pipeline=pipeline_or_llm)
        initial_prompt = PromptTemplate.from_template("{text}")
        followup_prompt = PromptTemplate.from_template("{existing_answer}\n{text}")
    else:
        # OpenAI-style LLM is usable as-is; give it the detailed prompts.
        llm = pipeline_or_llm
        initial_prompt = PromptTemplate.from_template(
            """Write a concise summary of the following:

{text}

CONCISE SUMMARY:"""
        )
        followup_prompt = PromptTemplate.from_template(
            """Your job is to produce a final summary

We have provided an existing summary up to a certain point: {existing_answer}

We have the opportunity to refine the existing summary (only if needed) with some more context below.

------------

{text}

------------

Given the new context, refine the original summary in bullets. If the context isn't useful return the original summary."""
        )

    return load_summarize_chain(
        llm=llm,
        chain_type="refine",
        question_prompt=initial_prompt,
        refine_prompt=followup_prompt,
        return_intermediate_steps=False,
        input_key="input_documents",
        output_key="output_text",
        verbose=True,
    )
|
|
|
|
|
|
|
|