from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from langchain.prompts import PromptTemplate

# Load the pretrained BART summarization model and its tokenizer
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")

def generate_answer(context):
    # Build the prompt (PromptTemplate accepts input_variables only;
    # it has no output_variables argument)
    prompt_template = PromptTemplate(
        template="Summarise the following context: {context}",
        input_variables=["context"],
    )
    formatted_prompt = prompt_template.format(context=context)

    # Tokenize the formatted prompt into PyTorch tensors
    encoded_input = tokenizer(formatted_prompt, return_tensors="pt")

    # Run the model's generate method to produce the summary tokens
    output = model.generate(**encoded_input)

    # Decode the generated tokens back to text, dropping special tokens
    # such as <s> and </s>
    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
    return decoded_output
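A quick smoke test of the function might look like the sketch below. This assumes the model weights download successfully on first use; the sample passage is purely hypothetical.

# Example usage (hypothetical sample text)
if __name__ == "__main__":
    passage = (
        "The James Webb Space Telescope, launched in December 2021, observes "
        "the universe in infrared light, letting it peer through dust clouds "
        "and study some of the earliest galaxies."
    )
    print(generate_answer(passage))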