# HuggingFace Spaces page metadata (Space status: Sleeping) — scraped UI text,
# kept as a comment so this file remains valid Python.
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import GPT2Tokenizer, GPT2Model
from langchain.prompts import PromptTemplate

# Application and model are created once at import time so that every
# request reuses the same loaded tokenizer/weights.
app = FastAPI()
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
class TextRequest(BaseModel):
    """Request body for the classification endpoint: a single text field."""

    text: str
def preprocess_text(text: str) -> str:
    """Normalize input text to lowercase before tokenization."""
    return text.lower()
def classify_text(question: str):
    """Run GPT-2 over a prompt built from *question*.

    Returns the raw transformers model output. NOTE(review): GPT2Model has
    no language-modeling head, so this yields hidden states, not generated
    text — confirm whether GPT2LMHeadModel + decoding was intended.
    """
    # PromptTemplate accepts only `template` and `input_variables`; the
    # original also passed an unsupported `output_variables` kwarg, which
    # langchain rejects.
    prompt_template = PromptTemplate(
        template="Answer the following question and classify it: {question}",
        input_variables=["question"],
    )
    formatted_prompt = prompt_template.format(question=question)
    encoded_input = tokenizer(formatted_prompt, return_tensors='pt')
    # The tokenizer returns a BatchEncoding (a dict of tensors); it must be
    # unpacked with ** so input_ids/attention_mask reach forward() as
    # keyword arguments — passing it positionally raises at runtime.
    output = model(**encoded_input)
    return output
@app.post("/classify")
async def classify_text_endpoint(request: TextRequest):
    """Classify the posted text and return the answer as JSON.

    The original definition had no route decorator, so the endpoint was
    never registered with the FastAPI app and was unreachable.
    """
    preprocessed_text = preprocess_text(request.text)
    response = classify_text(preprocessed_text)
    # NOTE(review): classify_text returns a transformers ModelOutput, which
    # has no 'text' key — this lookup will raise at runtime. Kept as-is to
    # preserve the intended contract; the pipeline needs an LM head and a
    # decoding step to produce an answer string. TODO: confirm intended output.
    answer = response['text']
    return {"answer": answer}