# NOTE(review): the three lines below this comment's position originally read
# "Spaces: / Sleeping / Sleeping" — a Hugging Face Spaces status banner captured
# when the file was scraped from the web page; kept as a comment so the module
# remains valid Python.
# Third-party
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn

# Project-local
from src.TinyLLama import text_generation
from src.classmodels.inputforgeneration import InputForGeneration
from src.classmodels.generatedoutput import GeneratedOutput

# FastAPI application object for the TinyLLama text-generation service.
app = FastAPI()

# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers under the CORS spec (credentials require an explicit
# origin). Tighten this list before exposing the service publicly.
origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
def warmupGenerationModel():
    """Warm up the text-generation model and return its status message.

    Delegates to ``text_generation.warmupTextGenerationModel()``; presumably
    invoked once at startup so the first user request does not pay the
    model-load latency — TODO confirm against the caller.

    NOTE(review): this function (and the endpoint below) may originally have
    carried a FastAPI decorator that was lost in page extraction — verify
    against the deployed application.
    """
    return text_generation.warmupTextGenerationModel()
async def generateTextUsingLLama(inputSettings: InputForGeneration) -> GeneratedOutput:
    """Generate text with the TinyLLama model for the given input settings.

    Args:
        inputSettings: generation parameters forwarded verbatim to
            ``text_generation.generateText``.

    Returns:
        GeneratedOutput carrying an HTTP-style status code:
        200 with ``generated_text`` on success, 400 when the model returned
        ``None``, 500 with the exception message on any error.
    """
    # Keep the try body minimal: only the call that can actually raise.
    try:
        output = text_generation.generateText(inputSettings)
    except Exception as e:
        # Boundary handler: report the failure in the response payload
        # instead of letting the exception escape the endpoint.
        return GeneratedOutput(status_code=500, message=str(e))
    if output is not None:
        return GeneratedOutput(status_code=200, generated_text=output)
    return GeneratedOutput(status_code=400, message="error when generating text")
if __name__ == "__main__":
    # Serve the app with uvicorn's defaults (127.0.0.1:8000) when run
    # directly as a script.
    uvicorn.run(app=app)