# kraken-api/app/main.py
from fastapi import FastAPI, UploadFile, File
import subprocess
import json
import os
import tempfile
import shutil
from pydantic import BaseModel
import torch

app = FastAPI()


class LineDetectionResponse(BaseModel):
    lines: list


@app.post("/detect_lines", response_model=LineDetectionResponse)
async def detect_lines(file: UploadFile = File(...)):
    # Pick the GPU if CUDA is available, otherwise fall back to CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Work in a temporary directory that is cleaned up automatically
    with tempfile.TemporaryDirectory() as temp_dir:
        # Save the uploaded file so Kraken can read it from disk
        temp_file_path = os.path.join(temp_dir, file.filename)
        with open(temp_file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        # Path for Kraken's JSON segmentation output
        lines_json_path = os.path.join(temp_dir, "lines.json")

        # Run Kraken baseline segmentation (-bl). Passing the arguments as a
        # list instead of a shell string avoids quoting problems with
        # uploaded filenames that contain spaces or shell metacharacters.
        kraken_command = [
            "kraken",
            "-i", temp_file_path, lines_json_path,
            "segment", "-bl",
            "--device", device,
        ]
        subprocess.run(kraken_command, check=True)

        # Load the detected lines from the JSON file
        with open(lines_json_path, "r") as f:
            lines_data = json.load(f)

        # Return the lines data
        return LineDetectionResponse(lines=lines_data["lines"])
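
# Note (assumption about Kraken's output format): with `segment -bl`, Kraken
# writes a JSON document whose "lines" entry is a list of per-line records,
# typically carrying baseline and boundary coordinates; the endpoint above
# returns that list verbatim.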


# Root endpoint returning basic information about the API
@app.get("/")
async def root():
    return {"message": "Welcome to the Kraken Line Detection API"}


# On Hugging Face Spaces the app is served with uvicorn on port 7860:
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860, workers=1)
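

# Example client call (a minimal sketch): this assumes the server is running
# locally on port 7860 and that the `requests` package is installed, which is
# not a dependency of this app.
#
#   import requests
#
#   with open("page.jpg", "rb") as fh:
#       resp = requests.post(
#           "http://localhost:7860/detect_lines",
#           files={"file": ("page.jpg", fh, "image/jpeg")},
#       )
#   print(resp.json()["lines"])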