from fastapi import FastAPI, UploadFile, File
import subprocess
import json
import os
import tempfile
import shutil
from pydantic import BaseModel
import torch

app = FastAPI()


class LineDetectionResponse(BaseModel):
    lines: list


@app.post("/detect_lines", response_model=LineDetectionResponse)
async def detect_lines(file: UploadFile = File(...)):
    # Check if CUDA is available
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Create a temporary directory for the uploaded image and Kraken's output
    with tempfile.TemporaryDirectory() as temp_dir:
        # Save the uploaded file (basename only, so the path stays inside temp_dir)
        temp_file_path = os.path.join(temp_dir, os.path.basename(file.filename))
        with open(temp_file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        # Set up the output JSON path
        lines_json_path = os.path.join(temp_dir, "lines.json")

        # Run Kraken for baseline line detection; an argument list (rather than
        # a shell string) avoids quoting problems with unusual filenames
        kraken_command = [
            "kraken",
            "-i", temp_file_path, lines_json_path,
            "segment", "-bl",
            "--device", device,
        ]
        subprocess.run(kraken_command, check=True)

        # Load the lines from the JSON file written by Kraken
        with open(lines_json_path, "r") as f:
            lines_data = json.load(f)

        # Return the lines data
        return LineDetectionResponse(lines=lines_data["lines"])


# Optionally, a root endpoint for basic information
@app.get("/")
async def root():
    return {"message": "Welcome to the Kraken Line Detection API"}


# To run the app with GPU support on Hugging Face Spaces, start uvicorn with the following settings:
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860, workers=1)
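

# Example client call (a sketch, not part of the Space itself): assuming the
# server is reachable at http://localhost:7860 and "page.png" is a local image,
# the /detect_lines endpoint can be exercised with the `requests` library:
#
#   import requests
#
#   with open("page.png", "rb") as img:
#       resp = requests.post(
#           "http://localhost:7860/detect_lines",
#           files={"file": ("page.png", img, "image/png")},
#       )
#   resp.raise_for_status()
#   print(resp.json()["lines"])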