Spaces:
Running
Running
wjbmattingly
committed on
Commit
•
cf28c32
1
Parent(s):
2db2340
init
Browse files
- Dockerfile +25 -0
- app/main.py +50 -0
- main.py +50 -0
- requirements.txt +5 -0
Dockerfile
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# CUDA base image so torch/kraken can use a GPU when one is available.
FROM nvidia/cuda:11.8.0-base-ubuntu22.04

# Install Python and the native libraries required by image handling.
# --no-install-recommends keeps the layer small; removing the apt lists in
# the SAME RUN step prevents the package index from being baked into the
# image layer (it would persist even if deleted in a later step).
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3-pip \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender-dev \
    libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*

# Set the working directory in the container
WORKDIR /code

# Copy the requirements file first so the dependency-install layer is
# cached independently of application-code changes.
COPY ./requirements.txt /code/requirements.txt

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Copy the FastAPI app into the container
COPY ./app /code/app

# Serve the FastAPI app on port 7860.
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
|
app/main.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI, UploadFile, File
|
2 |
+
from fastapi.responses import JSONResponse
|
3 |
+
import subprocess
|
4 |
+
import json
|
5 |
+
import os
|
6 |
+
import tempfile
|
7 |
+
import shutil
|
8 |
+
from pydantic import BaseModel
|
9 |
+
import torch
|
10 |
+
|
11 |
+
app = FastAPI()
|
12 |
+
|
13 |
+
class LineDetectionResponse(BaseModel):
    # Response schema for POST /detect_lines.
    # `lines` holds the entries under the "lines" key of Kraken's
    # segmentation JSON output (element structure is defined by Kraken,
    # not visible here — see the kraken CLI docs to confirm).
    lines: list
|
15 |
+
|
16 |
+
@app.post("/detect_lines", response_model=LineDetectionResponse)
async def detect_lines(file: UploadFile = File(...)):
    """Run Kraken baseline segmentation on an uploaded page image.

    The uploaded file is written to a temporary directory, the ``kraken``
    CLI is invoked to segment it, and the resulting line records are
    returned to the caller.

    Args:
        file: Multipart image upload to segment.

    Returns:
        LineDetectionResponse: the ``lines`` entries from Kraken's
        segmentation JSON.

    Raises:
        subprocess.CalledProcessError: if the kraken CLI exits non-zero.
    """
    # Use the GPU when PyTorch can see one; kraken accepts the same
    # device strings ("cuda" / "cpu").
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Temporary directory is removed automatically, cleaning up both the
    # uploaded image and the JSON output.
    with tempfile.TemporaryDirectory() as temp_dir:
        # basename() blocks path traversal through a crafted filename
        # (e.g. "../../etc/x"); fall back to a fixed name if empty.
        safe_name = os.path.basename(file.filename or "") or "upload"
        temp_file_path = os.path.join(temp_dir, safe_name)
        with open(temp_file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        # Set up the output JSON path
        lines_json_path = os.path.join(temp_dir, "lines.json")

        # Argument list + shell=False prevents shell command injection via
        # the user-controlled filename; the original f-string with
        # shell=True allowed arbitrary command execution.
        kraken_command = [
            "kraken",
            "-i", temp_file_path, lines_json_path,
            "segment", "-bl",
            "--device", device,
        ]
        subprocess.run(kraken_command, shell=False, check=True)

        # Load the lines from the JSON file kraken wrote.
        with open(lines_json_path, 'r') as f:
            lines_data = json.load(f)

        # Return the lines data
        return LineDetectionResponse(lines=lines_data['lines'])
|
41 |
+
|
42 |
+
# Root endpoint: lightweight liveness / info check for the service.
@app.get("/")
async def root():
    """Return a static welcome message identifying the API."""
    welcome = {"message": "Welcome to the Kraken Line Detection API"}
    return welcome
|
46 |
+
|
47 |
+
# Entry point for running the API directly (e.g. `python app/main.py`).
# NOTE(review): in the deployed container uvicorn is started by the
# Dockerfile CMD instead, so this branch only matters for local runs.
if __name__ == "__main__":
    import uvicorn
    # Bind all interfaces on port 7860 with a single worker process.
    uvicorn.run(app, host="0.0.0.0", port=7860, workers=1)
|
main.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI, UploadFile, File
|
2 |
+
from fastapi.responses import JSONResponse
|
3 |
+
import subprocess
|
4 |
+
import json
|
5 |
+
import os
|
6 |
+
import tempfile
|
7 |
+
import shutil
|
8 |
+
from pydantic import BaseModel
|
9 |
+
import torch
|
10 |
+
|
11 |
+
app = FastAPI()
|
12 |
+
|
13 |
+
class LineDetectionResponse(BaseModel):
    # Response schema for POST /detect_lines.
    # `lines` holds the entries under the "lines" key of Kraken's
    # segmentation JSON output (element structure is defined by Kraken,
    # not visible here — see the kraken CLI docs to confirm).
    lines: list
|
15 |
+
|
16 |
+
@app.post("/detect_lines", response_model=LineDetectionResponse)
async def detect_lines(file: UploadFile = File(...)):
    """Run Kraken baseline segmentation on an uploaded page image.

    The uploaded file is written to a temporary directory, the ``kraken``
    CLI is invoked to segment it, and the resulting line records are
    returned to the caller.

    Args:
        file: Multipart image upload to segment.

    Returns:
        LineDetectionResponse: the ``lines`` entries from Kraken's
        segmentation JSON.

    Raises:
        subprocess.CalledProcessError: if the kraken CLI exits non-zero.
    """
    # Use the GPU when PyTorch can see one; kraken accepts the same
    # device strings ("cuda" / "cpu").
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Temporary directory is removed automatically, cleaning up both the
    # uploaded image and the JSON output.
    with tempfile.TemporaryDirectory() as temp_dir:
        # basename() blocks path traversal through a crafted filename
        # (e.g. "../../etc/x"); fall back to a fixed name if empty.
        safe_name = os.path.basename(file.filename or "") or "upload"
        temp_file_path = os.path.join(temp_dir, safe_name)
        with open(temp_file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        # Set up the output JSON path
        lines_json_path = os.path.join(temp_dir, "lines.json")

        # Argument list + shell=False prevents shell command injection via
        # the user-controlled filename; the original f-string with
        # shell=True allowed arbitrary command execution.
        kraken_command = [
            "kraken",
            "-i", temp_file_path, lines_json_path,
            "segment", "-bl",
            "--device", device,
        ]
        subprocess.run(kraken_command, shell=False, check=True)

        # Load the lines from the JSON file kraken wrote.
        with open(lines_json_path, 'r') as f:
            lines_data = json.load(f)

        # Return the lines data
        return LineDetectionResponse(lines=lines_data['lines'])
|
41 |
+
|
42 |
+
# Root endpoint: lightweight liveness / info check for the service.
@app.get("/")
async def root():
    """Return a static welcome message identifying the API."""
    welcome = {"message": "Welcome to the Kraken Line Detection API"}
    return welcome
|
46 |
+
|
47 |
+
# Entry point for running the API directly (e.g. `python main.py`).
# NOTE(review): in the deployed container uvicorn is started by the
# Dockerfile CMD instead, so this branch only matters for local runs.
if __name__ == "__main__":
    import uvicorn
    # Bind all interfaces on port 7860 with a single worker process.
    uvicorn.run(app, host="0.0.0.0", port=7860, workers=1)
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
|
2 |
+
python-multipart
|
3 |
+
uvicorn
|
4 |
+
kraken
|
5 |
+
torch
|