Paulus Michael Leang committed
Commit 3aa9ef0 • 1 Parent(s): 2bd5d74

Hotfix

Files changed:
- Dockerfile (+9 -4)
- app.py (+1 -2)
Dockerfile CHANGED

@@ -1,16 +1,21 @@
 # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
 # you will also find guides on how best to write your Dockerfile
 
-FROM python:3.
+FROM python:3.12
 
 RUN useradd -m -u 1000 user
-USER
+USER root
 ENV PATH="/home/user/.local/bin:$PATH"
 
-WORKDIR /
+WORKDIR /
+
+RUN apt-get update && apt-get install -y \
+    libgl1 \
+    libglib2.0-0 \
+    && rm -rf /var/lib/apt/lists/*
 
 COPY --chown=user ./requirements.txt requirements.txt
 RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
 COPY --chown=user . /app
-CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
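Since the container serves uvicorn on port 7860, a quick way to verify the fix is to hit the endpoint from the host once the image is built and running (e.g. docker build -t space . && docker run -p 7860:7860 space). A minimal smoke test follows; the URL, port mapping, and the "prompt" field are inferred from the Dockerfile CMD and the app.py diff below, not confirmed by this commit.

# Hypothetical smoke test for the running container; standard library only.
import json
import urllib.request

req = urllib.request.Request(
    "http://localhost:7860/generate_chat",
    data=json.dumps({"prompt": "Hello"}).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    # Expected response shape per the app.py diff: {"answer": ...}
    print(json.load(resp))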
app.py CHANGED

@@ -1,5 +1,4 @@
 from fastapi import FastAPI
-from transformers import AutoModelForCausalLM, AutoTokenizer
 from pydantic import BaseModel
 from langchain_ollama import OllamaLLM
 import uvicorn
@@ -18,7 +17,7 @@ def greet_json():
 
 @app.post("/generate_chat")
 def generateAi(request: ChatRequest):
-    result = llmModel.invoke(input=
+    result = llmModel.invoke(input=request.prompt)
 
     return {"answer": result}
 
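For context, a sketch of what app.py plausibly looks like after this commit. Everything not visible in the diff is an assumption: the ChatRequest field, the greet_json body, the OllamaLLM model name, and the __main__ block.

from fastapi import FastAPI
from pydantic import BaseModel
from langchain_ollama import OllamaLLM
import uvicorn

app = FastAPI()

# Model name is an assumption; the diff only shows that llmModel is an OllamaLLM.
llmModel = OllamaLLM(model="llama3")

class ChatRequest(BaseModel):
    # Field name inferred from request.prompt in the diff.
    prompt: str

@app.get("/")
def greet_json():
    # Body assumed from the standard FastAPI Docker Space template.
    return {"Hello": "World!"}

@app.post("/generate_chat")
def generateAi(request: ChatRequest):
    # The hotfix: pass the request's prompt field to the LLM.
    result = llmModel.invoke(input=request.prompt)
    return {"answer": result}

if __name__ == "__main__":
    # Assumed fallback; the Dockerfile's CMD starts uvicorn directly.
    uvicorn.run(app, host="0.0.0.0", port=7860)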