FROM python:3.10.9

# Set the working directory to /
WORKDIR /

# Copy the repository contents into the image root so requirements.txt is available for the install steps below
COPY . .

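# Install the Hugging Face Transformers library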
RUN pip install transformers

# Install PyTorch
RUN pip install torch

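# Install Accelerate (commonly required by Transformers for large-model loading)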
RUN pip install accelerate

# Install the Hugging Face Hub client, used to download the Llama 2 model
RUN pip install --upgrade huggingface_hub

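# Build llama-cpp-python (with the server extras) against OpenBLAS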
RUN CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install 'llama-cpp-python[server]' --upgrade --force-reinstall --no-cache-dir

# Install the remaining dependencies from requirements.txt
RUN pip install --no-cache-dir --upgrade -r /requirements.txt

# Create a non-root user (UID 1000) with a writable home directory for the app
RUN useradd -m -u 1000 user

USER user

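# Point HOME at the new user's home directory and add its local bin to PATH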
ENV HOME=/home/user \
	PATH=/home/user/.local/bin:$PATH

WORKDIR $HOME/app

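# Copy the application code into the app directory, owned by the non-root user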
COPY --chown=user . $HOME/app

# Start the FastAPI app on port 7860, the default port expected by Spaces
CMD ["uvicorn", "question_paper:app", "--host", "0.0.0.0", "--port", "7860"]