# gguf-my-lora / Dockerfile
FROM python:3.10.13-slim-bullseye
ENV DEBIAN_FRONTEND=noninteractive
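# System packages: git/git-lfs to fetch Hub repos and llama.cpp, wget/curl for
# downloads, build-essential to compile any native Python wheels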
RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y --no-install-recommends \
    git \
    git-lfs \
    wget \
    curl \
    # python build dependencies
    build-essential
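# Run the app as a non-root user with uid 1000, as expected on Hugging Face Spaces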
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:${PATH}
WORKDIR ${HOME}/app
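# Python dependencies for the Space itself: huggingface-hub + hf-transfer for fast
# Hub transfers, gradio[oauth] and gradio_huggingfacehub_search for the UI with HF
# login, and APScheduler (presumably for periodic background jobs)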
RUN pip install --no-cache-dir -U pip setuptools wheel && \
    pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
COPY --chown=1000 . ${HOME}/app
# TODO: revert once the PR is merged
# RUN git clone https://github.com/ggerganov/llama.cpp --depth 1
RUN git clone https://github.com/ngxson/llama.cpp -b xsn/lora_convert_base_is_optional --depth 1
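# llama.cpp ships the LoRA-to-GGUF converter this Space relies on. An illustrative
# invocation (script name from the llama.cpp repo, exact flags assumed) would be roughly:
#   python llama.cpp/convert_lora_to_gguf.py <adapter_dir> --outfile adapter.gguf
# Judging by the branch name, this fork makes the base-model argument optional.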
RUN pip install -r llama.cpp/requirements.txt
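# Runtime environment for the Gradio Space. The CUDA/NVIDIA paths only matter if the
# Space runs on GPU hardware where the runtime mounts those directories; this image
# itself does not install the CUDA toolkit.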
ENV PYTHONPATH=${HOME}/app \
    PYTHONUNBUFFERED=1 \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    GRADIO_ALLOW_FLAGGING=never \
    GRADIO_NUM_PORTS=1 \
    GRADIO_SERVER_NAME=0.0.0.0 \
    GRADIO_THEME=huggingface \
    TQDM_POSITION=-1 \
    TQDM_MININTERVAL=1 \
    SYSTEM=spaces \
    LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
    PATH=/usr/local/nvidia/bin:${PATH}
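# start.sh comes from the app sources copied above and is expected to launch the Gradio app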
ENTRYPOINT /bin/bash start.sh