# MiniGPT-4 (13B) demo image, built on the CUDA 11.8 development image.
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

WORKDIR /usr/local/src

ENV dir=/usr/local/src/MiniGPT-4
ENV llama_version=llama-13b-hf
ENV vicuna_diff=vicuna-13b-delta-v0

# The 13B requirements file and the pretrained MiniGPT-4 checkpoint are expected
# in the build context next to this Dockerfile.
COPY requriments-13b.txt pretrained_minigpt4_13b.pth ./
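# libgl1 and libglib2.0-0 are runtime libraries commonly needed by OpenCV, which
# the MiniGPT-4 requirements pull in; git-lfs is required to fetch the model
# weights from Hugging Face below. The torch/torchvision/torchaudio pins form a
# matching 1.12.1-era release set.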
RUN apt-get update -y && apt-get upgrade -y \
    && apt-get install -y libgl1 libglib2.0-0 wget git git-lfs python3-pip python-is-python3 \
    && pip3 install --upgrade pip

RUN pip install torch==1.12.1 torchaudio==0.12.1 torchvision==0.13.1
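# Install the MiniGPT-4 Python requirements, clone the repository, move the
# pretrained checkpoint into it, and fetch the LLaMA-13B base weights plus the
# Vicuna-13B v0 delta weights from Hugging Face. FastChat's apply_delta merges
# the delta onto the base model into ${dir}/vicuna_out; the tokenizer_config.json
# edit renames LLaMATokenizer to LlamaTokenizer so newer transformers releases
# can load it, and the two sed edits point the MiniGPT-4 configs at the
# checkpoint and the merged Vicuna weights.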
RUN pip install -r requriments-13b.txt \
    && git clone https://github.com/Vision-CAIR/MiniGPT-4.git \
    && mv pretrained_minigpt4_13b.pth ${dir}/ \
    && sed -i -e '11c\ \ ckpt: "/usr/local/src/MiniGPT-4/pretrained_minigpt4_13b.pth"' ${dir}/eval_configs/minigpt4_eval.yaml \
    && cd MiniGPT-4 \
    && git lfs install \
    && git clone https://huggingface.co/lmsys/${vicuna_diff} \
    && git clone https://huggingface.co/decapoda-research/${llama_version} \
    && pip install git+https://github.com/lm-sys/FastChat.git@v0.1.10 \
    && sed -i 's/LLaMATokenizer/LlamaTokenizer/' ${llama_version}/tokenizer_config.json \
    && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ \
    && sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml
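# Run the demo as an unprivileged user that owns the repository checkout.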
RUN adduser --disabled-password --gecos '' user
RUN chown -R user:user ${dir}
RUN chmod -R 777 ${dir}

USER user
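# The Gradio demo serves on port 7860.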
EXPOSE 7860

WORKDIR ${dir}
ENTRYPOINT ["python", "demo.py", "--cfg-path", "eval_configs/minigpt4_eval.yaml"]
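# A minimal usage sketch (assumes the image is tagged "minigpt4-13b" and the
# NVIDIA Container Toolkit is available on the host):
#
#   docker build -t minigpt4-13b .
#   docker run --gpus all -p 7860:7860 minigpt4-13b
#
# The web UI should then be reachable at http://localhost:7860.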