Yash Sachdeva committed on
Commit
6db4d36
1 Parent(s): b63fd3c

download llama

Browse files
Files changed (1) hide show
  1. Dockerfile +3 -2
Dockerfile CHANGED
@@ -18,8 +18,6 @@ RUN pip install accelerate
18
 
19
  # Install hugging face hub to download llama2 model
20
  RUN pip install --upgrade huggingface_hub
21
- RUN huggingface-cli download TheBloke/Llama-2-7b-Chat-GGUF llama-2-7b-chat.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
22
-
23
  RUN CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install 'llama-cpp-python[server]' --upgrade --force-reinstall --no-cache-dir
24
  # Install requirements.txt
25
  RUN pip install --no-cache-dir --upgrade -r /requirements.txt
@@ -34,6 +32,9 @@ ENV HOME=/home/user \
34
 
35
  WORKDIR $HOME/app
36
 
 
 
 
37
  COPY --chown=user . $HOME/app
38
 
39
  # Start the FastAPI app on port 7860, the default port expected by Spaces
 
18
 
19
  # Install hugging face hub to download llama2 model
20
  RUN pip install --upgrade huggingface_hub
 
 
21
  RUN CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install 'llama-cpp-python[server]' --upgrade --force-reinstall --no-cache-dir
22
  # Install requirements.txt
23
  RUN pip install --no-cache-dir --upgrade -r /requirements.txt
 
32
 
33
  WORKDIR $HOME/app
34
 
35
+ RUN huggingface-cli download TheBloke/Llama-2-7b-Chat-GGUF llama-2-7b-chat.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
36
+
37
+
38
  COPY --chown=user . $HOME/app
39
 
40
  # Start the FastAPI app on port 7860, the default port expected by Spaces