johnpaulbin committed
Commit f2c9d17
1 Parent(s): 9587bf3

Update Dockerfile

Files changed (1):
  Dockerfile +30 -13
Dockerfile CHANGED
@@ -1,22 +1,39 @@
- FROM ollama/ollama:latest
-
- RUN apt-get update && apt-get install curl -y
-
- # https://huggingface.co/docs/hub/spaces-sdks-docker-first-demo
- RUN useradd -m -u 1000 user
-
- USER user
-
- ENV HOME=/home/user \
-     PATH=/home/user/.local/bin:$PATH \
-     OLLAMA_HOST=0.0.0.0
-
- WORKDIR $HOME/app
-
- COPY --chown=user:user Modelfile $HOME/app/
-
- RUN curl -fsSL https://huggingface.co/johnpaulbin/translator-llm/resolve/main/translator-llama3.1-q4km-2.gguf?download=true -o llama.gguf
-
- RUN ollama serve & sleep 5 && ollama create translator -f Modelfile && ollama run translator
-
- EXPOSE 11434
+ # Base image
+ FROM ubuntu:latest
+
+ # Update packages and install curl and gnupg
+ RUN apt-get update && apt-get install -y \
+     curl \
+     gnupg
+
+ # Add NVIDIA package repositories
+ RUN curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
+     && echo "deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://nvidia.github.io/libnvidia-container/stable/deb/ $(. /etc/os-release; echo $UBUNTU_CODENAME) main" > /etc/apt/sources.list.d/nvidia-container-toolkit.list
+
+ # Install the NVIDIA container toolkit (check for any updated methods or URLs for Ubuntu jammy)
+ RUN apt-get update && apt-get install -y nvidia-container-toolkit || true
+
+ # Install Ollama
+ RUN curl https://ollama.ai/install.sh | sh
+ # Below is to fix embedding bug as per
+ # RUN curl -fsSL https://ollama.com/install.sh | sed 's#https://ollama.com/download#https://github.com/jmorganca/ollama/releases/download/v0.1.29#' | sh
+
+
+ # Create the directory and give appropriate permissions
+ RUN mkdir -p /.ollama && chmod 777 /.ollama
+
+ WORKDIR /.ollama
+
+ # Copy the entry point script
+ COPY entrypoint.sh /entrypoint.sh
+ RUN chmod +x /entrypoint.sh
+
+ # Set the entry point script as the default command
+ ENTRYPOINT ["/entrypoint.sh"]
+ CMD ["ollama", "serve"]
+
+ # Download the model weights (GGUF) into the working directory
+ RUN curl -fsSL https://huggingface.co/johnpaulbin/translator-llm/resolve/main/translator-llama3.1-q4km-2.gguf?download=true -o llama.gguf
+
+ # Expose the server port
+ EXPOSE 7860
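
The new image copies an entrypoint.sh that is not part of this commit. Below is a minimal sketch of what such a script might look like, assuming it registers the llama.gguf downloaded into /.ollama and keeps the Ollama server in the foreground; the "translator" model name and the sleep-based readiness wait are carried over from the previous Dockerfile as assumptions, and the 0.0.0.0:7860 bind is inferred from the EXPOSE 7860 line, not taken from the actual script.

#!/bin/sh
# Hypothetical entrypoint sketch: the real entrypoint.sh is not shown in this commit.
# Assumes the Dockerfile downloaded the GGUF weights to /.ollama/llama.gguf and that
# the server should listen on the exposed port 7860.
export OLLAMA_HOST=0.0.0.0:7860

# Start the Ollama server in the background so a model can be registered.
ollama serve &
SERVER_PID=$!
sleep 5

# Register the downloaded weights under a name ("translator" is an assumption
# carried over from the previous Dockerfile; the Modelfile is generated inline here).
printf 'FROM /.ollama/llama.gguf\n' > /tmp/Modelfile
ollama create translator -f /tmp/Modelfile

# Keep the background server in the foreground for the life of the container.
wait "$SERVER_PID"

Note that a script written this way never execs its arguments, so the CMD ["ollama", "serve"] would only take effect if the real entrypoint ends with exec "$@" instead of wait. Since the image installs the NVIDIA container toolkit, the container would typically be started with GPU access and the exposed port mapped, e.g. docker run --gpus all -p 7860:7860 <image>.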