FROM ubuntu:latest
# alternative: pin the base image, e.g. ubuntu:22.04
# Add a non-root user to run the application (recommended for security)
# Install curl, then Ollama via the official install script
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://ollama.com/install.sh | sh
# Bind the Ollama server to all interfaces on port 7860 (the port exposed below);
# without an explicit port, Ollama defaults to 11434.
ENV OLLAMA_HOST=0.0.0.0:7860
# List existing user names and UIDs (the stock "ubuntu" user already holds UID 1000)
RUN cut -d: -f1,3 /etc/passwd
# Remove the default "ubuntu" user to free UID 1000, then create appuser with that UID
RUN userdel ubuntu && useradd -m -u 1000 appuser
#RUN chown -R appuser:appuser /home/appuser
#RUN apt-get update && apt-get install -y \
#python3-pip
#USER appuser
#RUN useradd -m -u 1000 appuser
#&& chown -R appuser:appuser /home/appuser
# Create the directory and give appropriate permissions
#RUN mkdir -p /.ollama && chmod 777 /.ollama
#RUN mkdir -p /home/appuser/.ollama && chmod 777 /home/appuser/.ollama
#RUN mkdir -p /home/appuser/.ollama/models &&
#USER appuser
#WORKDIR /.ollama
# Copy the locally prepared model files into Ollama's model store
# (this path matches OLLAMA_MODELS set below; an Ollama store normally holds blobs/ and manifests/)
COPY --chown=appuser model_llm_local/model_ollama3 /usr/share/ollama/.ollama/models
#COPY --chown=appuser model_llm_local/jsonl_llama3_instruct /usr/share/ollama/.ollama/models
#RUN chmod 777 /home/appuser/.ollama/models
# Copy the entry point script
#COPY --chown=appuser start.sh /start.sh
#RUN chmod +x /start.sh
# Set the entry point script as the default command
#ENTRYPOINT ["/start.sh"]
CMD ["ollama", "serve"]
#& sleep 30 && ollama pull llama3:8b-instruct-q8_0
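# A minimal sketch of the /start.sh entrypoint referenced above, if it were reinstated
# (hypothetical script; the model tag is taken from the comment above and can be changed):
#
#   #!/bin/sh
#   ollama serve &          # start the server in the background
#   sleep 30                # give it time to come up
#   ollama pull llama3:8b-instruct-q8_0
#   wait                    # keep the container alive on the server process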
# Set the model as an environment variable (this can be overridden)
#ENV model="nomic-embed-text","yayarun/mixtral_erbot"
# Point Ollama at the model directory populated by the COPY above
ENV OLLAMA_MODELS="/usr/share/ollama/.ollama/models"
# Expose the server port
EXPOSE 7860
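# Example usage (a sketch; the image tag "ollama-space" is an arbitrary name):
#   docker build -t ollama-space .
#   docker run -p 7860:7860 ollama-space
#   curl http://localhost:7860/api/tags    # lists the models found under OLLAMA_MODELS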