Spaces:
Runtime error
Runtime error
João Artur
committed on
Commit
•
c04be2d
1
Parent(s):
0a6550d
cache dir
Browse files
- Dockerfile +2 -0
- app.py +1 -1
Dockerfile
CHANGED
@@ -8,6 +8,8 @@ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
|
|
8 |
|
9 |
COPY . .
|
10 |
|
|
|
|
|
11 |
EXPOSE 7860
|
12 |
|
13 |
CMD ["shiny", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
|
|
|
8 |
|
9 |
COPY . .
|
10 |
|
11 |
+
RUN mkdir /cache && chmod -R 777 /cache
|
12 |
+
|
13 |
EXPOSE 7860
|
14 |
|
15 |
CMD ["shiny", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
|
app.py
CHANGED
@@ -3,7 +3,7 @@ import torch
|
|
3 |
import os
|
4 |
|
5 |
# Set a writable cache directory for Hugging Face
|
6 |
-
os.environ['TRANSFORMERS_CACHE'] = '
|
7 |
|
8 |
# Load LLaMA 3 model and tokenizer
|
9 |
# model_name = "https://huggingface.co/nvidia/Llama3-ChatQA-2-70B" # Replace with LLaMA 3 model path or Hugging Face model link if available
|
|
|
3 |
import os
|
4 |
|
5 |
# Set a writable cache directory for Hugging Face
|
6 |
+
os.environ['TRANSFORMERS_CACHE'] = '/cache'
|
7 |
|
8 |
# Load LLaMA 3 model and tokenizer
|
9 |
# model_name = "https://huggingface.co/nvidia/Llama3-ChatQA-2-70B" # Replace with LLaMA 3 model path or Hugging Face model link if available
|