Update app.py
app.py CHANGED
@@ -8,15 +8,17 @@ from llama_index.llms.huggingface import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from pydantic import BaseModel
 import datetime
+from dotenv import load_dotenv
+load_dotenv()
 # Define Pydantic model for incoming request body
 class MessageRequest(BaseModel):
     message: str


-os.environ["HF_TOKEN"] = ""
+os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN")
 app = FastAPI()

-app.mount("/static", StaticFiles(directory="
+app.mount("/static", StaticFiles(directory="static"), name="static")


 # Configure Llama index settings
@@ -24,7 +26,7 @@ Settings.llm = HuggingFaceInferenceAPI(
     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
     tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
     context_window=3000,
-    token="",
+    token=os.getenv("HF_TOKEN"),
     max_new_tokens=512,
     generate_kwargs={"temperature": 0.1},
 )
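For context, this commit replaces hardcoded Hugging Face tokens with values loaded from the environment via python-dotenv. A minimal sketch of the pattern it adopts is below; the `.env` contents and the missing-token guard are illustrative assumptions, not part of the commit.

# Assumed .env file next to app.py (placeholder value; never commit a real token):
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxx

import os
from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from .env into the process environment

token = os.getenv("HF_TOKEN")
if token is None:
    # os.environ["HF_TOKEN"] = None would raise TypeError (environ values
    # must be strings), so fail early with a clearer message instead.
    raise RuntimeError("HF_TOKEN is not set; define it in .env or the environment")
os.environ["HF_TOKEN"] = token

Loading the token this way keeps the secret out of source control: `.env` stays local (and gitignored) while the code only references the variable name.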