Shreyas094 committed
Commit b38068f
1 Parent(s): 0a3824a
Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import random
 import requests
 from bs4 import BeautifulSoup
 from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 
 !huggingface-cli login
 
@@ -98,6 +99,10 @@ model_name = 'mistralai/Mistral-7B-Instruct-v0.3'
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
+# Check if a GPU is available and if not, fall back to CPU
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
+
 # Example usage
 search_term = "How did Tesla perform in Q1 2024"
 search_results = google_search(search_term, num_results=3)
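Note (not part of the commit): a minimal sketch of how the `device` placement added above would typically be used at generation time. Only `tokenizer`, `model`, and `device` come from app.py; the prompt text and `max_new_tokens` value are illustrative assumptions.

    # Hypothetical usage sketch: input tensors must live on the same device as the model,
    # otherwise model.generate raises a device-mismatch error.
    prompt = "Summarize Tesla's Q1 2024 results."  # assumed example prompt
    inputs = tokenizer(prompt, return_tensors="pt").to(device)  # move inputs to GPU or CPU
    with torch.no_grad():  # no gradients needed for inference
        output_ids = model.generate(**inputs, max_new_tokens=200)  # assumed generation length
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Falling back to CPU when `torch.cuda.is_available()` is False keeps the script runnable on machines without a GPU, at the cost of much slower generation for a 7B-parameter model.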