Update app.py
app.py
CHANGED
@@ -14,7 +14,7 @@ with open(".config/application_default_credentials.json", 'w') as file:
 
 vertexai.init(project=os.getenv('project_id'))
 model = GenerativeModel("gemini-1.0-pro-vision")
-
+client = InferenceClient("google/gemma-7b-it")
 
 def extract_image_urls(text):
     url_regex = r"(https?:\/\/.*\.(?:png|jpg|jpeg|gif|webp|svg))"
@@ -143,18 +143,7 @@ def search(url):
 
 import random
 
-models=[
-    "google/gemma-7b",
-    "google/gemma-7b-it",
-    "google/gemma-2b",
-    "google/gemma-2b-it"
-]
-clients=[
-    InferenceClient(models[0]),
-    InferenceClient(models[1]),
-    InferenceClient(models[2]),
-    InferenceClient(models[3]),
-]
+
 
 def load_models(inp):
     return gr.update(label=models[inp])
@@ -169,9 +158,7 @@ def format_prompt(message, history, cust_p):
     return prompt
 
 def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
-    print(client_choice)
    hist_len=0
-    client=clients[int(client_choice)-1]
    if not history:
        history = []
        hist_len=0
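Since the change swaps the four-entry clients list for a single module-level InferenceClient, below is a minimal sketch of how that client could be driven from the sampling settings chat_inf already receives. This is an illustration under assumptions, not part of the commit: the generate helper and its signature are hypothetical; only the standard huggingface_hub text_generation streaming call is used, with keyword names mirroring chat_inf's seed, temp, tokens, top_p and rep_p arguments.

# Usage sketch (assumption, not from this commit): drive the new module-level
# client with the sampling settings that chat_inf already receives.
from huggingface_hub import InferenceClient

client = InferenceClient("google/gemma-7b-it")

def generate(prompt, seed, temp, tokens, top_p, rep_p):
    # Hypothetical helper: name and signature chosen for illustration only.
    generate_kwargs = dict(
        temperature=temp,
        max_new_tokens=tokens,
        top_p=top_p,
        repetition_penalty=rep_p,
        do_sample=True,
        seed=seed,
    )
    # Stream tokens back so a Gradio chatbot can update incrementally.
    stream = client.text_generation(
        prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output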