Update app.py
Browse files
app.py
CHANGED
@@ -1,6 +1,8 @@
|
|
1 |
from huggingface_hub import InferenceClient
|
2 |
import gradio as gr
|
3 |
import os
|
|
|
|
|
4 |
import http.client
|
5 |
import typing
|
6 |
import urllib.request
|
@@ -11,6 +13,19 @@ vertexai.init(project=os.getenv('project_id'))
|
|
11 |
model = GenerativeModel("gemini-1.0-pro-vision")
|
12 |
client = InferenceClient("google/gemma-7b-it")
|
13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
def load_image_from_url(image_url: str) -> Image:
|
15 |
with urllib.request.urlopen(image_url) as response:
|
16 |
response = typing.cast(http.client.HTTPResponse, response)
|
@@ -47,6 +62,9 @@ def generate(
|
|
47 |
seed=42,
|
48 |
)
|
49 |
|
|
|
|
|
|
|
50 |
formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
|
51 |
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
|
52 |
output = ""
|
|
|
1 |
from huggingface_hub import InferenceClient
|
2 |
import gradio as gr
|
3 |
import os
|
4 |
+
import re
|
5 |
+
import requests
|
6 |
import http.client
|
7 |
import typing
|
8 |
import urllib.request
|
|
|
13 |
model = GenerativeModel("gemini-1.0-pro-vision")
|
14 |
client = InferenceClient("google/gemma-7b-it")
|
15 |
|
16 |
+
def extract_image_urls(text):
    """Find image URLs in *text* and return the last one that is actually reachable.

    Scans for http(s) URLs ending in a common image extension, probes each
    with an HTTP HEAD request, and keeps the last URL that responds 2xx with
    an image content-type.

    Args:
        text: Arbitrary user text (e.g. a chat prompt) possibly containing URLs.

    Returns:
        The last verified image URL as a string, or "" when none is found
        or none is reachable. Never raises on network failure.
    """
    # \S+ instead of the greedy .* — with .* two URLs on one line would be
    # fused into a single match spanning the whitespace between them.
    url_regex = r"(https?:\/\/\S+\.(?:png|jpg|jpeg|gif|webp|svg))"
    image_urls = re.findall(url_regex, text, flags=re.IGNORECASE)
    valid_image_url = ""
    for url in image_urls:
        try:
            # HEAD is cheaper than GET; timeout so a single dead host
            # cannot hang the request handler indefinitely.
            response = requests.head(url, timeout=5)
            if response.status_code in range(200, 300) and 'image' in response.headers.get('content-type', ''):
                valid_image_url = url
        except requests.exceptions.RequestException:
            pass  # Ignore inaccessible URLs
    return valid_image_url
|
28 |
+
|
29 |
def load_image_from_url(image_url: str) -> Image:
|
30 |
with urllib.request.urlopen(image_url) as response:
|
31 |
response = typing.cast(http.client.HTTPResponse, response)
|
|
|
62 |
seed=42,
|
63 |
)
|
64 |
|
65 |
+
image = extract_image_urls(prompt)
|
66 |
+
if image:
|
67 |
+
prompt = prompt.replace(image, search(image))
|
68 |
formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
|
69 |
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
|
70 |
output = ""
|