ItzRoBeerT committed
Commit 0a5fba5
Parent: a56edad

Update app.py

Files changed (1)
  1. app.py +28 -6
app.py CHANGED
@@ -7,6 +7,7 @@ import requests
 import random
 import dom
 import os
+import time
 
 NUM_IMAGES = 2
 
@@ -23,6 +24,20 @@ headers = {"Authorization": f"Bearer {os.getenv('api_token')}"}
 model_id_image_description = "vikhyatk/moondream2"
 revision = "2024-08-26"
 
+# To measure the performance of these methods, I am adding this decorator, which simply prints to the terminal
+# the execution time of the methods that hold and use the models; this way we can study
+# how long each model stays active.
+def measure_performance(func):
+    def wrapper(*args, **kwargs):
+        print(f"Starting execution of '{func.__name__}' with 'args={args}, kwargs={kwargs}'")
+        start = time.time()
+        result = func(*args, **kwargs)
+        end = time.time()
+        duration = end - start
+        print(f"Execution time of '{func.__name__}' with 'args={args}, kwargs={kwargs}': {duration:.4f} seconds")
+        return result
+    return wrapper
+
 torch_dtype = torch.float32
 if torch.cuda.is_available():
     torch_dtype = torch.bfloat16  # GPU optimization
@@ -32,6 +47,7 @@ print("Cargando modelo de descripción de imágenes...")
 model_description = AutoModelForCausalLM.from_pretrained(model_id_image_description, trust_remote_code=True, revision=revision)
 tokenizer_description = AutoTokenizer.from_pretrained(model_id_image_description, revision=revision)
 
+@measure_performance
 def generate_description(image_path):
     image_test = Image.open(image_path)
     enc_image = model_description.encode_image(image_test)
@@ -42,6 +58,7 @@ def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.content
 
+@measure_performance
 def generate_image_by_description(description, avatar_style=None):
     images = []
     for _ in range(NUM_IMAGES):
@@ -52,29 +69,34 @@ def generate_image_by_description(description, avatar_style=None):
         image_bytes = query({"inputs": prompt, "parameters": {"seed": random.randint(0, 1000)}})
         image = Image.open(io.BytesIO(image_bytes))
         images.append(image)
-    print(images)
     return images
 
 def process_and_generate(image, avatar_style):
     description = generate_description(image)
     return generate_image_by_description(description, avatar_style)
 
-with gr.Blocks(js=dom.generate_title) as demo:
-    with gr.Row():
-        gr.Markdown(dom.generate_markdown)
-        gr.Markdown(dom.models)
+with gr.Blocks(js=dom.generate_title) as demo:
     with gr.Row():
         with gr.Column(scale=2, min_width=300):
             selected_image = gr.Image(type="filepath", label="Upload an Image of the Pigeon", height=300)
             example_image = gr.Examples(["./examples/pigeon.webp"], label="Example Images", inputs=[selected_image])
             avatar_style = gr.Radio(
                 ["Realistic", "Pixel Art", "Imaginative", "Cartoon"],
-                label="(optional) Select the avatar style:"
+                label="(optional) Select the avatar style:",
+                value="Pixel Art"
            )
            generate_button = gr.Button("Generate Avatar", variant="primary")
        with gr.Column(scale=2, min_width=300):
            generated_image = gr.Gallery(type="pil", label="Generated Avatar", height=300)
 
    generate_button.click(process_and_generate, inputs=[selected_image, avatar_style], outputs=generated_image)
+    with gr.Tab(label="Description"):
+        gr.Markdown(dom.generate_markdown)
+        gr.Markdown(dom.models)
+    with gr.Tab(label="Documentation"):
+        gr.Markdown(dom.doccumentation)
+
+
+
 
 demo.launch()
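
For context, the timing decorator added in this commit can be exercised on its own. The snippet below is a minimal sketch of the same pattern; `slow_step` is a hypothetical stand-in for a model call such as generate_description, not part of app.py.

import time

def measure_performance(func):
    # Same idea as in app.py: wrap the call and print how long it took.
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        print(f"Execution time of '{func.__name__}': {time.time() - start:.4f} seconds")
        return result
    return wrapper

@measure_performance
def slow_step():
    # Hypothetical stand-in for a model call such as generate_description.
    time.sleep(1)

slow_step()  # prints roughly: Execution time of 'slow_step': 1.00xx seconds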