Steven Anderson committed
Commit 04bda51
Parent: 8378f45

Works, and examples

Files changed (3)
  1. app.py +11 -6
  2. cat.jpg +0 -0
  3. dog.jpg +0 -0
app.py CHANGED
@@ -13,26 +13,31 @@ print("Loaded model.")
 def process(image, prompt):
     print("Inferring...")
     image = preprocess(image).unsqueeze(0).to(device)
-    print(image)
+    print("Image: ", image)
 
     prompts = prompt.split("\n")
+    print("Prompts: ", prompts)
     text = clip.tokenize(prompts).to(device)
-    print(text)
+    print("Tokens: ", text)
 
     with torch.no_grad():
         logits_per_image, logits_per_text = model(image, text)
-        probs = logits_per_image.softmax(dim=-1).cpu().numpy()
-    print(probs)
+        probs = logits_per_image.softmax(dim=-1).cpu()
+    print("Probs: ", probs)
 
-    return dict(zip(prompts, probs))
+    return {k: v.item() for (k,v) in zip(prompts, probs[0])}
 
 
 iface = gr.Interface(
     fn=process,
     inputs=[
-        gr.Image(type="pil"),
+        gr.Image(type="pil", label="Image"),
         gr.Textbox(lines=5, label="Prompts (newline-separated)"),
     ],
     outputs="label",
+    examples=[
+        ["dog.jpg", "a photo of a dog\na photo of a cat"],
+        ["cat.jpg", "a photo of a dog\na photo of a cat"],
+    ]
 )
 iface.launch()
cat.jpg ADDED
dog.jpg ADDED
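
For context, a minimal sketch of what app.py might look like after this commit. Only the process() function and the gr.Interface(...) call are confirmed by the diff above; the imports, the device selection, and the "ViT-B/32" checkpoint in clip.load are assumptions based on the hunk header's print("Loaded model.") line and standard openai/CLIP usage.

# Sketch of app.py after commit 04bda51.
# Assumed (not in the diff): imports, device selection, clip.load("ViT-B/32").
import gradio as gr
import torch
import clip

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)  # assumed checkpoint
print("Loaded model.")

def process(image, prompt):
    print("Inferring...")
    image = preprocess(image).unsqueeze(0).to(device)
    print("Image: ", image)

    # One prompt per line of the textbox.
    prompts = prompt.split("\n")
    print("Prompts: ", prompts)
    text = clip.tokenize(prompts).to(device)
    print("Tokens: ", text)

    with torch.no_grad():
        logits_per_image, logits_per_text = model(image, text)
        probs = logits_per_image.softmax(dim=-1).cpu()
    print("Probs: ", probs)

    # Map each prompt to its probability so Gradio's "label" output can
    # show a ranked list of classes.
    return {k: v.item() for (k,v) in zip(prompts, probs[0])}


iface = gr.Interface(
    fn=process,
    inputs=[
        gr.Image(type="pil", label="Image"),
        gr.Textbox(lines=5, label="Prompts (newline-separated)"),
    ],
    outputs="label",
    examples=[
        ["dog.jpg", "a photo of a dog\na photo of a cat"],
        ["cat.jpg", "a photo of a dog\na photo of a cat"],
    ]
)
iface.launch()

With outputs="label", Gradio treats the returned dict as class-name-to-confidence pairs, so for the bundled dog.jpg/cat.jpg examples the two prompts render as a ranked confidence list.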