Commit b2250a6 by wendys-llc
Parent: 8440562

Update app.py

Files changed (1):
  1. app.py +8 -11
app.py CHANGED
@@ -6,13 +6,10 @@ from clip_interrogator import Config, Interrogator
 # MODELS = ['ViT-L (best for Stable Diffusion 1.*)',]
 
 # load BLIP and ViT-L https://huggingface.co/openai/clip-vit-large-patch14
-config = Config(clip_model_name="ViT-L-14/openai")
-ci = Interrogator(config)
-ci.clip_model = ci_vitl.clip_model.to("cpu")
-ci.config.blip_num_beams = 64
-ci.config.chunk_size = 2048
-ci.config.flavor_intermediate_count = 2048 # 1024
+from PIL import Image
+from clip_interrogator import Config, Interrogator
 
+ci = Interrogator(Config(clip_model_name="ViT-L-14/openai"))
 
 def image_analysis(image):
     image = image.convert('RGB')
@@ -33,7 +30,7 @@ def image_analysis(image):
     return medium_ranks, artist_ranks, movement_ranks, trending_ranks, flavor_ranks
 
 
-def image_to_prompt(image, clip_model_name, mode):
+def image_to_prompt(image, mode):
     image = image.convert('RGB')
     if mode == 'best':
         prompt = ci.interrogate(image)
@@ -129,7 +126,7 @@ def analyze_tab():
             trending = gr.Label(label="Trending", num_top_classes=5)
             flavor = gr.Label(label="Flavor", num_top_classes=5)
 
-    button = gr.Button("Analyze", api_name="image-analysis")
+    button = gr.Button("Analyze")
     button.click(image_analysis, inputs=[image, model], outputs=[medium, artist, movement, trending, flavor])
 
     examples=[['example01.jpg', MODELS[0]], ['example02.jpg', MODELS[0]]]
@@ -153,14 +150,14 @@ with gr.Blocks(css=CSS) as block:
                 input_image = gr.Image(type='pil', elem_id="input-img")
             with gr.Column():
                 input_mode = gr.Radio(['best', 'fast', 'classic', 'negative'], value='best', label='Mode')
-                submit_btn = gr.Button("Submit", api_name="image-to-prompt")
+                submit_btn = gr.Button("Submit")
                 output_text = gr.Textbox(label="Output", elem_id="output-txt")
 
         examples=[['example01.jpg', MODELS[0], 'best'], ['example02.jpg', MODELS[0], 'best']]
         ex = gr.Examples(
             examples=examples,
             fn=image_to_prompt,
-            inputs=[input_image, input_model, input_mode],
+            inputs=[input_image, input_mode],
             outputs=[output_text],
             cache_examples=True,
             run_on_click=True
@@ -174,7 +171,7 @@ with gr.Blocks(css=CSS) as block:
 
     submit_btn.click(
         fn=image_to_prompt,
-        inputs=[input_image, input_model, input_mode],
+        inputs=[input_image, input_mode],
         outputs=[output_text]
     )
     share_button.click(None, [], [], _js=share_js)
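A note on the removed startup block: besides hand-tuning the interrogator, it assigned ci.clip_model = ci_vitl.clip_model.to("cpu"), referencing a ci_vitl name that is never defined in this file (apparently a leftover from an earlier multi-model revision), which would raise a NameError at import time. The replacement one-liner simply accepts clip_interrogator's defaults. If the old tuning were still wanted, a fixed version could look roughly like this (a sketch, assuming the Config fields behave as in clip_interrogator 0.5.x; the values are the ones this commit deletes):

from clip_interrogator import Config, Interrogator

# Reapply the deleted tuning, minus the undefined ci_vitl reference.
config = Config(clip_model_name="ViT-L-14/openai")
config.blip_num_beams = 64               # wider BLIP beam search when captioning
config.chunk_size = 2048                 # batch size for CLIP text-feature passes
config.flavor_intermediate_count = 2048  # candidate "flavor" phrases kept for ranking
ci = Interrogator(config)
ci.clip_model = ci.clip_model.to("cpu")  # optional: hold CLIP weights on CPU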
 
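Dropping api_name from the two gr.Button(...) constructors also deserves context: in Gradio, api_name is an argument of event listeners such as .click(), not of the Button component, so passing it to the constructor never created the named endpoints it suggests. If a named API route were still wanted, it could be moved onto the listener, roughly (a sketch, assuming Gradio 3.x event-listener semantics):

button = gr.Button("Analyze")
button.click(
    image_analysis,
    inputs=[image, model],
    outputs=[medium, artist, movement, trending, flavor],
    api_name="image-analysis",  # would expose the listener as /api/image-analysis
)

(As rendered here, image_analysis takes a single image argument while the listener passes inputs=[image, model]; if the context shown is complete, that mismatch would fail with a TypeError when the button is clicked.)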
 
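Net effect for callers: image_to_prompt now takes (image, mode) instead of (image, clip_model_name, mode), and the Examples block and submit_btn.click were updated in step, so the UI wiring stays consistent with the new signature. A remote call would likewise pass two values rather than three; with the named endpoints gone, a client has to address the listener by function index, roughly (a sketch using gradio_client; the Space id and fn_index are placeholders, not taken from this commit):

from gradio_client import Client

client = Client("user/space-name")  # placeholder Space id
result = client.predict(
    "example01.jpg",   # image, as a local file path
    "best",            # mode: 'best' | 'fast' | 'classic' | 'negative'
    fn_index=1,        # placeholder: index of the image_to_prompt listener
)
print(result)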