yizhangliu committed
Commit d829f40
1 Parent(s): 08c23ad

update app.py

Files changed (1)
1. app.py (+6, -6)
app.py CHANGED

@@ -739,9 +739,6 @@ def run_anything_task(input_image, text_prompt, task_type, inpaint_prompt, box_t
     size = image_pil.size
     H, W = size[1], size[0]
 
-    if remove_use_segment == False and task_type == 'remove':
-        remove_mode = 'rectangle'
-
     # run grounding dino model
     if (task_type in ['inpainting', 'outpainting'] or task_type == 'remove') and mask_source_radio == mask_source_draw:
         pass
@@ -774,10 +771,13 @@ def run_anything_task(input_image, text_prompt, task_type, inpaint_prompt, box_t
 
     logger.info(f'run_anything_task_[{file_temp}]_{task_type}_2_')
 
-    if task_type == 'segment' or ((task_type in ['inpainting', 'outpainting'] or (task_type == 'remove' and remove_use_segment )) and mask_source_radio == mask_source_segment):
+    use_sam_predictor = True
+    if task_type == 'segment' or ((task_type in ['inpainting', 'outpainting'] or task_type == 'remove') and mask_source_radio == mask_source_segment):
         image = np.array(input_img)
+        if task_type == 'remove' and remove_use_segment == False:
+            use_sam_predictor = False
 
-        if sam_predictor:
+        if sam_predictor and use_sam_predictor:
             sam_predictor.set_image(image)
 
         for i in range(boxes_filt.size(0)):
@@ -785,7 +785,7 @@ def run_anything_task(input_image, text_prompt, task_type, inpaint_prompt, box_t
             boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
             boxes_filt[i][2:] += boxes_filt[i][:2]
 
-        if sam_predictor:
+        if sam_predictor and use_sam_predictor:
             boxes_filt = boxes_filt.to(sam_device)
             transformed_boxes = sam_predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2])
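For context, here is a minimal standalone sketch of the gating this commit introduces: a 'remove' task now enters the segmentation branch either way, and opts out of the SAM predictor via a use_sam_predictor flag when remove_use_segment is off, rather than being rerouted through the deleted remove_mode = 'rectangle' branch. The helper name, string task values, and predictor stand-in below are illustrative, not objects from app.py.

# Minimal sketch of the new gating, with stand-in values for illustration.
# In app.py this logic is inlined inside run_anything_task.

def should_run_sam(task_type, mask_source_radio, mask_source_segment,
                   remove_use_segment, sam_predictor):
    """Mirror the commit's logic: decide whether the SAM predictor runs."""
    use_sam_predictor = True
    if task_type == 'segment' or (
        (task_type in ['inpainting', 'outpainting'] or task_type == 'remove')
        and mask_source_radio == mask_source_segment
    ):
        # 'remove' without segmentation now reaches this branch too,
        # but declines SAM instead of being filtered out earlier.
        if task_type == 'remove' and not remove_use_segment:
            use_sam_predictor = False
        return sam_predictor is not None and use_sam_predictor
    return False

# 'remove' with segmentation disabled: branch is taken, SAM is skipped.
assert should_run_sam('remove', 'segment', 'segment', False, object()) is False
# 'remove' with segmentation enabled: SAM runs as before.
assert should_run_sam('remove', 'segment', 'segment', True, object()) is True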