Update modeling_GOT.py
modeling_GOT.py CHANGED (+5 -2)
@@ -716,7 +716,7 @@ class GOTQwenForCausalLM(Qwen2ForCausalLM):
         return processed_images
 
 
-    def chat_crop(self, tokenizer, image_file, render=False, save_render_file=None, print_prompt=False, gradio_input=False):
+    def chat_crop(self, tokenizer, image_file, ocr_type, render=False, save_render_file=None, print_prompt=False, gradio_input=False):
         # Model
         self.disable_torch_init()
         multi_page=False
@@ -751,7 +751,10 @@ class GOTQwenForCausalLM(Qwen2ForCausalLM):
             # print("len ll: ", ll)
 
         else:
-            qs = 'OCR with format upon the patch reference: '
+            if ocr_type == 'format':
+                qs = 'OCR with format upon the patch reference: '
+            else:
+                qs = 'OCR upon the patch reference: '
             if gradio_input:
                 img = image_file.copy()
             else:
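A minimal usage sketch of the updated `chat_crop` signature, assuming the model is loaded through `transformers` with `trust_remote_code=True`; the repo id `ucaslcl/GOT-OCR2_0` and the sample image path are illustrative assumptions, not part of this commit:

```python
# Hypothetical usage of the new ocr_type argument added to chat_crop in this commit.
# The repo id and image path below are assumptions for illustration.
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
model = AutoModel.from_pretrained(
    'ucaslcl/GOT-OCR2_0',
    trust_remote_code=True,
    low_cpu_mem_usage=True,
    device_map='cuda',
    use_safetensors=True,
    pad_token_id=tokenizer.eos_token_id,
)
model = model.eval().cuda()

# ocr_type selects the prompt inside chat_crop:
#   'format'      -> 'OCR with format upon the patch reference: '
#   anything else -> 'OCR upon the patch reference: '
plain_text = model.chat_crop(tokenizer, 'page.png', ocr_type='ocr')
formatted = model.chat_crop(tokenizer, 'page.png', ocr_type='format')
print(formatted)
```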