srimanth-d committed
Commit d5f1f2e
1 Parent(s): c5b2116

Delete modeling_got.py

Files changed (1)
  1. modeling_got.py +0 -881
modeling_got.py DELETED
@@ -1,881 +0,0 @@
from transformers import Qwen2Config, Qwen2Model, Qwen2ForCausalLM, StoppingCriteria, TextStreamer
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from typing import List, Optional, Tuple, Union
from transformers.cache_utils import Cache
import requests
from PIL import Image
from io import BytesIO
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .got_vision_b import build_GOT_vit_b
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import dataclasses

DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = '<imgpad>'
DEFAULT_IM_START_TOKEN = '<img>'
DEFAULT_IM_END_TOKEN = '</img>'

from enum import auto, Enum


class SeparatorStyle(Enum):
    """Different separator styles."""
    SINGLE = auto()
    TWO = auto()
    MPT = auto()


@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history."""
    system: str
    roles: List[str]
    messages: List[List[str]]
    offset: int
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "<|im_end|>"
    sep2: str = None
    version: str = "Unknown"

    skip_next: bool = False

    def get_prompt(self):
        if self.sep_style == SeparatorStyle.SINGLE:
            ret = self.system + self.sep + '\n'
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.TWO:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.MPT:
            if self.system:
                ret = self.system + self.sep
            else:
                ret = ''
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def append_message(self, role, message):
        self.messages.append([role, message])

    def copy(self):
        return Conversation(
            system=self.system,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2)


class KeywordsStoppingCriteria(StoppingCriteria):
    def __init__(self, keywords, tokenizer, input_ids):
        self.keywords = keywords
        self.keyword_ids = [tokenizer(keyword).input_ids for keyword in keywords]
        self.keyword_ids = [keyword_id[0] for keyword_id in self.keyword_ids if type(keyword_id) is list and len(keyword_id) == 1]
        self.tokenizer = tokenizer
        self.start_len = None
        self.input_ids = input_ids

    def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        if self.start_len is None:
            self.start_len = self.input_ids.shape[1]
        else:
            for keyword_id in self.keyword_ids:
                if output_ids[0, -1] == keyword_id:
                    return True
            outputs = self.tokenizer.batch_decode(output_ids[:, self.start_len:], skip_special_tokens=True)[0]
            for keyword in self.keywords:
                if keyword in outputs:
                    return True
        return False


class GOTImageEvalProcessor:
    def __init__(self, image_size=384, mean=None, std=None):
        if mean is None:
            mean = (0.48145466, 0.4578275, 0.40821073)
        if std is None:
            std = (0.26862954, 0.26130258, 0.27577711)

        self.normalize = transforms.Normalize(mean, std)

        self.transform = transforms.Compose(
            [
                transforms.Resize(
                    (image_size, image_size), interpolation=InterpolationMode.BICUBIC
                ),
                transforms.ToTensor(),
                self.normalize,
            ]
        )

    def __call__(self, item):
        return self.transform(item)


class GOTConfig(Qwen2Config):
    model_type = "GOT"


class GOTQwenModel(Qwen2Model):
    config_class = GOTConfig

    def __init__(self, config: Qwen2Config):
        super(GOTQwenModel, self).__init__(config)

        self.vision_tower_high = build_GOT_vit_b()

        self.mm_projector_vary = nn.Linear(1024, 1024)

    def initialize_vision_modules(
        self,
        vision_tower,
        pretrained_stage1_model=None,
        freeze_vision_tower=False,
        use_im_start_end=False,
        vision_select_layer=-1,
        dtype=torch.float16,
        device="mps"
    ):
        image_processor_high = GOTImageEvalProcessor(image_size=1024)

        self.vision_tower_high = self.vision_tower_high.to(dtype=dtype, device=device)

        self.mm_projector_vary = self.mm_projector_vary.to(dtype=dtype, device=device)

        image_token_len = 256

        self.config.vision_tower = vision_tower
        self.config.image_token_len = image_token_len

        self.config.use_im_start_end = True

        self.config.vision_select_layer = vision_select_layer
        self.config.freeze_vision_tower = freeze_vision_tower

        return dict(
            image_processor_high=image_processor_high,
            image_token_len=image_token_len,
        )

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:

        # HACK: replace back original embeddings for LLaVA pretraining
        orig_embeds_params = getattr(self, 'orig_embeds_params', None)
        if orig_embeds_params is not None:
            with torch.no_grad():
                self.get_input_embeddings().weight[:-self.num_new_tokens] = orig_embeds_params[:-self.num_new_tokens].data

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        vision_tower_high = getattr(self, 'vision_tower_high', None)

        if vision_tower_high is not None and (input_ids.shape[1] != 1 or self.training) and images is not None:
            use_im_start_end = getattr(self.config, "use_im_start_end", -1)

            vision_select_layer = getattr(self.config, "vision_select_layer", -1)
            im_patch_token = getattr(self.config, "im_patch_token", -1)
            im_start_token = getattr(self.config, "im_start_token", -1)
            im_end_token = getattr(self.config, "im_end_token", -1)
            freeze_vision_tower = getattr(self.config, "freeze_vision_tower", False)

            # Hard-coded GOT special-token ids (override whatever was read from the config above)
            im_patch_token = 151859
            im_start_token = 151857
            im_end_token = 151858

            image_features = []

            for image in images:
                P, C, H, W = image.shape
                if P == 1:
                    with torch.set_grad_enabled(False):
                        cnn_feature = vision_tower_high(image)
                        cnn_feature = cnn_feature.flatten(2).permute(0, 2, 1)  # 256*1024
                    image_feature = self.mm_projector_vary(cnn_feature)
                    image_features.append(image_feature)
                else:
                    image_patches = torch.unbind(image)
                    image_patches_features = []
                    for image_patch in image_patches:
                        image_p = torch.stack([image_patch])

                        with torch.set_grad_enabled(False):
                            cnn_feature_p = vision_tower_high(image_p)
                            cnn_feature_p = cnn_feature_p.flatten(2).permute(0, 2, 1)
                        image_feature_p = self.mm_projector_vary(cnn_feature_p)
                        image_patches_features.append(image_feature_p)
                    image_feature = torch.cat(image_patches_features, dim=1)
                    image_features.append(image_feature)

            dummy_image_features_2 = torch.zeros(256, 1024, device=inputs_embeds.device, dtype=inputs_embeds.dtype)
            dummy_image_features = dummy_image_features_2
            use_im_start_end = True
            new_input_embeds = []
            for cur_input_ids, cur_input_embeds, cur_image_features in zip(input_ids, inputs_embeds, image_features):
                if (cur_input_ids == im_patch_token).sum() == 0:
                    cur_input_embeds = cur_input_embeds + (0. * dummy_image_features).sum()
                    new_input_embeds.append(cur_input_embeds)
                    continue

                if use_im_start_end:
                    if (cur_input_ids == im_start_token).sum() != (cur_input_ids == im_end_token).sum():
                        raise ValueError("The number of image start tokens and image end tokens should be the same.")

                    image_start_tokens = torch.where(cur_input_ids == im_start_token)[0]
                    for image_start_token_pos, per_cur_image_features in zip(image_start_tokens, cur_image_features):
                        per_cur_image_features = per_cur_image_features.to(device=cur_input_embeds.device)
                        num_patches = per_cur_image_features.shape[0]

                        if cur_input_ids[image_start_token_pos + num_patches + 1] != im_end_token:
                            raise ValueError("The image end token should follow the image start token.")

                        # Splice the projected vision features between <img> and </img>
                        cur_input_embeds = torch.cat(
                            (
                                cur_input_embeds[:image_start_token_pos + 1],
                                per_cur_image_features,
                                cur_input_embeds[image_start_token_pos + num_patches + 1:]
                            ),
                            dim=0
                        )

                    new_input_embeds.append(cur_input_embeds)
                else:
                    raise NotImplementedError

            inputs_embeds = torch.stack(new_input_embeds, dim=0)

        return super(GOTQwenModel, self).forward(
            input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values,
            inputs_embeds=inputs_embeds, use_cache=use_cache, position_ids=position_ids,
            output_attentions=output_attentions, output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )


class GOTQwenForCausalLM(Qwen2ForCausalLM):
    config_class = GOTConfig
    # supports_gradient_checkpointing = True

    def __init__(self, config):
        super(Qwen2ForCausalLM, self).__init__(config)
        self.model = GOTQwenModel(config)

        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            images=images,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        # Omit tokens covered by past_key_values
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            #     some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            #     input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "images": kwargs.get("images", None),
            }
        )
        return model_inputs

    def initialize_vision_tokenizer(
        self,
        tokenizer,
        freeze_lm_model=False,
        pretrained_stage1_model=None,
        device="mps"
    ):
        config = self.get_model().config

        self.resize_token_embeddings(len(tokenizer))

        config.im_patch_token = 151859

        config.use_im_start_end = True

        if config.use_im_start_end:
            self.resize_token_embeddings(len(tokenizer))
            config.im_start_token, config.im_end_token = 151857, 151858

    def load_image(self, image_file):
        if image_file.startswith('http') or image_file.startswith('https'):
            response = requests.get(image_file)
            image = Image.open(BytesIO(response.content)).convert('RGB')
        else:
            image = Image.open(image_file).convert('RGB')
        return image

    def disable_torch_init(self):
        """
        Disable the redundant torch default initialization to accelerate model creation.
        """
        import torch
        setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
        setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)

    def chat(self, tokenizer, image_file, ocr_type, ocr_box='', ocr_color='', render=False, save_render_file=None, print_prompt=False, gradio_input=False, stream_flag=False):

        self.disable_torch_init()

        image_processor_high = GOTImageEvalProcessor(image_size=1024)

        use_im_start_end = True

        image_token_len = 256

        if gradio_input:
            image = image_file.copy()
        else:
            image = self.load_image(image_file)

        w, h = image.size

        if ocr_type == 'format':
            qs = 'OCR with format: '
        else:
            qs = 'OCR: '

        if ocr_box:
            bbox = eval(ocr_box)
            if len(bbox) == 2:
                bbox[0] = int(bbox[0] / w * 1000)
                bbox[1] = int(bbox[1] / h * 1000)
            if len(bbox) == 4:
                bbox[0] = int(bbox[0] / w * 1000)
                bbox[1] = int(bbox[1] / h * 1000)
                bbox[2] = int(bbox[2] / w * 1000)
                bbox[3] = int(bbox[3] / h * 1000)
            if ocr_type == 'format':
                qs = str(bbox) + ' ' + 'OCR with format: '
            else:
                qs = str(bbox) + ' ' + 'OCR: '

        if ocr_color:
            if ocr_type == 'format':
                qs = '[' + ocr_color + ']' + ' ' + 'OCR with format: '
            else:
                qs = '[' + ocr_color + ']' + ' ' + 'OCR: '

        if use_im_start_end:
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_PATCH_TOKEN * image_token_len + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

        conv_mpt = Conversation(
            system="""<|im_start|>system
You should follow the instructions carefully and explain your answers in detail.""",
            # system = None,
            roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
            version="mpt",
            messages=(),
            offset=0,
            sep_style=SeparatorStyle.MPT,
            sep="<|im_end|>",
        )

        conv = conv_mpt.copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        if print_prompt:
            print(prompt)

        inputs = tokenizer([prompt])

        image_tensor_1 = image_processor_high(image)

        input_ids = torch.as_tensor(inputs.input_ids)

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
        streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

        if stream_flag:
            with torch.autocast("cpu", dtype=torch.bfloat16):
                output_ids = self.generate(
                    input_ids,
                    images=[image_tensor_1.unsqueeze(0).half()],
                    do_sample=False,
                    num_beams=1,
                    no_repeat_ngram_size=20,
                    streamer=streamer,
                    max_new_tokens=4096,
                    stopping_criteria=[stopping_criteria]
                )
        else:
            with torch.autocast("cpu", dtype=torch.bfloat16):
                output_ids = self.generate(
                    input_ids,
                    images=[image_tensor_1.unsqueeze(0).half()],
                    do_sample=False,
                    num_beams=1,
                    no_repeat_ngram_size=20,
                    # streamer=streamer,
                    max_new_tokens=4096,
                    stopping_criteria=[stopping_criteria]
                )

        outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip()

        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()
        response_str = outputs

        if render:
            print('==============rendering===============')
            from .render_tools import svg_to_html, content_mmd_to_html, tik_html, translation_table

            if '**kern' in outputs:
                import verovio
                tk = verovio.toolkit()
                tk.loadData(outputs)
                tk.setOptions({"pageWidth": 2100, "footer": 'none',
                               'barLineWidth': 0.5, 'beamMaxSlope': 15,
                               'staffLineWidth': 0.2, 'spacingStaff': 6})
                tk.getPageCount()
                svg = tk.renderToSVG()
                svg = svg.replace("overflow=\"inherit\"", "overflow=\"visible\"")

                svg_to_html(svg, save_render_file)

            if ocr_type == 'format' and '**kern' not in outputs:

                if '\\begin{tikzpicture}' not in outputs:
                    html_path_2 = save_render_file
                    right_num = outputs.count('\\right')
                    left_num = outputs.count('\\left')

                    if right_num != left_num:
                        outputs = outputs.replace('\\left(', '(').replace('\\right)', ')').replace('\\left[', '[').replace('\\right]', ']').replace('\\left{', '{').replace('\\right}', '}').replace('\\left|', '|').replace('\\right|', '|').replace('\\left.', '.').replace('\\right.', '.')

                    outputs = outputs.replace('"', '``').replace('$', '')

                    outputs_list = outputs.split('\n')
                    gt = ''
                    for out in outputs_list:
                        gt += '"' + out.replace('\\', '\\\\') + r'\n' + '"' + '+' + '\n'

                    gt = gt[:-2]

                    lines = content_mmd_to_html
                    lines = lines.split("const text =")
                    new_web = lines[0] + 'const text =' + gt + lines[1]

                else:
                    html_path_2 = save_render_file
                    outputs = outputs.translate(translation_table)
                    outputs_list = outputs.split('\n')
                    gt = ''
                    for out in outputs_list:
                        if out:
                            if '\\begin{tikzpicture}' not in out and '\\end{tikzpicture}' not in out:
                                while out[-1] == ' ':
                                    out = out[:-1]
                                    if not out:
                                        # stop once the line is empty; guards against indexing an empty string
                                        break

                                if out:
                                    if out[-1] != ';':
                                        gt += out[:-1] + ';\n'
                                    else:
                                        gt += out + '\n'
                            else:
                                gt += out + '\n'

                    lines = tik_html
                    lines = lines.split("const text =")
                    new_web = lines[0] + gt + lines[1]

                with open(html_path_2, 'w') as web_f_new:
                    web_f_new.write(new_web)
        return response_str

    def dynamic_preprocess(self, image, min_num=1, max_num=6, image_size=1024, use_thumbnail=True):

        def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
            best_ratio_diff = float('inf')
            best_ratio = (1, 1)
            area = width * height
            for ratio in target_ratios:
                target_aspect_ratio = ratio[0] / ratio[1]
                ratio_diff = abs(aspect_ratio - target_aspect_ratio)
                if ratio_diff < best_ratio_diff:
                    best_ratio_diff = ratio_diff
                    best_ratio = ratio
                elif ratio_diff == best_ratio_diff:
                    if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                        best_ratio = ratio
            # print(f'width: {width}, height: {height}, best_ratio: {best_ratio}')
            return best_ratio

        orig_width, orig_height = image.size
        aspect_ratio = orig_width / orig_height

        # calculate the existing image aspect ratio
        target_ratios = set(
            (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
            i * j <= max_num and i * j >= min_num)
        target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

        # find the closest aspect ratio to the target
        target_aspect_ratio = find_closest_aspect_ratio(
            aspect_ratio, target_ratios, orig_width, orig_height, image_size)

        # calculate the target width and height
        target_width = image_size * target_aspect_ratio[0]
        target_height = image_size * target_aspect_ratio[1]
        blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

        # resize the image
        resized_img = image.resize((target_width, target_height))
        processed_images = []
        for i in range(blocks):
            box = (
                (i % (target_width // image_size)) * image_size,
                (i // (target_width // image_size)) * image_size,
                ((i % (target_width // image_size)) + 1) * image_size,
                ((i // (target_width // image_size)) + 1) * image_size
            )
            # split the image
            split_img = resized_img.crop(box)
            processed_images.append(split_img)
        assert len(processed_images) == blocks
        if use_thumbnail and len(processed_images) != 1:
            thumbnail_img = image.resize((image_size, image_size))
            processed_images.append(thumbnail_img)
        return processed_images

    def chat_crop(self, tokenizer, image_file, ocr_type, render=False, save_render_file=None, print_prompt=False, gradio_input=False, stream_flag=False):
        # Model
        self.disable_torch_init()
        multi_page = False

        image_processor_high = GOTImageEvalProcessor(image_size=1024)

        use_im_start_end = True

        image_token_len = 256

        image_list = []

        # if len(image_file_list) > 1:
        #     multi_page = True

        if multi_page:
            qs = 'OCR with format across multi pages: '
            # only for png files
            # import glob
            # from natsort import natsorted
            # patches = glob.glob(image_file + '/*png')
            patches = image_file
            # patches = natsorted(patches)
            sub_images = []
            for sub_image in patches:
                sub_images.append(self.load_image(sub_image))

            ll = len(patches)

        else:
            if ocr_type == 'format':
                qs = 'OCR with format upon the patch reference: '
            else:
                qs = 'OCR upon the patch reference: '
            if gradio_input:
                img = image_file.copy()
            else:
                img = self.load_image(image_file)
            sub_images = self.dynamic_preprocess(img)
            ll = len(sub_images)

        for image in sub_images:
            image_tensor_1 = image_processor_high(image)
            image_list.append(image_tensor_1)

        image_list = torch.stack(image_list)

        print('====new images batch size======: \n', image_list.shape)

        if use_im_start_end:
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_PATCH_TOKEN * image_token_len * ll + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

        conv_mpt = Conversation(
            system="""<|im_start|>system
You should follow the instructions carefully and explain your answers in detail.""",
            # system = None,
            roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
            version="mpt",
            messages=(),
            offset=0,
            sep_style=SeparatorStyle.MPT,
            sep="<|im_end|>",
        )

        conv = conv_mpt.copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        if print_prompt:
            print(prompt)

        inputs = tokenizer([prompt])

        input_ids = torch.as_tensor(inputs.input_ids)

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
        streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

        if stream_flag:
            with torch.autocast("cpu", dtype=torch.bfloat16):
                output_ids = self.generate(
                    input_ids,
                    images=[image_list.half()],
                    do_sample=False,
                    num_beams=1,
                    # no_repeat_ngram_size = 20,
                    streamer=streamer,
                    max_new_tokens=4096,
                    stopping_criteria=[stopping_criteria]
                )
        else:
            with torch.autocast("cpu", dtype=torch.bfloat16):
                output_ids = self.generate(
                    input_ids,
                    images=[image_list.half()],
                    do_sample=False,
                    num_beams=1,
                    # no_repeat_ngram_size = 20,
                    # streamer=streamer,
                    max_new_tokens=4096,
                    stopping_criteria=[stopping_criteria]
                )

        outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip()

        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()
        response_str = outputs

        if render:
            print('==============rendering===============')
            from .render_tools import content_mmd_to_html
            html_path_2 = save_render_file
            right_num = outputs.count('\\right')
            left_num = outputs.count('\\left')

            if right_num != left_num:
                outputs = outputs.replace('\\left(', '(').replace('\\right)', ')').replace('\\left[', '[').replace('\\right]', ']').replace('\\left{', '{').replace('\\right}', '}').replace('\\left|', '|').replace('\\right|', '|').replace('\\left.', '.').replace('\\right.', '.')

            outputs = outputs.replace('"', '``').replace('$', '')

            outputs_list = outputs.split('\n')
            gt = ''
            for out in outputs_list:
                gt += '"' + out.replace('\\', '\\\\') + r'\n' + '"' + '+' + '\n'

            gt = gt[:-2]

            lines = content_mmd_to_html
            lines = lines.split("const text =")
            new_web = lines[0] + 'const text =' + gt + lines[1]

            with open(html_path_2, 'w') as web_f_new:
                web_f_new.write(new_web)

        return response_str
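
For context, the deleted module provided the repository's custom `GOTQwenForCausalLM` with its `chat` and `chat_crop` OCR entry points. Below is a minimal usage sketch of how such a custom modeling file is typically driven through `trust_remote_code` loading; the repo id is a placeholder and the loading flags are assumptions, not taken from this commit.

# Minimal sketch (assumptions: placeholder repo id, standard trust_remote_code loading
# of a revision that still contained modeling_got.py).
from transformers import AutoModel, AutoTokenizer

repo_id = "srimanth-d/GOT_CPU"  # hypothetical id; substitute the actual repository
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True, low_cpu_mem_usage=True).eval()

# Plain OCR on a single image; ocr_type='format' returns formatted output,
# and chat_crop() tiles large pages via dynamic_preprocess() before decoding.
print(model.chat(tokenizer, "page.png", ocr_type="ocr"))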