# File: pixparse-main/src/pixparse/app/eval.py
import logging
import os
import json
from dataclasses import dataclass, replace, field
from typing import List
import simple_parsing
from simple_parsing import ArgumentParser
import torch
from pixparse.data import DataCfg, create_loader
from pixparse.framework import TaskEval, TaskEvalCfg, DeviceEnv, Monitor, evaluate, setup_logging, random_seed
from pixparse.utils.s3_utils import load_checkpoint_from_s3
from pixparse.task.task_factory import TaskFactory
from chug.webdataset import create_doc_anno_pipe, create_image_text_pipe
from collections import OrderedDict
_logger = logging.getLogger('eval')
@dataclass
class EvalCfg:
experiment: str = ''
output_dir: str = './output'
log_filename: str = 'out.log'
dataset_name: str = ''
s3_bucket: str = ''
checkpoint_path: str = ''
metrics_file_path: str = ''
task_name: str = ''
datasets: List[str] = field(default_factory=lambda : ['eval'])
seed: int = 42
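# Run evaluation over every prepared loader for the task and dump the resulting
# metrics dict to cfg.metrics_file_path as JSON.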
def eval(cfg: EvalCfg, task: TaskEval, eval_loaders: dict):
device_env = task.device_env
metrics = evaluate(task, eval_loaders)
with open(cfg.metrics_file_path, 'w') as f:
json.dump(metrics, f)
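# Module-level CLI parser built with simple_parsing: EvalCfg, TaskEvalCfg and DataCfg fields
# are exposed as both nested and flat arguments (ArgumentGenerationMode.BOTH) with dash
# variants, plus a config-file path argument (add_config_path_arg=True).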
parser = ArgumentParser(add_option_string_dash_variants=simple_parsing.DashVariant.DASH, argument_generation_mode=simple_parsing.ArgumentGenerationMode.BOTH, add_config_path_arg=True)
parser.add_arguments(EvalCfg, dest='eval')
parser.add_arguments(TaskEvalCfg, dest='task')
parser.add_arguments(DataCfg, dest='data')
def main():
args = parser.parse_args()
eval_cfg: EvalCfg = args.eval
data_cfg: DataCfg = args.data
device_env = DeviceEnv()
(task, task_cfg) = TaskFactory.create_task(task_name=eval_cfg.task_name, task_args=args.task, device_env=device_env, monitor=None)
random_seed(eval_cfg.seed, rank=device_env.global_rank)
_logger.info(f'Device env is {device_env}')
assert eval_cfg.output_dir is not None, f'output_dir is not provided. Stopping eval run.'
if device_env.is_primary():
log_path = os.path.join(eval_cfg.output_dir, eval_cfg.log_filename)
setup_logging(log_path)
monitor = Monitor(eval_cfg.experiment, output_dir=eval_cfg.output_dir, output_enabled=device_env.is_primary())
if eval_cfg.task_name not in ['donut_eval_ocr']:
checkpoint_path = eval_cfg.checkpoint_path
eval_cfg = replace(eval_cfg, checkpoint_path=checkpoint_path)
if eval_cfg.s3_bucket != '':
_logger.info('s3 bucket specified. Loading checkpoint from s3.')
checkpoint = load_checkpoint_from_s3(eval_cfg.s3_bucket, eval_cfg.checkpoint_path)
else:
assert os.path.isfile(checkpoint_path), f'Cannot find checkpoint {checkpoint_path}: File not found'
checkpoint = torch.load(eval_cfg.checkpoint_path)
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
else:
state_dict = checkpoint['model']
checkpoint_name = eval_cfg.checkpoint_path.replace('/', '_').replace('.pt', '')
metrics_file_name = f'{checkpoint_name}-{eval_cfg.dataset_name}-metrics.json'
eval_state_dict = {k.replace('module.', ''): v for (k, v) in state_dict.items()}
task.resume_state_dict = eval_state_dict
else:
metrics_file_name = f'{eval_cfg.task_name}-{eval_cfg.dataset_name}-metrics.json'
eval_cfg.metrics_file_path = os.path.join(eval_cfg.output_dir, metrics_file_name)
if device_env.is_primary():
_logger.info(task_cfg)
_logger.info(eval_cfg)
loaders = {}
assert data_cfg.eval is not None, f'data_cfg.eval is not set.'
loaders['eval'] = create_loader(data_cfg.eval, is_train=False, collate_fn=task.collate_fn, image_preprocess=task.image_preprocess_eval, anno_preprocess=task.anno_preprocess_eval, image_fmt=task_cfg.model.image_encoder.image_fmt, world_size=device_env.world_size, local_rank=device_env.local_rank, create_decoder_pipe=create_image_text_pipe)
task.setup()
if device_env.is_primary():
_logger.info(task)
eval(eval_cfg, task, loaders)
task.end()
if __name__ == '__main__':
main()
# File: pixparse-main/src/pixparse/app/train.py
import logging
import os
from dataclasses import dataclass, replace
from datetime import datetime
from typing import Dict, Optional
import simple_parsing
from simple_parsing import ArgumentParser
import torch
from pixparse.data import DataCfg, create_loader
from pixparse.framework import DeviceEnv, Monitor, train_one_interval, evaluate, setup_logging, random_seed, TaskTrain, TaskTrainCfg
from pixparse.utils.name_utils import clean_name
from pixparse.utils.s3_utils import load_checkpoint_from_s3
from pixparse.task import TaskFactory
from chug.common import LoaderBundle
from chug.webdataset import create_doc_anno_pipe
from collections import OrderedDict
_logger = logging.getLogger('train')
@dataclass
class TrainCfg:
experiment: Optional[str] = None
output_dir: str = './output'
log_filename: str = 'out.log'
s3_bucket: str = ''
resume: bool = False
checkpoint_path: str = ''
output_checkpoint_dir: Optional[str] = None
seed: int = 42
task_name: str = 'cruller_pretrain'
wandb: bool = False
wandb_project: str = 'unknown'
tensorboard: bool = False
log_eval_data: bool = False
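# Main training loop: iterate over training intervals; after each interval the primary rank
# saves the model state dict to <output_checkpoint_dir>/<experiment>/checkpoint-<i>.pt.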
def train(cfg: TrainCfg, task: TaskTrain, loaders: Dict[str, LoaderBundle]):
device_env = task.device_env
train_loader = loaders['train']
for i in range(task.start_interval, task.num_intervals):
train_loader.set_interval(i)
train_one_interval(task, train_loader)
if device_env.is_primary():
checkpoint_dir = os.path.join(cfg.output_checkpoint_dir, cfg.experiment)
os.makedirs(checkpoint_dir, exist_ok=True)
torch.save(task.model.state_dict(), os.path.join(checkpoint_dir, f'checkpoint-{i}.pt'))
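# CLI parser mirroring eval.py; TrainCfg, TaskTrainCfg and DataCfg fields all become
# command-line arguments. Illustrative invocation (flag spellings are generated by
# simple_parsing from the dataclass fields above, so treat this as a sketch):
#   python -m pixparse.app.train --task-name cruller_pretrain --data.train.source <wds-shards> \
#       --data.train.batch-size 8 --data.train.num-samples 100000 --data.train.split train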
parser = ArgumentParser(add_option_string_dash_variants=simple_parsing.DashVariant.DASH, argument_generation_mode=simple_parsing.ArgumentGenerationMode.BOTH, add_config_path_arg=True)
parser.add_arguments(TrainCfg, dest='train')
parser.add_arguments(TaskTrainCfg, dest='task')
parser.add_arguments(DataCfg, dest='data')
def main():
args = parser.parse_args()
train_cfg: TrainCfg = args.train
data_cfg: DataCfg = args.data
device_env = DeviceEnv()
(task, task_cfg) = TaskFactory.create_task(task_name=train_cfg.task_name, task_args=args.task, device_env=device_env, monitor=None)
random_seed(train_cfg.seed, rank=device_env.global_rank)
_logger.info(f'Device env is {device_env}')
if train_cfg.experiment is None:
model_name_safe = clean_name(task_cfg.model_name)
date_str = datetime.now().strftime('%Y%m%d-%H%M%S')
if device_env.world_size > 1:
date_str = device_env.broadcast_object(date_str)
experiment = '-'.join([date_str, f'task_{train_cfg.task_name}', f'model_{model_name_safe}', f"lr_{'{:.1e}'.format(task_cfg.opt.learning_rate)}", f'b_{data_cfg.train.batch_size}'])
train_cfg = replace(train_cfg, experiment=experiment)
resume_latest = False
experiment_path = os.path.join(train_cfg.output_dir, train_cfg.experiment)
log_path = None
if device_env.is_primary():
os.makedirs(experiment_path, exist_ok=True)
log_path = os.path.join(experiment_path, train_cfg.log_filename)
if os.path.exists(log_path) and (not resume_latest):
            _logger.error(f'Error. Experiment {train_cfg.experiment} already exists. Use --experiment to specify a new experiment.')
return -1
setup_logging(log_path)
task.monitor = Monitor(train_cfg.experiment, output_dir=experiment_path, wandb=train_cfg.wandb, wandb_project=train_cfg.wandb_project, tensorboard=train_cfg.tensorboard, output_enabled=device_env.is_primary())
if train_cfg.resume:
checkpoint_path = train_cfg.checkpoint_path
train_cfg = replace(train_cfg, checkpoint_path=checkpoint_path)
if train_cfg.s3_bucket != '':
_logger.info('s3 bucket specified. Loading checkpoint from s3.')
checkpoint = load_checkpoint_from_s3(train_cfg.s3_bucket, train_cfg.checkpoint_path)
else:
assert os.path.isfile(checkpoint_path), f'Cannot find checkpoint {checkpoint_path}: File not found'
checkpoint = torch.load(train_cfg.checkpoint_path)
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
else:
state_dict = checkpoint['model']
task.state_dict = state_dict
task.resume = True
output_checkpoint_dir = train_cfg.output_checkpoint_dir or os.path.join(experiment_path, 'checkpoints')
os.makedirs(output_checkpoint_dir, exist_ok=True)
train_cfg = replace(train_cfg, output_checkpoint_dir=output_checkpoint_dir)
if device_env.is_primary():
_logger.info(task_cfg)
_logger.info(train_cfg)
loaders = {}
assert data_cfg.train is not None or data_cfg.eval is not None, f'Neither data_cfg.train nor data_cfg.eval are set.'
if data_cfg.train is not None:
loaders['train'] = create_loader(data_cfg.train, is_train=True, collate_fn=task.collate_fn, image_preprocess=task.image_preprocess_train, anno_preprocess=task.anno_preprocess_train, image_fmt=task_cfg.model.image_encoder.image_fmt, world_size=device_env.world_size, global_rank=device_env.global_rank, create_decoder_pipe=create_doc_anno_pipe)
task.train_setup(num_batches_per_interval=loaders['train'].num_batches)
if device_env.is_primary():
_logger.info(task)
train(train_cfg, task, loaders)
if __name__ == '__main__':
main()
# File: pixparse-main/src/pixparse/data/config.py
from dataclasses import dataclass, field
from typing import List, Optional
@dataclass
class PreprocessCfg:
pass
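# Per-split dataset configuration; `format` selects the loader backend in
# pixparse.data.loader.create_loader ('webdataset' or 'hf_dataset').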
@dataclass
class DatasetCfg:
source: str
num_samples: int
batch_size: int
split: str
format: str = 'webdataset'
num_workers: int = 4
@dataclass
class DataCfg:
train: Optional[DatasetCfg] = None
eval: Optional[DatasetCfg] = None
# File: pixparse-main/src/pixparse/data/datasets_utils.py
import json
import os
from ast import literal_eval
import torch
from datasets import load_dataset
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from pixparse.utils.json_utils import json2token
"""Dataset utilities: a custom DocVQA-style dataset wrapper and helpers for collecting special tokens."""
class CustomVQADataset(Dataset):
def __init__(self, root_dir, split, transform=None):
self.extra_tokens = ['', '', '', '']
self.root_dir = root_dir
self.split = split
assert split in ['train', 'test', 'val'], 'split is not train, test or val.'
if split == 'test' or split == 'val':
json_path = os.path.join(root_dir, split, f'{split}_v1.0.json')
else:
json_path = os.path.join(root_dir, split, f'processed_{split}_v1.0.json')
assert os.path.isdir(self.root_dir), f"Can't find {root_dir}. Make sure you have DocVQA files locally."
assert os.path.isfile(json_path), f'{json_path} not found. Make sure you have the processed dataset.'
self.img_dir = os.path.join(root_dir, split)
with open(json_path, 'r') as f:
self.data_dict = json.load(f)
self.all_images = list(self.data_dict.keys())
self.transform = transform
def __len__(self):
if self.split == 'test' or self.split == 'val':
return len(self.data_dict['data'])
return len(self.all_images)
def __getitem__(self, index):
if self.split == 'test':
entry = self.data_dict['data'][index]
labels = '' + entry['question'] + ''
img_path = os.path.join(self.img_dir, entry['image'])
question_id = entry['questionId']
image_id = entry['image']
        elif self.split == 'val':
entry = self.data_dict['data'][index]
labels = {'question': entry['question'], 'answers': entry['answers']}
img_path = os.path.join(self.img_dir, entry['image'])
question_id = entry['questionId']
image_id = entry['image']
else:
image_id = self.all_images[index]
questions_and_answers = self.data_dict[image_id]
labels = questions_and_answers
img_path = os.path.join(self.img_dir, image_id)
question_id = -1
image = Image.open(img_path).convert('L')
if self.transform:
image = self.transform(image)
return {'image': image, 'labels': labels, 'image_id': image_id, 'question_id': question_id}
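# Wrapper that swallows per-sample errors: items that fail to load return None so downstream
# collate functions can filter them out instead of crashing the worker.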
class SafeDataset:
def __init__(self, original_dataset):
self.original_dataset = original_dataset
def __len__(self):
return len(self.original_dataset)
def __getitem__(self, idx):
try:
item = self.original_dataset[idx]
return item
except Exception as e:
return None
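# Scan a dataset's ground-truth JSON (currently only naver-clova-ix/cord-v2) with json2token
# to collect any special tokens that are not already in `all_special_tokens`.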
def get_additional_tokens_from_dataset(all_special_tokens: list, dataset=None, dataset_id: str='naver-clova-ix/cord-v2') -> list:
if dataset_id == 'naver-clova-ix/cord-v2':
def collate_fn(batch):
text_inputs = [literal_eval(item['ground_truth'])['gt_parse'] for item in batch]
return {'label': text_inputs}
cord = load_dataset(dataset_id)
loader = DataLoader(cord['train'], batch_size=32, collate_fn=collate_fn)
new_special_tokens = []
for (i, batch) in enumerate(loader):
for text in batch['label']:
(_, batch_special_tokens) = json2token(text, all_special_tokens)
new_special_tokens += batch_special_tokens
new_special_tokens = list(set(new_special_tokens))
return new_special_tokens
# File: pixparse-main/src/pixparse/data/loader.py
from typing import Callable
from chug import create_wds_loader, create_doc_anno_pipe
from chug.common import LoaderBundle
from datasets import VerificationMode
from datasets import load_dataset
from torch.utils.data import DataLoader, DistributedSampler
from pixparse.data.datasets_utils import SafeDataset, CustomVQADataset
from .config import DatasetCfg
class GenericLoader(DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_batches = len(self.dataset) // self.batch_size
if len(self.dataset) % self.batch_size != 0:
self.num_batches += 1
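# Build a train/eval loader from a DatasetCfg: webdataset sources go through chug's
# create_wds_loader, while 'hf_dataset' sources use a plain torch DataLoader (with a
# DistributedSampler when world_size > 1) wrapped in a chug LoaderBundle.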
def create_loader(cfg: DatasetCfg, is_train: bool, image_preprocess, anno_preprocess, collate_fn: Callable=None, image_key='pdf;tif;tiff;png;jpg;jpeg', image_fmt='L', start_interval: int=0, seed: int=0, world_size: int=1, global_rank: int=0, create_decoder_pipe: Callable=create_doc_anno_pipe):
decoder = create_decoder_pipe(image_preprocess=image_preprocess, anno_preprocess=anno_preprocess, image_key=image_key, image_fmt=image_fmt)
if cfg.format == 'webdataset':
loader = create_wds_loader(cfg.source, decoder, is_train=is_train, num_samples=cfg.num_samples, workers=cfg.num_workers, batch_size=cfg.batch_size, seed=seed, world_size=world_size)
elif cfg.format == 'hf_dataset':
if cfg.source == 'SinglePageDocVQA':
dataset = CustomVQADataset(root_dir=f'/fsx/pablo/.cache/{cfg.source}', split=cfg.split)
else:
dataset = load_dataset(cfg.source, verification_mode=VerificationMode.ALL_CHECKS)[cfg.split]
dataset = SafeDataset(dataset)
sampler = None
if world_size > 1:
sampler = DistributedSampler(dataset, rank=global_rank, shuffle=True, seed=seed, num_replicas=world_size, drop_last=True)
base_loader = DataLoader(dataset=dataset, collate_fn=collate_fn, sampler=sampler, batch_size=cfg.batch_size, num_workers=cfg.num_workers)
loader = LoaderBundle(loader=base_loader, num_batches=len(base_loader), num_samples=len(dataset), sampler=sampler)
return loader
# File: pixparse-main/src/pixparse/data/preprocess.py
import logging
from typing import Callable
import torch
_logger = logging.getLogger(__name__)
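# Tokenize a plain-text annotation as `task_start_token + anno + eos` and build the training
# target: pad tokens and everything up to the prompt end token are masked with ignore_id.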
def preprocess_text_anno(anno, tokenizer: Callable, max_position_embeddings: int, task_start_token: str, prompt_end_token: str, ignore_id: int=-100, generator=None):
text = task_start_token + anno + tokenizer.eos_token
tokenizer_fn = lambda x: tokenizer(x, add_special_tokens=False, return_tensors='pt', max_length=max_position_embeddings, padding='max_length', truncation=True).input_ids[0]
text = tokenizer_fn(text)
target = text.clone()
target[target == tokenizer.pad_token_id] = ignore_id
prompt_end_token_id = tokenizer.convert_tokens_to_ids(prompt_end_token)
target[:torch.nonzero(target == prompt_end_token_id).sum() + 1] = ignore_id
return dict(text=[text], target=[target])
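# OCR pretraining preprocessing: pick a random page with text from the annotation, join its
# lines, then tokenize and mask it the same way as preprocess_text_anno. Pages without text
# are skipped via get_next_valid_page_index.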
def preprocess_ocr_anno(anno, tokenizer: Callable, max_position_embeddings: int, task_start_token: str, prompt_end_token: str, ignore_id: int=-100, generator=None):
if isinstance(anno, list):
_logger.warning('Old [id, {}] annotation form found, correcting...')
anno = anno[1]
num_pages = len(anno['pages'])
if not num_pages:
raise RuntimeError('Empty annotation. Skipping...')
tokenizer_fn = lambda x: tokenizer(x, add_special_tokens=False, return_tensors='pt', max_length=max_position_embeddings, padding='max_length', truncation=True).input_ids[0]
pad_token_id = tokenizer.pad_token_id
prompt_end_token_id = tokenizer.convert_tokens_to_ids(prompt_end_token)
current_index = generator.randint(0, num_pages - 1)
if not anno['pages'][current_index]['text']:
current_index = get_next_valid_page_index(current_index, num_pages, anno)
page_indices = []
text_pages = []
target_pages = []
n_wanted_pages = min(1, num_pages)
while len(text_pages) < n_wanted_pages:
anno_page = anno['pages'][current_index]
if not anno_page['text']:
raise RuntimeError('No text on page, skipping...')
text = '\n'.join(anno_page['text'])
orig_text = text
text = task_start_token + text + tokenizer.eos_token
text = tokenizer_fn(text)
target = text.clone()
target[target == pad_token_id] = ignore_id
target[:torch.nonzero(target == prompt_end_token_id).sum() + 1] = ignore_id
text_pages.append(text)
target_pages.append(target)
page_indices.append(current_index)
current_index = get_next_valid_page_index(current_index, num_pages, anno)
return (dict(text=text_pages, target=target_pages), dict(page_indices=page_indices, num_pages=num_pages, orig_text=orig_text))
def get_next_valid_page_index(current_index: int, num_pages: int, anno: dict, retries: int=10):
for _ in range(retries):
current_index = (current_index + 1) % num_pages
anno_page = anno['pages'][current_index]
if anno_page['text']:
return current_index
raise RuntimeError(f'No non-empty page found after {retries} attempts')
# File: pixparse-main/src/pixparse/data/transforms.py
import random
from typing import Tuple, Union
import timm.data.transforms
import torch
import torchvision.transforms.functional as F
from torchvision import transforms
from PIL import Image, ImageOps, ImageFilter
from timm.data.transforms import CenterCropOrPad
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
import numpy as np
try:
import albumentations as alb
from albumentations.pytorch import ToTensorV2
has_albumentations = True
except ImportError:
has_albumentations = False
try:
import cv2
has_cv2 = True
except ImportError:
has_cv2 = False
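# Transform factories: 'legacy' is a plain resize + normalize pipeline, 'better' is a
# torchvision-based augmentation stack, and 'nougat' mirrors the Donut/nougat-style
# albumentations pipeline (requires albumentations + cv2).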
def create_transforms(name, image_size, training=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, interpolation: str='bicubic', crop_margin: bool=False, align_long_axis: bool=False, fill=255):
basic_args = dict(training=training, image_mean=image_mean, image_std=image_std)
adv_args = dict(interpolation=interpolation, crop_margin=crop_margin, align_long_axis=align_long_axis, fill=fill)
if name == 'better':
return better_transforms(image_size, **basic_args, **adv_args)
elif name == 'nougat':
return nougat_transforms(image_size, **basic_args, **adv_args)
else:
return legacy_transforms(image_size, **basic_args)
def legacy_transforms(image_size, image_mean, image_std, training=False):
pp = transforms.Compose([transforms.Resize(image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.ToTensor(), transforms.Normalize(mean=image_mean, std=image_std)])
return pp
def better_transforms(image_size, training=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, interpolation='bicubic', crop_margin=False, align_long_axis=False, fill=255):
interpolation_mode = timm.data.transforms.str_to_interp_mode(interpolation)
pp = []
if crop_margin:
assert has_cv2, 'CV2 needed to use crop margin.'
pp += [CropMargin()]
if align_long_axis:
pp += [AlignLongAxis(image_size, interpolation=interpolation_mode)]
if training:
        pp += [
            ResizeKeepRatio(
                image_size, longest=1, interpolation=interpolation,
                random_scale_prob=0.05, random_scale_range=(0.85, 1.04),
                random_aspect_prob=0.05, random_aspect_range=(0.9, 1.11)),
            transforms.RandomApply([Bitmap()], p=0.05),
            transforms.RandomApply([transforms.RandomChoice([Erosion(3), Dilation(3)])], p=0.02),
            transforms.RandomApply([transforms.RandomAffine(degrees=0, shear=(0, 3.0, -3, 0), interpolation=interpolation_mode, fill=fill)], p=0.05),
            transforms.RandomApply([transforms.RandomAffine(degrees=3, translate=(0, 0.04), interpolation=interpolation_mode, fill=fill)], p=0.05),
            transforms.RandomApply([transforms.ElasticTransform(alpha=50.0, sigma=120 * 0.1, interpolation=interpolation_mode, fill=fill)], p=0.05),
            transforms.RandomApply([transforms.ColorJitter(0.1, 0.1)], p=0.05),
            transforms.RandomApply([transforms.GaussianBlur(3, sigma=(0.1, 0.5))], p=0.05),
            RandomPad(image_size, fill=fill),
            transforms.CenterCrop(image_size),
        ]
else:
pp += [ResizeKeepRatio(image_size, longest=1, interpolation=interpolation), CenterCropOrPad(image_size, fill=fill)]
pp += [transforms.ToTensor(), transforms.Normalize(image_mean, image_std)]
return transforms.Compose(pp)
def nougat_transforms(image_size, training=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, align_long_axis=False, interpolation='bicubic', fill=255, crop_margin=False):
assert has_albumentations, 'Albumentations and CV2 needed to use nougat transforms.'
if interpolation == 'bilinear':
interpolation_mode = 1
else:
interpolation_mode = 2
tv_pp = []
alb_pp = []
if crop_margin:
tv_pp += [CropMargin()]
if align_long_axis:
tv_pp += [AlignLongAxis(image_size)]
if training:
tv_pp += [ResizeKeepRatio(image_size, longest=1, interpolation=interpolation), RandomPad(image_size, fill=fill)]
        alb_pp += [
            BitmapAlb(p=0.05),
            alb.OneOf([ErosionAlb((2, 3)), DilationAlb((2, 3))], p=0.02),
            alb.Affine(shear={'x': (0, 3), 'y': (-3, 0)}, cval=(255, 255, 255), p=0.03),
            alb.ShiftScaleRotate(
                shift_limit_x=(0, 0.04), shift_limit_y=(0, 0.03), scale_limit=(-0.15, 0.03),
                rotate_limit=2, border_mode=0, interpolation=interpolation_mode, value=fill, p=0.03),
            alb.GridDistortion(distort_limit=0.05, border_mode=0, interpolation=interpolation_mode, value=fill, p=0.04),
            alb.Compose([
                alb.Affine(translate_px=(0, 5), always_apply=True, cval=(255, 255, 255)),
                alb.ElasticTransform(p=1, alpha=50, sigma=120 * 0.1, alpha_affine=120 * 0.01, border_mode=0, value=fill),
            ], p=0.04),
            alb.RandomBrightnessContrast(0.1, 0.1, True, p=0.03),
            alb.ImageCompression(95, p=0.07),
            alb.GaussNoise(20, p=0.08),
            alb.GaussianBlur((3, 3), p=0.03),
        ]
else:
tv_pp += [ResizeKeepRatio(image_size, longest=1, interpolation=interpolation), CenterCropOrPad(image_size, fill=fill)]
alb_pp += [alb.Normalize(image_mean, image_std), alb.pytorch.ToTensorV2()]
tv_pp += [alb_wrapper(alb.Compose(alb_pp))]
return transforms.Compose(tv_pp)
def alb_wrapper(transform):
def f(im):
return transform(image=np.asarray(im))['image']
return f
class CropMargin:
def __init__(self):
pass
def __call__(self, img):
if isinstance(img, torch.Tensor):
assert False
else:
data = np.array(img.convert('L'))
data = data.astype(np.uint8)
max_val = data.max()
min_val = data.min()
if max_val == min_val:
return img
data = (data - min_val) / (max_val - min_val) * 255
gray = 255 * (data < 200).astype(np.uint8)
coords = cv2.findNonZero(gray)
(a, b, w, h) = cv2.boundingRect(coords)
return img.crop((a, b, w + a, h + b))
class AlignLongAxis:
def __init__(self, input_size, interpolation=transforms.InterpolationMode.BICUBIC):
self.input_size = input_size
self.interpolation = interpolation
def __call__(self, img):
is_tensor = isinstance(img, torch.Tensor)
(img_height, img_width) = img.shape[-2:] if is_tensor else (img.height, img.width)
        if (self.input_size[0] > self.input_size[1] and img_width > img_height) or (self.input_size[0] < self.input_size[1] and img_width < img_height):
img = F.rotate(img, angle=-90, expand=True, interpolation=self.interpolation)
return img
class RandomPad:
def __init__(self, input_size, fill=0):
self.input_size = input_size
self.fill = fill
@staticmethod
def get_params(img, input_size):
(width, height) = F.get_image_size(img)
delta_width = max(input_size[1] - width, 0)
delta_height = max(input_size[0] - height, 0)
pad_left = random.randint(0, delta_width)
pad_top = random.randint(0, delta_height)
pad_right = delta_width - pad_left
pad_bottom = delta_height - pad_top
return (pad_left, pad_top, pad_right, pad_bottom)
def __call__(self, img):
padding = self.get_params(img, self.input_size)
img = F.pad(img, padding, self.fill)
return img
class ResizeKeepRatio:
def __init__(self, size, longest=0.0, interpolation='bilinear', random_scale_prob=0.0, random_scale_range=(0.85, 1.05), random_aspect_prob=0.0, random_aspect_range=(0.9, 1.11)):
if isinstance(size, (list, tuple)):
self.size = tuple(size)
else:
self.size = (size, size)
self.interpolation = timm.data.transforms.str_to_interp_mode(interpolation)
self.longest = float(longest)
self.random_scale_prob = random_scale_prob
self.random_scale_range = random_scale_range
self.random_aspect_prob = random_aspect_prob
self.random_aspect_range = random_aspect_range
@staticmethod
def get_params(img, target_size, longest, random_scale_prob=0.0, random_scale_range=(0.85, 1.05), random_aspect_prob=0.0, random_aspect_range=(0.9, 1.11)):
source_size = img.size[::-1]
(h, w) = source_size
(target_h, target_w) = target_size
ratio_h = h / target_h
ratio_w = w / target_w
ratio = max(ratio_h, ratio_w) * longest + min(ratio_h, ratio_w) * (1.0 - longest)
if random_scale_prob > 0 and random.random() < random_scale_prob:
ratio_factor = random.uniform(random_scale_range[0], random_scale_range[1])
ratio_factor = (ratio_factor, ratio_factor)
else:
ratio_factor = (1.0, 1.0)
if random_aspect_prob > 0 and random.random() < random_aspect_prob:
aspect_factor = random.uniform(random_aspect_range[0], random_aspect_range[1])
ratio_factor = (ratio_factor[0] / aspect_factor, ratio_factor[1] * aspect_factor)
size = [round(x * f / ratio) for (x, f) in zip(source_size, ratio_factor)]
return size
def __call__(self, img):
size = self.get_params(img, self.size, self.longest, self.random_scale_prob, self.random_scale_range, self.random_aspect_prob, self.random_aspect_range)
img = F.resize(img, size, self.interpolation)
return img
def __repr__(self):
interpolate_str = timm.data.transforms.interp_mode_to_str(self.interpolation)
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
        format_string += f', interpolation={interpolate_str}'
format_string += f', longest={self.longest:.3f})'
return format_string
class Bitmap:
def __init__(self, threshold=200):
self.lut = [0 if i < threshold else i for i in range(256)]
def __call__(self, img):
if img.mode == 'RGB' and len(self.lut) == 256:
lut = self.lut + self.lut + self.lut
else:
lut = self.lut
return img.point(lut)
class Erosion:
def __init__(self, scale=3):
super().__init__()
if type(scale) is tuple or type(scale) is list:
assert len(scale) == 2
self.scale = scale
else:
self.scale = (scale, scale)
@staticmethod
def get_params(scale):
if type(scale) is tuple or type(scale) is list:
assert len(scale) == 2
scale = random.choice(scale)
return scale
def __call__(self, img):
kernel_size = self.get_params(self.scale)
if isinstance(img, torch.Tensor):
padding = kernel_size // 2
img = -torch.nn.functional.max_pool2d(-img, kernel_size=kernel_size, padding=padding)
elif isinstance(img, Image.Image):
img = img.filter(ImageFilter.MinFilter(kernel_size))
return img
class Dilation:
def __init__(self, scale=3):
super().__init__()
self.scale = scale
@staticmethod
def get_params(scale):
if type(scale) is tuple or type(scale) is list:
assert len(scale) == 2
scale = random.choice(scale)
return scale
def __call__(self, img):
kernel_size = self.get_params(self.scale)
if isinstance(img, torch.Tensor):
padding = kernel_size // 2
img = torch.nn.functional.max_pool2d(img, kernel_size=kernel_size, padding=padding)
elif isinstance(img, Image.Image):
img = img.filter(ImageFilter.MaxFilter(kernel_size))
return img
if has_albumentations:
class ErosionAlb(alb.ImageOnlyTransform):
def __init__(self, scale, always_apply=False, p=0.5):
super().__init__(always_apply=always_apply, p=p)
if type(scale) is tuple or type(scale) is list:
assert len(scale) == 2
self.scale = scale
else:
self.scale = (scale, scale)
def apply(self, img, **params):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, tuple(np.random.randint(self.scale[0], self.scale[1], 2)))
img = cv2.erode(img, kernel, iterations=1)
return img
class DilationAlb(alb.ImageOnlyTransform):
def __init__(self, scale, always_apply=False, p=0.5):
super().__init__(always_apply=always_apply, p=p)
if type(scale) is tuple or type(scale) is list:
assert len(scale) == 2
self.scale = scale
else:
self.scale = (scale, scale)
def apply(self, img, **params):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, tuple(np.random.randint(self.scale[0], self.scale[1], 2)))
img = cv2.dilate(img, kernel, iterations=1)
return img
class BitmapAlb(alb.ImageOnlyTransform):
def __init__(self, value=0, lower=200, always_apply=False, p=0.5):
super().__init__(always_apply=always_apply, p=p)
self.lower = lower
self.value = value
def apply(self, img, **params):
img = img.copy()
img[img < self.lower] = self.value
return img
# File: pixparse-main/src/pixparse/framework/config.py
from dataclasses import dataclass, field
from typing import Optional, Tuple
@dataclass
class OptimizationCfg:
optimizer: str = 'adamw'
scheduler: str = 'cosine'
learning_rate: float = 0.0005
warmup_learning_rate: float = 0.0
weight_decay: float = 0.02
eps: float = 1e-06
clip_grad_value: Optional[float] = None
clip_grad_mode: Optional[str] = None
grad_accum_steps: int = 1
momentum: Optional[float] = None
betas: Optional[Tuple[float, float]] = None
layer_decay: Optional[float] = None
@dataclass
class TaskTrainCfg:
num_intervals: int = 100
num_warmup_intervals: int = 5
eval_frequency: int = 1000
opt: OptimizationCfg = field(default_factory=OptimizationCfg)
dtype: Optional[str] = None
amp: bool = True
model_name: str = ''
@dataclass
class TaskEvalCfg:
dtype: Optional[str] = None
amp: bool = True
model_name: str = ''
model_state_dict: dict = field(default_factory=dict)
# File: pixparse-main/src/pixparse/framework/device.py
""""""
import os
from dataclasses import dataclass, field, InitVar
from enum import Enum
from typing import Union, Optional, List, Tuple
import torch
import torch.distributed as dist
def is_distributed_env():
if 'WORLD_SIZE' in os.environ:
return int(os.environ['WORLD_SIZE']) > 1
if 'SLURM_NTASKS' in os.environ:
return int(os.environ['SLURM_NTASKS']) > 1
return False
def world_info_from_env():
local_rank = 0
for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
if v in os.environ:
local_rank = int(os.environ[v])
break
global_rank = 0
for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'):
if v in os.environ:
global_rank = int(os.environ[v])
break
world_size = 1
for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'):
if v in os.environ:
world_size = int(os.environ[v])
break
return (local_rank, global_rank, world_size)
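# DeviceEnv resolves rank/world-size from common launcher env vars (torchrun, SLURM, MPI),
# selects the CUDA device and, when more than one process is detected, initializes the
# default process group.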
class DeviceEnvType(Enum):
CPU = 'cpu'
CUDA = 'cuda'
XLA = 'xla'
@dataclass
class DeviceEnv:
init_device_type: InitVar[Optional[str]] = None
init_device_index: InitVar[Optional[int]] = None
init_dist_backend: InitVar[str] = 'nccl'
init_dist_url: InitVar[str] = 'env://'
device: torch.device = field(init=False)
world_size: Optional[int] = None
local_rank: Optional[int] = None
global_rank: Optional[int] = None
def is_global_primary(self):
return self.global_rank == 0
def is_local_primary(self):
return self.local_rank == 0
def is_primary(self, local=False):
return self.is_local_primary() if local else self.is_global_primary()
def __post_init__(self, init_device_type: Optional[str], init_device_index: Optional[int], init_dist_backend: str, init_dist_url: str):
assert torch.cuda.device_count()
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
(init_local_rank, init_global_rank, init_world_size) = world_info_from_env()
if init_world_size > 1:
assert init_device_index is None
self.local_rank = int(init_local_rank)
is_slurm = 'SLURM_PROCID' in os.environ
if 'SLURM_PROCID' in os.environ:
torch.distributed.init_process_group(backend=init_dist_backend, init_method=init_dist_url, world_size=init_world_size, rank=init_global_rank)
else:
torch.distributed.init_process_group(backend=init_dist_backend, init_method=init_dist_url)
self.world_size = torch.distributed.get_world_size()
self.global_rank = torch.distributed.get_rank()
if is_slurm:
assert self.world_size == init_world_size
assert self.global_rank == init_global_rank
self.device = torch.device('cuda:%d' % self.local_rank)
torch.cuda.set_device(self.local_rank)
else:
self.device = torch.device('cuda' if init_device_index is None else f'cuda:{init_device_index}')
self.local_rank = 0
self.world_size = 1
self.global_rank = 0
def broadcast_object(self, obj, src=0):
if self.global_rank == src:
objects = [obj]
else:
objects = [None]
dist.broadcast_object_list(objects, src=src)
return objects[0]
def all_gather_object(self, obj, dst=0):
objects = [None for _ in range(self.world_size)]
dist.all_gather_object(objects, obj)
return objects
# File: pixparse-main/src/pixparse/framework/eval.py
from .task import TaskEval
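# Generic evaluation driver: the task chooses which loaders it supports via
# prepare_for_evaluation, per-batch metrics are collected from task.step, and tasks exposing
# average_metrics get their per-batch results reduced to a single 'average' entry.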
def evaluate(task: TaskEval, loaders):
metrics = dict()
authorized_loaders = task.prepare_for_evaluation(loaders)
for (key, loader) in authorized_loaders.items():
metrics[key] = dict()
for (index_batch, sample) in enumerate(loader.loader):
metrics[key][index_batch] = task.step(sample)
if hasattr(task, 'average_metrics'):
averaged_metrics = task.average_metrics(metrics[key])
metrics[key] = {}
metrics[key]['average'] = averaged_metrics
return metrics
# File: pixparse-main/src/pixparse/framework/logger.py
import logging
def setup_logging(log_file, debug=False, include_host=False, set_all_loggers=False):
level = logging.DEBUG if debug else logging.INFO
if include_host:
import socket
hostname = socket.gethostname()
formatter = logging.Formatter(f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
else:
formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
logging.root.setLevel(level)
if set_all_loggers:
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
logger.setLevel(level)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logging.root.addHandler(stream_handler)
if log_file:
file_handler = logging.FileHandler(filename=log_file)
file_handler.setFormatter(formatter)
logging.root.addHandler(file_handler)
# File: pixparse-main/src/pixparse/framework/monitor.py
import csv
import logging
import os
from collections import OrderedDict
from typing import Optional, Tuple, Dict, Union
import torch
_logger = logging.getLogger(__name__)
try:
    from torch.utils.tensorboard import SummaryWriter
    from torch.utils.tensorboard.summary import image
    HAS_TB = True
except ImportError as e:
    HAS_TB = False
try:
import wandb
HAS_WANDB = True
except ImportError:
HAS_WANDB = False
def summary_row_dict(results, index=None, index_name='epoch'):
assert isinstance(results, dict)
row_dict = OrderedDict()
if index is not None:
row_dict[index_name] = index
if not results:
return row_dict
if isinstance(next(iter(results.values())), dict):
for (p, pr) in results.items():
assert isinstance(pr, dict)
row_dict.update([('_'.join([p, k]), v) for (k, v) in pr.items()])
else:
row_dict.update(results)
return row_dict
class SummaryCsv:
def __init__(self, output_dir, filename='summary.csv'):
self.output_dir = output_dir
self.filename = os.path.join(output_dir, filename)
self.needs_header = not os.path.exists(self.filename)
def update(self, row_dict):
with open(self.filename, mode='a') as cf:
dw = csv.DictWriter(cf, fieldnames=row_dict.keys())
if self.needs_header:
dw.writeheader()
self.needs_header = False
dw.writerow(row_dict)
_sci_keys = {'lr'}
def _add_kwargs(text_update, name_map=None, **kwargs):
def _to_str(key, val):
if isinstance(val, float):
if key.lower() in _sci_keys:
return f'{key}: {val:.3e} '
else:
return f'{key}: {val:.4f}'
else:
return f'{key}: {val}'
def _map_name(key, name_map, capitalize=False):
if name_map is None:
if capitalize:
return key.capitalize() if not key.isupper() else key
else:
return key
return name_map.get(key, None)
for (k, v) in kwargs.items():
if isinstance(v, dict):
for (kk, vv) in v.items():
name = _map_name(kk, name_map)
if not name:
continue
text_update += [_to_str(kk, vv)]
else:
name = _map_name(k, name_map)
if not name:
continue
text_update += [_to_str(name, v)]
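# Monitor centralizes experiment logging: console/file via the std logging module, a
# per-experiment summary.csv, and optional TensorBoard and Weights & Biases backends.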
class Monitor:
def __init__(self, experiment_name=None, output_dir=None, logger=None, hparams=None, wandb=False, wandb_project='unknown', wandb_dir='wandb', tensorboard=False, tensorboard_dir='tensorboard', output_enabled=True, log_eval_data=False):
self.output_dir = output_dir
self.logger = logger or logging.getLogger('log')
hparams = hparams or {}
if output_dir is not None:
self.csv_writer = SummaryCsv(output_dir=output_dir)
else:
self.csv_writer = None
self.tensorboard = None
if tensorboard:
assert HAS_TB
self.tensorboard = SummaryWriter(log_dir=os.path.join(self.output_dir, tensorboard_dir))
self.wandb = None
if wandb:
if HAS_WANDB:
dir_ = os.path.join(self.output_dir, wandb_dir)
self.wandb = wandb.init(project=wandb_project, name=experiment_name, config=hparams, dir=dir_)
_logger.info(f'Wandb found. Metrics are being logged to {dir_}')
else:
_logger.warning("You've requested to log metrics to wandb but package not found. Metrics not being logged to wandb, try `pip install wandb`")
self.output_enabled = output_enabled
self.log_eval_data = log_eval_data
def log_step(self, phase: str, step_idx: int, step_end_idx: Optional[int]=None, interval: Optional[int]=None, loss: Optional[float]=None, rate: Optional[Union[float, Tuple[float, float]]]=None, learning_rate: Optional[float]=None, phase_suffix: str='', metrics: dict=None, eval_data: dict=None, **kwargs):
if not self.output_enabled:
return
if 'num_steps' in kwargs:
step_end_idx = max(0, kwargs.pop('num_steps') - 1)
phase_title = f'{phase.capitalize()} ({phase_suffix})' if phase_suffix else f'{phase.capitalize()}:'
progress = 100.0 * step_idx / step_end_idx if step_end_idx else 0.0
rate_str = ''
if isinstance(rate, (tuple, list)):
rate_str = f'Rate: {rate[0]:.2f}/s ({rate[1]:.2f}/s)'
elif rate is not None:
rate_str = f'Rate: {rate:.2f}/s'
text_update = [phase_title, f'{interval}' if interval is not None else None, f'[{step_idx}]' if step_end_idx is None else None, f'[{step_idx}/{step_end_idx} ({progress:>3.0f}%)]' if step_end_idx is not None else None, rate_str, f'loss: {loss:.5f}' if loss is not None else None, f'lr: {learning_rate:.5f}' if learning_rate is not None else None]
_add_kwargs(text_update, **kwargs)
log_str = ' '.join((item for item in text_update if item))
self.logger.info(log_str)
if self.tensorboard is not None:
if metrics is not None:
for (metric_category, metric_items) in metrics.items():
for (metric_name, metric_value) in metric_items.items():
self.tensorboard.add_scalar('/'.join([metric_category, metric_name, phase_title]), metric_value, step_idx)
if eval_data is not None and self.log_eval_data:
for (eval_data_category, eval_data_triplet) in eval_data.items():
if eval_data_category == 'ocr_reconstruction_data':
image_tag = '/'.join([eval_data_category, 'image', phase_title])
self.tensorboard._get_file_writer().add_summary(image(image_tag, eval_data_triplet['image'], dataformats='CHW'), step_idx)
self.tensorboard.add_text('/'.join([eval_data_category, 'original_text', phase_title]), eval_data_triplet['original_text'], step_idx)
self.tensorboard.add_text('/'.join([eval_data_category, 'reconstructed_text', phase_title]), eval_data_triplet['reconstructed_text'], step_idx)
if loss is not None:
self.tensorboard.add_scalar('/'.join(['Loss', phase_title]), loss, step_idx)
if learning_rate is not None:
                self.tensorboard.add_scalar('/'.join(['Learning Rate', phase_title]), learning_rate, step_idx)
for (k, v) in kwargs.items():
self.tensorboard.add_scalar('/'.join([k, phase_title]), v, step_idx)
        if self.wandb is not None:
            wandb_log = dict(**kwargs)
            if loss:
                wandb_log['loss'] = loss
            if learning_rate:
                wandb_log['learning_rate'] = learning_rate
            self.wandb.log(wandb_log)
def log_phase(self, phase: str='eval', interval: Optional[int]=None, name_map: Optional[dict]=None, **kwargs):
if not self.output_enabled:
return
title = [f'{phase.capitalize()}', f'interval {interval}' if interval is not None else None, 'completed. ']
title_str = ' '.join((i for i in title if i))
results = []
_add_kwargs(results, name_map=name_map, **kwargs)
log_str = title_str + ', '.join((item for item in results if item))
self.logger.info(log_str)
def write_summary(self, results: Dict, index: Optional[Union[int, str]]=None, index_name: str='interval'):
if not self.output_enabled:
return
row_dict = summary_row_dict(index=index, index_name=index_name, results=results)
if self.csv_writer:
self.csv_writer.update(row_dict)
if self.wandb is not None:
wandb.log(row_dict)
if self.tensorboard:
pass
# File: pixparse-main/src/pixparse/framework/task.py
from dataclasses import dataclass
from typing import Any, Dict, Optional
from .config import TaskTrainCfg, TaskEvalCfg
from .device import DeviceEnv
from .monitor import Monitor
class Task:
def __init__(self, device_env: DeviceEnv, monitor: Monitor=None):
self.device_env = device_env
self.monitor = monitor
class TaskEval(Task):
def __init__(self, cfg: TaskEvalCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(device_env=device_env, monitor=monitor)
def collate_fn(self, batch):
pass
def setup(self, *args, **kwargs):
pass
def prepare_for_evaluation(self):
pass
def step(self, sample: Dict[str, Any]) -> Dict[str, Any]:
pass
def end(self):
pass
class TaskTrain(Task):
def __init__(self, cfg: TaskTrainCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(device_env=device_env, monitor=monitor)
self.num_intervals = cfg.num_intervals
self.num_warmup_intervals = cfg.num_warmup_intervals
self.eval_frequency = cfg.eval_frequency
self.num_steps_per_interval = None
self.start_interval = 0
self.step = 0
self.batch_idx = 0
self.interval_idx = 0
self.interval_batch_idx = 0
self.optimizer = None
self.scheduler = None
self.scaler = None
self.autocast = None
def collate_fn(self, batch):
pass
def train_setup(self, *args, **kwargs):
pass
def train_interval_start(self):
pass
def train_interval_end(self):
pass
def train_step(self, sample: Dict[str, Any]) -> Dict[str, Any]:
pass
def eval_step(self, sample: Dict[str, Any]) -> Dict[str, Any]:
pass
def get_current_lr(self):
lrl = [param_group['lr'] for param_group in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
return lr
# File: pixparse-main/src/pixparse/framework/train.py
from .task import TaskTrain
import torch
import os
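# Run task.train_step over every batch the loader yields for the current interval,
# bracketed by the task's interval start/end hooks.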
def train_one_interval(task: TaskTrain, loader):
task.train_interval_start()
for (i, sample) in enumerate(loader.loader):
task.train_step(sample)
task.train_interval_end()
# File: pixparse-main/src/pixparse/models/config.py
import copy
import re
from pathlib import Path
from dataclasses import dataclass, field
from typing import Optional, Tuple
from simple_parsing.helpers import Serializable
from pixparse.utils.name_utils import _natural_key, clean_name
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f'configs/']
_MODEL_CONFIGS = {}
@dataclass
class ImageEncoderCfg(Serializable):
name: str = 'vit_base_patch16_224'
image_fmt: str = 'L'
image_size: Optional[Tuple[int, int]] = (576, 448)
pretrained: bool = True
@dataclass
class TextDecoderCfg(Serializable):
name: str = 'facebook/bart-base'
pretrained: bool = True
num_decoder_layers: Optional[int] = 4
max_length: Optional[int] = 1024
pad_token_id: Optional[int] = None
@dataclass
class ModelCfg(Serializable):
image_encoder: ImageEncoderCfg = field(default_factory=ImageEncoderCfg)
text_decoder: TextDecoderCfg = field(default_factory=TextDecoderCfg)
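# Model configs are JSON files under models/configs/; they are deserialized into ModelCfg
# (via simple_parsing's Serializable) and indexed by file stem for lookup by name.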
def _scan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
model_cfg = ModelCfg.load(cf)
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for (k, v) in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_scan_model_configs()
def list_models():
return list(_MODEL_CONFIGS.keys())
def get_model_config(model_name):
model_name = clean_name(model_name)
cfg = _MODEL_CONFIGS.get(model_name, None)
return copy.deepcopy(cfg)
# File: pixparse-main/src/pixparse/models/cruller.py
import torch.nn as nn
from .config import ModelCfg
from .image_encoder_timm import ImageEncoderTimm
from .text_decoder_hf import TextDecoderHf
class Cruller(nn.Module):
def __init__(self, cfg: ModelCfg):
super().__init__()
self.image_encoder = ImageEncoderTimm(cfg.image_encoder)
self.text_decoder = TextDecoderHf(cfg.text_decoder)
def forward(self, image_input, text_input):
encoder_output = self.image_encoder(image_input)
decoder_output = self.text_decoder(text_input, encoder_hidden_states=encoder_output, return_dict=True)
return decoder_output
# File: pixparse-main/src/pixparse/models/image_encoder_timm.py
import timm
from torch import nn as nn
from pixparse.models.config import ImageEncoderCfg
def create_image_encoder(cfg: ImageEncoderCfg) -> nn.Module:
assert cfg.name
extra_kwargs = {}
if cfg.image_size is not None:
extra_kwargs['img_size'] = cfg.image_size
assert cfg.image_fmt in ('L', 'RGB')
model = timm.create_model(cfg.name, pretrained=cfg.pretrained, in_chans=1 if cfg.image_fmt == 'L' else 3, num_classes=0, global_pool='', **extra_kwargs)
return model
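# Thin wrapper around a timm backbone created with num_classes=0 and global_pool='', so the
# forward pass returns unpooled token features for the text decoder to cross-attend to.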
class ImageEncoderTimm(nn.Module):
def __init__(self, cfg: ImageEncoderCfg):
super().__init__()
self.trunk = create_image_encoder(cfg)
self.pool = None
self.head = None
def forward(self, x):
x = self.trunk(x)
if self.pool is not None:
x = self.pool(x)
if self.head is not None:
x = self.head(x)
return x
# File: pixparse-main/src/pixparse/models/text_decoder_hf.py
from typing import Optional
import torch
import transformers
from torch import nn as nn
from pixparse.models.config import TextDecoderCfg
def create_text_decoder(cfg: TextDecoderCfg) -> transformers.BartForCausalLM:
assert cfg.name
config = transformers.AutoConfig.from_pretrained(cfg.name)
config.add_cross_attention = True
if False:
config.is_encoder_decoder = False
config.scale_embedding = True
config.add_final_layer_norm = True
if cfg.num_decoder_layers is not None:
config.decoder_layers = cfg.num_decoder_layers
if cfg.max_length is not None:
config.max_position_embeddings = cfg.max_length
if cfg.pretrained:
model = transformers.AutoModelForCausalLM.from_pretrained(cfg.name, config=config)
else:
model = transformers.AutoModelForCausalLM.from_config(config)
return model
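# Wraps a Hugging Face causal LM (BART-style) with cross-attention enabled;
# prepare_inputs_for_inference trims input_ids to the last token when past_key_values are
# provided so the decoder can be used with cached generation.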
class TextDecoderHf(nn.Module):
def __init__(self, cfg: TextDecoderCfg):
super().__init__()
self.trunk = create_text_decoder(cfg)
self.prepare_inputs_for_generation = self.prepare_inputs_for_inference
def prepare_inputs_for_inference(self, input_ids: torch.Tensor, encoder_outputs: torch.Tensor, pad_token_id: int, past_key_values=None, past=None, use_cache: bool=None, attention_mask: torch.Tensor=None):
if past is not None:
past_key_values = past
attention_mask = input_ids.ne(pad_token_id).long()
if past_key_values is not None:
input_ids = input_ids[:, -1:]
output = {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache, 'encoder_hidden_states': encoder_outputs}
return output
def forward(self, input_ids, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, past_key_values: Optional[torch.Tensor]=None, use_cache: bool=None, output_attentions: Optional[torch.Tensor]=None, output_hidden_states: Optional[torch.Tensor]=None, return_dict: bool=None):
output = self.trunk(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
return output
# File: pixparse-main/src/pixparse/task/__init__.py
from .task_cruller_pretrain import TaskCrullerPretrain, TaskCrullerPretrainCfg
from .task_cruller_finetune_RVLCDIP import TaskCrullerFinetuneRVLCDIP, TaskCrullerFinetuneRVLCDIPCfg
from .task_cruller_finetune_CORD import TaskCrullerFinetuneCORD, TaskCrullerFinetuneCORDCfg
from .task_cruller_finetune_xent import TaskCrullerFinetuneXent, TaskCrullerFinetuneXentCfg
from .task_cruller_finetune_docvqa import TaskCrullerFinetuneDOCVQA, TaskCrullerFinetuneDOCVQACfg
from .task_cruller_eval_ocr import TaskCrullerEvalOCR, TaskCrullerEvalOCRCfg
from .task_donut_eval_ocr import TaskDonutEvalOCR, TaskDonutEvalOCRCfg
from .task_cruller_eval_rvlcdip import TaskCrullerEvalRVLCDIP, TaskCrullerEvalRVLCDIPCfg
from .task_cruller_eval_cord import TaskCrullerEvalCORD, TaskCrullerEvalCORDCfg
from .task_cruller_eval_docvqa import TaskCrullerEvalDOCVQA, TaskCrullerEvalDOCVQACfg
from .task_factory import TaskFactory
# File: pixparse-main/src/pixparse/task/task_cruller_eval_cord.py
import logging
from collections import OrderedDict
from dataclasses import dataclass, field
from functools import partial
from typing import Optional
import PIL
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms
from pixparse.data import preprocess_ocr_anno, preprocess_text_anno
from pixparse.framework import DeviceEnv, Monitor, TaskEval, TaskEvalCfg
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerCfg, TokenizerHF
from pixparse.utils.json_utils import json2token, token2json
from pixparse.utils.json_utils import JSONParseEvaluator
import numpy as np
from ast import literal_eval
_logger = logging.getLogger(__name__)
@dataclass
class TaskCrullerEvalCORDCfg(TaskEvalCfg):
model_name: Optional[str] = None
model: ModelCfg = field(default_factory=ModelCfg)
tokenizer: TokenizerCfg = field(default_factory=TokenizerCfg)
def __post_init__(self):
if self.model_name:
model = get_model_config(self.model_name)
if model is None:
_logger.warning(f'Model config for {self.model_name} was not found, using defaults.')
else:
self.model = model
else:
self.model_name = 'custom'
class TaskCrullerEvalCORD(TaskEval):
def __init__(self, cfg: TaskCrullerEvalCORDCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.amp_dtype = None
if cfg.dtype is not None:
self.amp_dtype = torch.bfloat16 if cfg.dtype in ('bfloat16', 'bf16') else torch.float16
self.task_start_token = ''
self.prompt_end_token = self.task_start_token
self.max_position_embeddings = cfg.model.text_decoder.max_length
self.text_anno_fn = True
self.tokenizer = TokenizerHF(cfg.tokenizer)
self.state_dict = OrderedDict()
self.resume = False
cord_finetune_tokens = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
special_tokens_from_pretrain = ['', '']
preproc_fn = preprocess_text_anno if self.text_anno_fn else preprocess_ocr_anno
self.anno_preprocess_eval = partial(preproc_fn, tokenizer=self.tokenizer.trunk, max_position_embeddings=self.max_position_embeddings, task_start_token=self.task_start_token, prompt_end_token=self.prompt_end_token)
self.model = Cruller(cfg.model)
newly_added_num_from_pretrain = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(special_tokens_from_pretrain))})
if newly_added_num_from_pretrain > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(cord_finetune_tokens))})
self.vocab_size = len(self.tokenizer.trunk)
if newly_added_num > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
self.loss = nn.CrossEntropyLoss(ignore_index=-100)
self.has_no_sync = False
self.num_image_chs = 1 if cfg.model.image_encoder.image_fmt == 'L' else 3
img_mean = self.model.image_encoder.trunk.pretrained_cfg['mean']
img_std = self.model.image_encoder.trunk.pretrained_cfg['std']
self.img_mean = sum(img_mean) / len(img_mean) if cfg.model.image_encoder.image_fmt == 'L' else img_mean
self.img_std = sum(img_std) / len(img_std) if cfg.model.image_encoder.image_fmt == 'L' else img_std
self.image_preprocess_eval = transforms.Compose([transforms.ToTensor(), transforms.Grayscale(), transforms.Resize(cfg.model.image_encoder.image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.Normalize(mean=self.img_mean, std=self.img_std)])
def setup(self):
device = self.device_env.device
self.model.load_state_dict(self.resume_state_dict)
self.model.eval()
self.model.to(device)
self.all_ground_truths = []
self.all_predictions = []
self.acc_list = []
self.evaluator = JSONParseEvaluator()
def prepare_inputs_for_inference(self, input_ids: torch.Tensor, encoder_outputs: torch.Tensor, past_key_values=None, past=None, use_cache: bool=None, attention_mask: torch.Tensor=None):
if past is not None:
past_key_values = past
attention_mask = input_ids.ne(self.tokenizer.trunk.pad_token_id).long()
if past_key_values is not None:
input_ids = input_ids[:, -1:]
output = {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache, 'encoder_hidden_states': encoder_outputs}
return output
def prepare_for_evaluation(self, loaders):
loaders = {loader_key: loader for (loader_key, loader) in loaders.items() if loader_key in ['eval', 'eval_FUNSD']}
return loaders
def safe_image_transform(self, img):
try:
transformed_img = self.image_preprocess_eval(img)
except PIL.UnidentifiedImageError as e:
print(f'Encountered PIL issue {e}. Filtering...')
transformed_img = None
return transformed_img
def text_input_to_target(self, text_input, ignore_id=-100):
target = text_input.clone()
target[target == self.tokenizer.trunk.pad_token_id] = ignore_id
prompt_end_token_id = self.tokenizer.trunk.convert_tokens_to_ids(self.prompt_end_token)
slice_id = torch.nonzero(target == prompt_end_token_id).sum() + 1
target[:slice_id] = ignore_id
return target
def collate_fn(self, batch):
tokenizer_fn = lambda x: self.tokenizer.trunk(x, add_special_tokens=False, return_tensors='pt', max_length=512, padding='max_length', truncation=True).input_ids[0]
images = [item['image'] for item in batch]
raw_texts = [literal_eval(item['ground_truth'])['gt_parse'] for item in batch]
inputs_to_stack = []
for text in raw_texts:
(tokens_from_json, _) = json2token(text, self.tokenizer.trunk.all_special_tokens, sort_json_key=False)
inputs_to_stack.append(tokenizer_fn(self.task_start_token + tokens_from_json + self.tokenizer.trunk.eos_token))
text_inputs = torch.stack(inputs_to_stack)
targets = torch.stack([self.text_input_to_target(text) for text in text_inputs])
transform = self.image_preprocess_eval
images = torch.stack([transform(img) for img in images])
text_inputs = text_inputs[:, :-1]
targets = targets[:, 1:]
return {'image': images, 'label': text_inputs, 'text_target': targets}
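    # Greedy autoregressive decoding per sample: encode the image once, then repeatedly
    # re-tokenize the running string and take the argmax next token until the stop token or
    # max_steps is reached; predictions and ground truths are parsed with token2json and
    # scored with JSONParseEvaluator.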
def step(self, batch):
metrics = {}
for (image, label) in zip(batch['image'], batch['label']):
decoded_gt = self.tokenizer.trunk.decode(label)
ground_truth = token2json(decoded_gt)
with torch.inference_mode():
tensor_image = image.unsqueeze(0).to(self.device_env.device)
output = self.model.image_encoder(tensor_image)
current_string = ''
input_ids = torch.tensor(self.tokenizer.trunk.encode('', add_special_tokens=False)).unsqueeze(0).to(self.device_env.device)
max_steps = 512
for step in range(max_steps):
inputs = self.prepare_inputs_for_inference(input_ids=input_ids, encoder_outputs=output)
decoder_outputs = self.model.text_decoder(**inputs)
probabilities = F.softmax(decoder_outputs['logits'], dim=-1)
next_token_id = torch.argmax(probabilities[0, -1]).item()
next_token = self.tokenizer.trunk.decode([next_token_id])
current_string += next_token
if next_token == '':
break
input_ids = torch.tensor(self.tokenizer.trunk.encode(current_string, add_special_tokens=False)).unsqueeze(0).to(self.device_env.device)
predicted_json = token2json(current_string)
self.all_predictions.append(predicted_json)
self.all_ground_truths.append(ground_truth)
acc = self.evaluator.cal_acc(predicted_json, ground_truth)
self.acc_list.append(acc)
metrics['batch_accuracy'] = acc
return metrics
def average_metrics(self, metrics: dict):
avg_accuracy = np.mean(self.acc_list)
f1 = self.evaluator.cal_f1(self.all_predictions, self.all_ground_truths)
self.all_ground_truths = []
self.all_predictions = []
self.acc_list = []
return {'average_accuracy': avg_accuracy, 'f1_score': f1}
def end(self):
pass
def state_dict(self):
state_dicts = {}
state_dicts['model'] = self.model.state_dict()
return state_dicts
# File: pixparse-main/src/pixparse/task/task_cruller_eval_docvqa.py
import logging
from collections import OrderedDict
from dataclasses import dataclass, field
from functools import partial
from typing import Optional
import PIL
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms
from pixparse.data import preprocess_ocr_anno, preprocess_text_anno
from pixparse.framework import DeviceEnv, Monitor, TaskEval, TaskEvalCfg
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerCfg, TokenizerHF
from pixparse.utils.json_utils import json2token, token2json
from pixparse.utils.json_utils import JSONParseEvaluator
from pixparse.utils.metrics import average_normalized_levenshtein_similarity
import numpy as np
from ast import literal_eval
_logger = logging.getLogger(__name__)
@dataclass
class TaskCrullerEvalDOCVQACfg(TaskEvalCfg):
model_name: Optional[str] = None
model: ModelCfg = field(default_factory=ModelCfg)
tokenizer: TokenizerCfg = field(default_factory=TokenizerCfg)
def __post_init__(self):
if self.model_name:
model = get_model_config(self.model_name)
if model is None:
_logger.warning(f'Model config for {self.model_name} was not found, using defaults.')
else:
self.model = model
else:
self.model_name = 'custom'
class TaskCrullerEvalDOCVQA(TaskEval):
def __init__(self, cfg: TaskCrullerEvalDOCVQACfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.amp_dtype = None
if cfg.dtype is not None:
self.amp_dtype = torch.bfloat16 if cfg.dtype in ('bfloat16', 'bf16') else torch.float16
self.task_start_token = ''
self.prompt_end_token = ''
self.max_position_embeddings = cfg.model.text_decoder.max_length
self.text_anno_fn = True
self.tokenizer = TokenizerHF(cfg.tokenizer)
self.state_dict = OrderedDict()
self.resume = False
docvqa_finetune_tokens = ['', self.task_start_token, self.prompt_end_token, '', '', '']
special_tokens_from_pretrain = ['', '']
preproc_fn = preprocess_text_anno if self.text_anno_fn else preprocess_ocr_anno
self.anno_preprocess_eval = partial(preproc_fn, tokenizer=self.tokenizer.trunk, max_position_embeddings=self.max_position_embeddings, task_start_token=self.task_start_token, prompt_end_token=self.prompt_end_token)
self.model = Cruller(cfg.model)
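# Token embeddings are resized in two stages: first the special tokens that existed at
# pretraining time (so the checkpoint's embedding matrix lines up), then the DocVQA
# finetune tokens added on top of them.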
newly_added_num_from_pretrain = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(special_tokens_from_pretrain))})
if newly_added_num_from_pretrain > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(docvqa_finetune_tokens))})
self.vocab_size = len(self.tokenizer.trunk)
if newly_added_num > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
self.loss = nn.CrossEntropyLoss(ignore_index=-100)
self.has_no_sync = False
self.num_image_chs = 1 if cfg.model.image_encoder.image_fmt == 'L' else 3
img_mean = self.model.image_encoder.trunk.pretrained_cfg['mean']
img_std = self.model.image_encoder.trunk.pretrained_cfg['std']
self.img_mean = sum(img_mean) / len(img_mean) if cfg.model.image_encoder.image_fmt == 'L' else img_mean
self.img_std = sum(img_std) / len(img_std) if cfg.model.image_encoder.image_fmt == 'L' else img_std
self.image_preprocess_eval = transforms.Compose([transforms.ToTensor(), transforms.Grayscale(), transforms.Resize(cfg.model.image_encoder.image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.Normalize(mean=self.img_mean, std=self.img_std)])
self.raw_predictions_test = dict()
def setup(self):
device = self.device_env.device
self.model.load_state_dict(self.resume_state_dict)
self.model.eval()
self.model.to(device)
self.all_ground_truths = []
self.all_predictions = []
self.acc_list = []
self.evaluator = JSONParseEvaluator()
def prepare_inputs_for_inference(self, input_ids: torch.Tensor, encoder_outputs: torch.Tensor, past_key_values=None, past=None, use_cache: bool=None, attention_mask: torch.Tensor=None):
if past is not None:
past_key_values = past
attention_mask = input_ids.ne(self.tokenizer.trunk.pad_token_id).long()
if past_key_values is not None:
input_ids = input_ids[:, -1:]
output = {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache, 'encoder_hidden_states': encoder_outputs}
return output
def prepare_for_evaluation(self, loaders):
loaders = {loader_key: loader for (loader_key, loader) in loaders.items() if loader_key in ['eval', 'eval_FUNSD']}
return loaders
def safe_image_transform(self, img):
try:
transformed_img = self.image_preprocess_eval(img)
except PIL.UnidentifiedImageError as e:

_logger.warning(f'Encountered PIL issue {e}. Filtering...')
transformed_img = None
return transformed_img
def text_input_to_target(self, text_input, ignore_id=-100):
target = text_input.clone()
target[target == self.tokenizer.trunk.pad_token_id] = ignore_id
prompt_end_token_id = self.tokenizer.trunk.convert_tokens_to_ids(self.prompt_end_token)
slice_id = torch.nonzero(target == prompt_end_token_id).sum() + 1
target[:slice_id] = ignore_id
return target
def collate_fn(self, batch):
question_ids = []
image_ids = []
images = []
questions = []
answers = []
for item in batch:
question_ids.append(item['question_id'])
image_ids.append(item['image_id'])
images.append(item['image'])
questions.append(item['labels']['question'])
answers.append(item['labels']['answers'])
transform = self.image_preprocess_eval
images = torch.stack([transform(img) for img in images])
return {'images': images, 'questions': questions, 'ground_truth_answers': answers, 'image_ids': image_ids, 'question_ids': question_ids}
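# Each sample is answered with greedy decoding from a prompt built around the task start
# token and the question; predicted answers are accumulated and scored against all
# reference answers with ANLS in average_metrics.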
def step(self, batch):
metrics = {}
image_outputs = self.model.image_encoder(batch['images'].to(self.device_env.device))
for (output, question, answers, question_id) in zip(image_outputs, batch['questions'], batch['ground_truth_answers'], batch['question_ids']):
self.all_ground_truths.append(answers)
with torch.inference_mode():
current_string = self.task_start_token + '' + question + '' + ''
input_ids = torch.tensor(self.tokenizer.trunk.encode(current_string, add_special_tokens=False)).unsqueeze(0).to(self.device_env.device)
max_steps = 512
for step in range(max_steps):
inputs = self.prepare_inputs_for_inference(input_ids=input_ids, encoder_outputs=output)
decoder_outputs = self.model.text_decoder(**inputs)
probabilities = F.softmax(decoder_outputs['logits'], dim=-1)
next_token_id = torch.argmax(probabilities[0, -1]).item()
next_token = self.tokenizer.trunk.decode([next_token_id])
current_string += next_token
if next_token == '':
break
input_ids = torch.tensor(self.tokenizer.trunk.encode(current_string, add_special_tokens=False)).unsqueeze(0).to(self.device_env.device)
predicted_json = token2json(current_string)
if 'answer' in predicted_json:
self.all_predictions.append(predicted_json['answer'])
else:
self.all_predictions.append('')
return metrics
def average_metrics(self, metrics: dict):
anls = average_normalized_levenshtein_similarity(ground_truth=self.all_ground_truths, predicted_answers=self.all_predictions)
return {'ANLS': anls}
def end(self):
pass
def state_dict(self):
state_dicts = {}
state_dicts['model'] = self.model.state_dict()
return state_dicts
# File: pixparse-main/src/pixparse/task/task_cruller_eval_ocr.py
import logging
from dataclasses import dataclass, field
from functools import partial
from typing import Optional
import torch
import torchvision.transforms as transforms
from pixparse.framework import TaskEvalCfg, TaskEval, DeviceEnv, Monitor
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerHF, TokenizerCfg
from pixparse.data import preprocess_text_anno
from pixparse.utils import get_ocr_metrics
from chug.common import LoaderBundle
_logger = logging.getLogger(__name__)
import time
@dataclass
class TaskCrullerEvalOCRCfg(TaskEvalCfg):
model_name: Optional[str] = None
model: ModelCfg = field(default_factory=ModelCfg)
tokenizer: TokenizerCfg = field(default_factory=TokenizerCfg)
def __post_init__(self):
if self.model_name:
model = get_model_config(self.model_name)
if model is None:
_logger.warning(f'Model config for {self.model_name} was not found, using defaults.')
else:
self.model = model
else:
self.model_name = 'custom'
class TaskCrullerEvalOCR(TaskEval):
def __init__(self, cfg: TaskCrullerEvalOCRCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.amp_dtype = None
if cfg.dtype is not None:
self.amp_dtype = torch.bfloat16 if cfg.dtype in ('bfloat16', 'bf16') else torch.float16
self.task_start_token = ''
self.prompt_end_token = self.task_start_token
self.max_position_embeddings = cfg.model.text_decoder.max_length
self.text_anno_fn = True
self.tokenizer = TokenizerHF(cfg.tokenizer)
special_tokens = ['', self.task_start_token, self.prompt_end_token]
newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(special_tokens))})
self.vocab_size = len(self.tokenizer.trunk)
preproc_fn = preprocess_text_anno
self.anno_preprocess_eval = partial(preproc_fn, tokenizer=self.tokenizer.trunk, max_position_embeddings=self.max_position_embeddings, task_start_token=self.task_start_token, prompt_end_token=self.prompt_end_token)
self.model = Cruller(cfg.model)
if newly_added_num > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
self.has_no_sync = False
self.num_image_chs = 1 if cfg.model.image_encoder.image_fmt == 'L' else 3
img_mean = self.model.image_encoder.trunk.pretrained_cfg['mean']
img_std = self.model.image_encoder.trunk.pretrained_cfg['std']
self.img_mean = sum(img_mean) / len(img_mean) if cfg.model.image_encoder.image_fmt == 'L' else img_mean
self.img_std = sum(img_std) / len(img_std) if cfg.model.image_encoder.image_fmt == 'L' else img_std
self.image_preprocess_eval = transforms.Compose([transforms.ToTensor(), transforms.Resize(cfg.model.image_encoder.image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.Normalize(mean=self.img_mean, std=self.img_std)])
self.eval_metrics = {}
self.max_recursion_length = 1000
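# Plain function used as a decorator from within the class body: it wraps a method and
# logs its wall-clock execution time via the module logger.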
def time_and_log(func):
def wrapper(self, *args, **kwargs):
start_time = time.time()
result = func(self, *args, **kwargs)
end_time = time.time()
execution_time = end_time - start_time
_logger.info(f'Executed method {func.__name__} in {execution_time:.2f} seconds')
return result
return wrapper
def setup(self):
device = self.device_env.device
self.model.load_state_dict(self.resume_state_dict)
self.model.eval()
self.model.to(device)
def prepare_for_evaluation(self, loaders: dict[str, LoaderBundle]) -> dict[str, LoaderBundle]:
loaders = {loader_key: loader for (loader_key, loader) in loaders.items() if loader_key in ['eval', 'eval_FUNSD']}
return loaders
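# step() consumes (image, text_input, text_target) tuples from the chug loader and
# delegates CER/WER computation to get_ocr_metrics; note that text_target is what gets
# passed as the text_input argument of get_ocr_metrics.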
@time_and_log
def step(self, sample):
metrics = {}
(image_input, text_input, text_target) = sample
text_input = [item[0] for item in text_input]
text_input = torch.stack(text_input, dim=0).to(self.device_env.device, non_blocking=True)
text_target = [item[0] for item in text_target]
text_target = torch.stack(text_target, dim=0).to(self.device_env.device, non_blocking=True)
image_input = image_input.to(self.device_env.device, non_blocking=True)
(ocr_metrics, _) = get_ocr_metrics(model=self.model, tokenizer=self.tokenizer, image_input=image_input, text_input=text_target, device_env=self.device_env, max_recursion_length=self.max_recursion_length, prompt_token=self.task_start_token)
metrics['ocr_reconstruction'] = ocr_metrics
return metrics
def average_metrics(self, metrics: dict):
wer_sum = 0
cer_sum = 0
for batch_metrics in metrics.values():
wer_sum += batch_metrics['ocr_reconstruction']['wer']
cer_sum += batch_metrics['ocr_reconstruction']['cer']
num_batches = len(metrics)
average_wer = wer_sum / num_batches
average_cer = cer_sum / num_batches
return {'ocr_reconstruction': {'wer': average_wer, 'cer': average_cer}}
def end(self):
pass
def state_dict(self):
state_dicts = {}
state_dicts['model'] = self.model.state_dict()
return state_dicts
# File: pixparse-main/src/pixparse/task/task_cruller_eval_rvlcdip.py
import logging
from collections import OrderedDict
from dataclasses import dataclass, field
from functools import partial
from typing import Optional
import PIL
import torch
import torch.nn.functional as F
from torchvision import transforms
from pixparse.data import preprocess_ocr_anno, preprocess_text_anno
from pixparse.framework import DeviceEnv, Monitor, TaskEval, TaskEvalCfg
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerCfg, TokenizerHF
_logger = logging.getLogger(__name__)
@dataclass
class TaskCrullerEvalRVLCDIPCfg(TaskEvalCfg):
model_name: Optional[str] = None
model: ModelCfg = field(default_factory=ModelCfg)
tokenizer: TokenizerCfg = field(default_factory=TokenizerCfg)
def __post_init__(self):
if self.model_name:
model = get_model_config(self.model_name)
if model is None:
_logger.warning(f'Model config for {self.model_name} was not found, using defaults.')
else:
self.model = model
else:
self.model_name = 'custom'
class TaskCrullerEvalRVLCDIP(TaskEval):
def __init__(self, cfg: TaskCrullerEvalRVLCDIPCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.amp_dtype = None
if cfg.dtype is not None:
self.amp_dtype = torch.bfloat16 if cfg.dtype in ('bfloat16', 'bf16') else torch.float16
self.task_start_token = ''
self.prompt_end_token = self.task_start_token
self.max_position_embeddings = cfg.model.text_decoder.max_length
self.text_anno_fn = True
self.tokenizer = TokenizerHF(cfg.tokenizer)
self.state_dict = OrderedDict()
self.resume = False
special_tokens = ['', self.task_start_token, self.prompt_end_token, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(special_tokens))})
self.has_no_sync = False
self.num_image_chs = 1 if cfg.model.image_encoder.image_fmt == 'L' else 3
self.int2str = {0: 'letter', 1: 'form', 2: 'email', 3: 'handwritten', 4: 'advertisement', 5: 'scientific_report', 6: 'scientific_publication', 7: 'specification', 8: 'file_folder', 9: 'news_article', 10: 'budget', 11: 'invoice', 12: 'presentation', 13: 'questionnaire', 14: 'resume', 15: 'memo'}
self.vocab_size = len(self.tokenizer.trunk)
preproc_fn = preprocess_text_anno if self.text_anno_fn else preprocess_ocr_anno
self.anno_preprocess_eval = partial(preproc_fn, tokenizer=self.tokenizer.trunk, max_position_embeddings=self.max_position_embeddings, task_start_token=self.task_start_token, prompt_end_token=self.prompt_end_token)
self.model = Cruller(cfg.model)
if newly_added_num > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
img_mean = self.model.image_encoder.trunk.pretrained_cfg['mean']
img_std = self.model.image_encoder.trunk.pretrained_cfg['std']
self.img_mean = sum(img_mean) / len(img_mean) if cfg.model.image_encoder.image_fmt == 'L' else img_mean
self.img_std = sum(img_std) / len(img_std) if cfg.model.image_encoder.image_fmt == 'L' else img_std
self.image_preprocess_eval = transforms.Compose([transforms.ToTensor(), transforms.Resize(cfg.model.image_encoder.image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.Normalize(mean=self.img_mean, std=self.img_std)])
def setup(self):
device = self.device_env.device
self.model.load_state_dict(self.resume_state_dict)
self.model.eval()
self.model.to(device)
def prepare_inputs_for_inference(self, input_ids: torch.Tensor, encoder_outputs: torch.Tensor, past_key_values=None, past=None, use_cache: bool=None, attention_mask: torch.Tensor=None):
if past is not None:
past_key_values = past
attention_mask = input_ids.ne(self.tokenizer.trunk.pad_token_id).long()
if past_key_values is not None:
input_ids = input_ids[:, -1:]
output = {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache, 'encoder_hidden_states': encoder_outputs}
return output
def prepare_for_evaluation(self, loaders):
loaders = {loader_key: loader for (loader_key, loader) in loaders.items() if loader_key in ['eval', 'eval_FUNSD']}
return loaders
def safe_image_transform(self, img):
try:
transformed_img = self.image_preprocess_eval(img)
except PIL.UnidentifiedImageError as e:
_logger.warning(f'Encountered PIL issue {e}. Filtering...')
transformed_img = None
return transformed_img
def collate_fn(self, batch):
images = [item['image'] for item in batch if item is not None]
labels = [item['label'] for item in batch if item is not None]
if len(images) == 0:
return None
transformed_images = [self.safe_image_transform(img) for img in images]
valid_indices = [i for (i, img) in enumerate(transformed_images) if img is not None]
images = torch.stack([transformed_images[i] for i in valid_indices])
labels = torch.tensor([labels[i] for i in valid_indices], dtype=torch.int64)
return {'image': images, 'label': labels}
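# Batched greedy decoding of the document class: at most 5 tokens are generated per image,
# and the decoded string is compared against the '<classname/>'-style ground-truth token
# built from int2str. A sample is counted at most once thanks to already_counted.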
def step(self, sample):
metrics = {}
metrics['classification'] = dict()
correct_samples = 0
ground_truths = [self.int2str[int(gt)] for gt in sample['label']]
already_counted = [False] * len(ground_truths)
with torch.inference_mode():
tensor_images = torch.stack([im for im in sample['image']]).to(self.device_env.device)
output = self.model.image_encoder(tensor_images)
current_strings = ['' for _ in range(tensor_images.shape[0])]
input_ids = torch.tensor(self.tokenizer.trunk.encode('')[1]).unsqueeze(0).repeat(tensor_images.shape[0], 1).to(self.device_env.device)
max_steps = 5
for step in range(max_steps):
inputs = self.prepare_inputs_for_inference(input_ids=input_ids, encoder_outputs=output)
decoder_outputs = self.model.text_decoder(**inputs)
probabilities = F.softmax(decoder_outputs['logits'], dim=-1)
next_token_ids = torch.argmax(probabilities, dim=-1)
for idx in range(next_token_ids.shape[0]):
next_token_id = next_token_ids[idx, -1].item()
next_token = self.tokenizer.trunk.decode([next_token_id])
current_strings[idx] += next_token
if next_token == '':
generated_label = current_strings[idx].replace('', '').replace('', '').replace('', '').strip()
ground_truth_label = '<' + ground_truths[idx] + '/>'
if generated_label == ground_truth_label and (not already_counted[idx]):
correct_samples += 1
already_counted[idx] = True
input_ids = torch.tensor([self.tokenizer.trunk.encode(s)[1:] for s in current_strings]).to(self.device_env.device)
metrics['classification']['correct_samples'] = correct_samples
metrics['classification']['n_valid_samples'] = len(sample['label'])
return metrics
def average_metrics(self, metrics: dict):
correct_samples = 0
total_samples = 0
for batch_metrics in metrics.values():
correct_samples += batch_metrics['classification']['correct_samples']
total_samples += batch_metrics['classification']['n_valid_samples']
average_acc = correct_samples / total_samples
return {'classification': {'accuracy': average_acc}}
def end(self):
pass
def state_dict(self):
state_dicts = {}
state_dicts['model'] = self.model.state_dict()
return state_dicts
# File: pixparse-main/src/pixparse/task/task_cruller_finetune_CORD.py
import logging
from contextlib import nullcontext
from dataclasses import dataclass, field, asdict
from functools import partial
from typing import Optional, List, Any
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision.transforms import functional as transformsF
from torchvision.transforms import Lambda
import timm
import timm.utils
from timm.optim import create_optimizer_v2
from timm.scheduler import create_scheduler_v2
from pixparse.framework import TaskTrainCfg, TaskTrain, DeviceEnv, Monitor
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerHF, TokenizerCfg
from pixparse.data import preprocess_ocr_anno, preprocess_text_anno
from timm.layers import SelectAdaptivePool2d
from typing import Dict, List
from collections import OrderedDict
from ast import literal_eval
from datasets import load_dataset
from pixparse.utils.json_utils import json2token, token2json
from transformers import DonutProcessor, VisionEncoderDecoderModel
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from pixparse.utils.json_utils import JSONParseEvaluator
_logger = logging.getLogger(__name__)
@dataclass
class TaskCrullerFinetuneCORDCfg(TaskTrainCfg):
model_name: Optional[str] = None
model: ModelCfg = field(default_factory=ModelCfg)
tokenizer: TokenizerCfg = field(default_factory=TokenizerCfg)
def __post_init__(self):
if self.model_name:
model = get_model_config(self.model_name)
if model is None:
_logger.warning(f'Model config for {self.model_name} was not found, using defaults.')
else:
self.model = model
else:
self.model_name = 'custom'
def prepare_inputs_for_inference(tokenizer, input_ids: torch.Tensor, encoder_outputs: torch.Tensor, past_key_values=None, past=None, use_cache: bool=None, attention_mask: torch.Tensor=None):
if past is not None:
past_key_values = past
attention_mask = input_ids.ne(tokenizer.trunk.pad_token_id).long()
if past_key_values is not None:
input_ids = input_ids[:, -1:]
output = {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache, 'encoder_hidden_states': encoder_outputs}
return output
class TaskCrullerFinetuneCORD(TaskTrain):
def __init__(self, cfg: TaskCrullerFinetuneCORDCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.amp_dtype = None
if cfg.dtype is not None:
self.amp_dtype = torch.bfloat16 if cfg.dtype in ('bfloat16', 'bf16') else torch.float16
self.task_start_token = ''
self.prompt_end_token = self.task_start_token
self.max_position_embeddings = cfg.model.text_decoder.max_length
self.text_anno_fn = True
self.tokenizer = TokenizerHF(cfg.tokenizer)
self.state_dict = OrderedDict()
self.resume = False
self.special_tokens_finetune = ['', self.task_start_token, self.prompt_end_token, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
preproc_fn = preprocess_text_anno if self.text_anno_fn else preprocess_ocr_anno
self.anno_preprocess_train = partial(preproc_fn, tokenizer=self.tokenizer.trunk, max_position_embeddings=self.max_position_embeddings, task_start_token=self.task_start_token, prompt_end_token=self.prompt_end_token)
self.finetune_donut_weights = False
_logger.info(f'Finetuning donut weights? {self.finetune_donut_weights}')
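# When finetune_donut_weights is True, the HF Donut VisionEncoderDecoderModel is finetuned
# directly (3-channel inputs, ImageNet mean/std, 1280x960 resize, no grayscale); otherwise
# the Cruller model from this repo is used with its own image-encoder statistics.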
if self.finetune_donut_weights:
self.model = VisionEncoderDecoderModel.from_pretrained('naver-clova-ix/donut-base')
else:
self.model = Cruller(cfg.model)
special_tokens_from_pretrain = ['', '']
num_tokens_from_pretrain = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(special_tokens_from_pretrain))})
if num_tokens_from_pretrain > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
self.loss = nn.CrossEntropyLoss(ignore_index=-100)
self.has_no_sync = False
if self.finetune_donut_weights:
self.num_image_chs = 3
else:
self.num_image_chs = 1 if cfg.model.image_encoder.image_fmt == 'L' else 3
if self.finetune_donut_weights:
img_mean = IMAGENET_DEFAULT_MEAN
img_std = IMAGENET_DEFAULT_STD
else:
img_mean = self.model.image_encoder.trunk.pretrained_cfg['mean']
img_std = self.model.image_encoder.trunk.pretrained_cfg['std']
self.img_mean = sum(img_mean) / len(img_mean) if cfg.model.image_encoder.image_fmt == 'L' else img_mean
self.img_std = sum(img_std) / len(img_std) if cfg.model.image_encoder.image_fmt == 'L' else img_std
if self.finetune_donut_weights:
image_size = (1280, 960)
color_transform = Lambda(lambda x: x)
else:
image_size = cfg.model.image_encoder.image_size
color_transform = transforms.Grayscale()
self.image_preprocess_train = transforms.Compose([transforms.ToTensor(), color_transform, transforms.Resize(image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.Normalize(mean=self.img_mean, std=self.img_std)])
def train_setup(self, num_batches_per_interval: int):
if self.finetune_donut_weights:
self.newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(self.special_tokens_finetune))})
self.vocab_size = len(self.tokenizer.trunk)
if self.newly_added_num > 0:
self.model.decoder.resize_token_embeddings(len(self.tokenizer.trunk))
else:
_logger.info('Resuming from existing checkpoint.')
self.state_dict = {k.replace('module.', ''): v for (k, v) in self.state_dict.items()}
self.model.load_state_dict(self.state_dict)
self.newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(self.special_tokens_finetune))})
self.vocab_size = len(self.tokenizer.trunk)
if self.newly_added_num > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
device = self.device_env.device
self.model.to(device)
if self.device_env.world_size > 1:
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[device], static_graph=True)
self.has_no_sync = hasattr(self.model, 'no_sync')
opt_kwargs = {}
if self.cfg.opt.betas is not None:
opt_kwargs['betas'] = self.cfg.opt.betas
if self.cfg.opt.momentum is not None:
opt_kwargs['momentum'] = self.cfg.opt.momentum
self.optimizer = create_optimizer_v2(self.model, self.cfg.opt.optimizer, lr=self.cfg.opt.learning_rate, eps=self.cfg.opt.eps, layer_decay=self.cfg.opt.layer_decay, **opt_kwargs)
if self.cfg.amp:
self.scaler = timm.utils.NativeScaler()
self.autocast = partial(torch.autocast, device_type=device.type, dtype=self.amp_dtype)
else:
self.scaler = None
self.autocast = nullcontext
self.num_steps_per_interval = num_batches_per_interval // self.cfg.opt.grad_accum_steps
(self.scheduler, num_scheduled_epochs) = create_scheduler_v2(self.optimizer, self.cfg.opt.scheduler, warmup_lr=self.cfg.opt.warmup_learning_rate, warmup_epochs=self.num_warmup_intervals, num_epochs=self.num_intervals, step_on_epochs=False, updates_per_epoch=self.num_steps_per_interval)
self.scheduler.step_update(0)
def text_input_to_target(self, text_input, ignore_id=-100):
target = text_input.clone()
target[target == self.tokenizer.trunk.pad_token_id] = ignore_id
prompt_end_token_id = self.tokenizer.trunk.convert_tokens_to_ids(self.prompt_end_token)
slice_id = torch.nonzero(target == prompt_end_token_id).sum() + 1
target[:slice_id] = ignore_id
return target
def collate_fn(self, batch):
tokenizer_fn = lambda x: self.tokenizer.trunk(x, add_special_tokens=False, return_tensors='pt', max_length=512, padding='max_length', truncation=True).input_ids[0]
images = [item['image'] for item in batch]
raw_texts = [literal_eval(item['ground_truth'])['gt_parse'] for item in batch]
inputs_to_stack = []
for text in raw_texts:
(tokens_from_json, _) = json2token(text, self.tokenizer.trunk.all_special_tokens, sort_json_key=False)
inputs_to_stack.append(tokenizer_fn(self.task_start_token + tokens_from_json + self.tokenizer.trunk.eos_token))
text_inputs = torch.stack(inputs_to_stack)
targets = torch.stack([self.text_input_to_target(text) for text in text_inputs])
transform = self.image_preprocess_train
images = torch.stack([transform(img) for img in images])
text_inputs = text_inputs[:, :-1]
targets = targets[:, 1:]
return {'image': images, 'label': text_inputs, 'text_target': targets}
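# Gradient-accumulation pattern shared by the finetune tasks: every batch runs forward and
# backward (loss pre-divided by accum_steps), but the optimizer and scheduler only step
# when need_update is True; under DDP, no_sync() suppresses gradient all-reduce on the
# non-update batches.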
def train_step(self, sample: Dict[str, Any]) -> Dict[str, Any]:
image_input = sample['image']
label = sample['label']
text_target = sample['text_target']
result = {}
image_input = image_input.to(self.device_env.device, non_blocking=True)
label = label.to(self.device_env.device, non_blocking=True)
text_target = text_target.to(self.device_env.device, non_blocking=True)
accum_steps = self.cfg.opt.grad_accum_steps
need_update = (self.interval_batch_idx + 1) % accum_steps == 0
def _forward():
with self.autocast():
if self.finetune_donut_weights:
output = self.model(pixel_values=image_input, decoder_input_ids=label, labels=text_target)
logits = output['logits']
else:
output = self.model(image_input, label)
logits = output['logits']
loss = self.loss(logits.view(-1, self.vocab_size), text_target.view(-1))
if accum_steps > 1:
loss /= accum_steps
return loss
def _backward(_loss):
if self.scaler is not None:
self.scaler(_loss, self.optimizer, clip_grad=self.cfg.opt.clip_grad_value, clip_mode=self.cfg.opt.clip_grad_mode, parameters=self.model.parameters(), need_update=need_update)
else:
_loss.backward()
if need_update:
if self.cfg.opt.clip_grad_value is not None:
timm.utils.dispatch_clip_grad(self.model.parameters(), value=self.cfg.opt.clip_grad_value, mode=self.cfg.opt.clip_grad_mode)
self.optimizer.step()
if self.has_no_sync and (not need_update):
with self.model.no_sync():
loss = _forward()
_backward(loss)
else:
loss = _forward()
_backward(loss)
self.batch_idx += 1
self.interval_batch_idx += 1
if self.step % 100 == 0:
self.monitor.log_step('finetune', step_idx=self.step, step_end_idx=self.num_intervals * self.num_steps_per_interval, interval=self.interval_idx, loss=loss.item(), lr=self.get_current_lr(), metrics=None, eval_data=None)
if not need_update:
return result
self.step += 1
self.scheduler.step_update(self.step)
self.optimizer.zero_grad()
return result
def state_dict(self):
state_dicts = {}
state_dicts['model'] = self.model.state_dict()
state_dicts['tokenizer'] = self.tokenizer.state_dict()
return state_dicts
# File: pixparse-main/src/pixparse/task/task_cruller_finetune_RVLCDIP.py
import logging
from contextlib import nullcontext
from dataclasses import dataclass, field, asdict
from functools import partial
from typing import Optional, List, Any
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import timm
import timm.utils
from timm.optim import create_optimizer_v2
from timm.scheduler import create_scheduler_v2
from pixparse.framework import TaskTrainCfg, TaskTrain, DeviceEnv, Monitor
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerHF, TokenizerCfg
from pixparse.data import preprocess_ocr_anno, preprocess_text_anno
from timm.layers import SelectAdaptivePool2d
from typing import Dict, List
from collections import OrderedDict
_logger = logging.getLogger(__name__)
class GetCLSToken(nn.Module):
def forward(self, x):
return x[:, 0, :]
@dataclass
class TaskCrullerFinetuneRVLCDIPCfg(TaskTrainCfg):
model_name: Optional[str] = None
model: ModelCfg = field(default_factory=ModelCfg)
tokenizer: TokenizerCfg = field(default_factory=TokenizerCfg)
def __post_init__(self):
if self.model_name:
model = get_model_config(self.model_name)
if model is None:
_logger.warning(f'Model config for {self.model_name} was not found, using defaults.')
else:
self.model = model
else:
self.model_name = 'custom'
class TaskCrullerFinetuneRVLCDIP(TaskTrain):
def __init__(self, cfg: TaskCrullerFinetuneRVLCDIPCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.amp_dtype = None
if cfg.dtype is not None:
self.amp_dtype = torch.bfloat16 if cfg.dtype in ('bfloat16', 'bf16') else torch.float16
self.task_start_token = ''
self.prompt_end_token = self.task_start_token
self.max_position_embeddings = cfg.model.text_decoder.max_length
self.text_anno_fn = True
self.tokenizer = TokenizerHF(cfg.tokenizer)
self.state_dict = OrderedDict()
self.resume = False
self.special_tokens_finetune = ['', self.task_start_token, self.prompt_end_token, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
self.int2str = {0: 'letter', 1: 'form', 2: 'email', 3: 'handwritten', 4: 'advertisement', 5: 'scientific_report', 6: 'scientific_publication', 7: 'specification', 8: 'file_folder', 9: 'news_article', 10: 'budget', 11: 'invoice', 12: 'presentation', 13: 'questionnaire', 14: 'resume', 15: 'memo'}
preproc_fn = preprocess_text_anno if self.text_anno_fn else preprocess_ocr_anno
self.anno_preprocess_train = partial(preproc_fn, tokenizer=self.tokenizer.trunk, max_position_embeddings=self.max_position_embeddings, task_start_token=self.task_start_token, prompt_end_token=self.prompt_end_token)
self.model = Cruller(cfg.model)
special_tokens_from_pretrain = ['', '']
num_tokens_from_pretrain = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(special_tokens_from_pretrain))})
if num_tokens_from_pretrain > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
self.loss = nn.CrossEntropyLoss(ignore_index=-100)
self.has_no_sync = False
self.num_image_chs = 1 if cfg.model.image_encoder.image_fmt == 'L' else 3
img_mean = self.model.image_encoder.trunk.pretrained_cfg['mean']
img_std = self.model.image_encoder.trunk.pretrained_cfg['std']
self.img_mean = sum(img_mean) / len(img_mean) if cfg.model.image_encoder.image_fmt == 'L' else img_mean
self.img_std = sum(img_std) / len(img_std) if cfg.model.image_encoder.image_fmt == 'L' else img_std
self.image_preprocess_train = transforms.Compose([transforms.ToTensor(), transforms.Resize(cfg.model.image_encoder.image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.Normalize(mean=self.img_mean, std=self.img_std)])
def train_setup(self, num_batches_per_interval: int):
_logger.info('Resuming from existing checkpoint.')
self.state_dict = {k.replace('module.', ''): v for (k, v) in self.state_dict.items()}
self.model.load_state_dict(self.state_dict)
self.newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(self.special_tokens_finetune))})
self.vocab_size = len(self.tokenizer.trunk)
if self.newly_added_num > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
device = self.device_env.device
self.model.to(device)
if self.device_env.world_size > 1:
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[device], static_graph=True)
self.has_no_sync = hasattr(self.model, 'no_sync')
opt_kwargs = {}
if self.cfg.opt.betas is not None:
opt_kwargs['betas'] = self.cfg.opt.betas
if self.cfg.opt.momentum is not None:
opt_kwargs['momentum'] = self.cfg.opt.momentum
self.optimizer = create_optimizer_v2(self.model, self.cfg.opt.optimizer, lr=self.cfg.opt.learning_rate, eps=self.cfg.opt.eps, layer_decay=self.cfg.opt.layer_decay, **opt_kwargs)
if self.cfg.amp:
self.scaler = timm.utils.NativeScaler()
self.autocast = partial(torch.autocast, device_type=device.type, dtype=self.amp_dtype)
else:
self.scaler = None
self.autocast = nullcontext
self.num_steps_per_interval = num_batches_per_interval // self.cfg.opt.grad_accum_steps
(self.scheduler, num_scheduled_epochs) = create_scheduler_v2(self.optimizer, self.cfg.opt.scheduler, warmup_lr=self.cfg.opt.warmup_learning_rate, warmup_epochs=self.num_warmup_intervals, num_epochs=self.num_intervals, step_on_epochs=False, updates_per_epoch=self.num_steps_per_interval)
self.scheduler.step_update(0)
def text_input_to_target(self, text_input, ignore_id=-100):
target = text_input.clone()
target[target == self.tokenizer.trunk.pad_token_id] = ignore_id
prompt_end_token_id = self.tokenizer.trunk.convert_tokens_to_ids(self.prompt_end_token)
target[:torch.nonzero(target == prompt_end_token_id).sum() + 1] = ignore_id
return target
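# RVL-CDIP labels are integer class ids; collate_fn maps them to short token sequences
# (task start token + class token + EOS, padded/truncated to 5 tokens) so the decoder is
# trained to emit the class as text.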
def collate_fn(self, batch):
images = [item['image'] for item in batch]
labels = [item['label'] for item in batch]
tokenizer_fn = lambda x: self.tokenizer.trunk(x, add_special_tokens=False, return_tensors='pt', max_length=5, padding='max_length', truncation=True).input_ids[0]
labels_tokens = [tokenizer_fn(self.task_start_token + '<' + self.int2str[label] + '/>' + self.tokenizer.trunk.eos_token) for label in labels]
transform = self.image_preprocess_train
images = torch.stack([transform(img) for img in images])
labels = torch.stack(labels_tokens)
targets = torch.stack([self.text_input_to_target(text) for text in labels])
labels = labels[:, :-1]
targets = targets[:, 1:]
return {'image': images, 'label': labels, 'text_target': targets}
def train_step(self, sample: Dict[str, Any]) -> Dict[str, Any]:
image_input = sample['image']
label = sample['label']
text_target = sample['text_target']
result = {}
image_input = image_input.to(self.device_env.device, non_blocking=True)
label = label.to(self.device_env.device, non_blocking=True)
text_target = text_target.to(self.device_env.device, non_blocking=True)
accum_steps = self.cfg.opt.grad_accum_steps
need_update = (self.interval_batch_idx + 1) % accum_steps == 0
def _forward():
with self.autocast():
output = self.model(image_input, label)
logits = output['logits']
loss = self.loss(logits.view(-1, self.vocab_size), text_target.view(-1))
if accum_steps > 1:
loss /= accum_steps
return loss
def _backward(_loss):
if self.scaler is not None:
self.scaler(_loss, self.optimizer, clip_grad=self.cfg.opt.clip_grad_value, clip_mode=self.cfg.opt.clip_grad_mode, parameters=self.model.parameters(), need_update=need_update)
else:
_loss.backward()
if need_update:
if self.cfg.opt.clip_grad_value is not None:
timm.utils.dispatch_clip_grad(self.model.parameters(), value=self.cfg.opt.clip_grad_value, mode=self.cfg.opt.clip_grad_mode)
self.optimizer.step()
if self.has_no_sync and (not need_update):
with self.model.no_sync():
loss = _forward()
_backward(loss)
else:
loss = _forward()
_backward(loss)
self.batch_idx += 1
self.interval_batch_idx += 1
if self.step % self.eval_frequency == 0:
self.monitor.log_step('finetune', step_idx=self.step, step_end_idx=self.num_intervals * self.num_steps_per_interval, interval=self.interval_idx, loss=loss.item(), lr=self.get_current_lr(), metrics=None, eval_data=None)
if not need_update:
return result
self.step += 1
self.scheduler.step_update(self.step)
self.optimizer.zero_grad()
return result
# File: pixparse-main/src/pixparse/task/task_cruller_finetune_docvqa.py
import logging
from contextlib import nullcontext
from dataclasses import dataclass, field, asdict
from functools import partial
from typing import Optional, List, Any
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision.transforms import functional as transformsF
from torchvision.transforms import Lambda
import timm
import timm.utils
from timm.optim import create_optimizer_v2
from timm.scheduler import create_scheduler_v2
from pixparse.framework import TaskTrainCfg, TaskTrain, DeviceEnv, Monitor
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerHF, TokenizerCfg
from pixparse.data import preprocess_ocr_anno, preprocess_text_anno
from timm.layers import SelectAdaptivePool2d
from typing import Dict, List
from collections import OrderedDict
from ast import literal_eval
from datasets import load_dataset
from pixparse.utils.json_utils import json2token, token2json
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from pixparse.utils.json_utils import JSONParseEvaluator
import numpy as np
_logger = logging.getLogger(__name__)
@dataclass
class TaskCrullerFinetuneDOCVQACfg(TaskTrainCfg):
model_name: Optional[str] = None
model: ModelCfg = field(default_factory=ModelCfg)
tokenizer: TokenizerCfg = field(default_factory=TokenizerCfg)
def __post_init__(self):
if self.model_name:
model = get_model_config(self.model_name)
if model is None:
_logger.warning(f'Model config for {self.model_name} was not found, using defaults.')
else:
self.model = model
else:
self.model_name = 'custom'
class TaskCrullerFinetuneDOCVQA(TaskTrain):
def __init__(self, cfg: TaskCrullerFinetuneDOCVQACfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.amp_dtype = None
if cfg.dtype is not None:
self.amp_dtype = torch.bfloat16 if cfg.dtype in ('bfloat16', 'bf16') else torch.float16
self.task_start_token = ''
self.prompt_end_token = ''
self.max_position_embeddings = cfg.model.text_decoder.max_length
self.text_anno_fn = True
self.tokenizer = TokenizerHF(cfg.tokenizer)
self.state_dict = OrderedDict()
self.resume = False
self.special_tokens_finetune = ['', self.task_start_token, self.prompt_end_token, '', '', '']
preproc_fn = preprocess_text_anno if self.text_anno_fn else preprocess_ocr_anno
self.anno_preprocess_train = partial(preproc_fn, tokenizer=self.tokenizer.trunk, max_position_embeddings=self.max_position_embeddings, task_start_token=self.task_start_token, prompt_end_token=self.prompt_end_token)
self.model = Cruller(cfg.model)
special_tokens_from_pretrain = ['', '']
num_tokens_from_pretrain = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(special_tokens_from_pretrain))})
if num_tokens_from_pretrain > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
self.loss = nn.CrossEntropyLoss(ignore_index=-100)
self.has_no_sync = False
self.num_image_chs = 1 if cfg.model.image_encoder.image_fmt == 'L' else 3
img_mean = self.model.image_encoder.trunk.pretrained_cfg['mean']
img_std = self.model.image_encoder.trunk.pretrained_cfg['std']
self.img_mean = sum(img_mean) / len(img_mean) if cfg.model.image_encoder.image_fmt == 'L' else img_mean
self.img_std = sum(img_std) / len(img_std) if cfg.model.image_encoder.image_fmt == 'L' else img_std
image_size = cfg.model.image_encoder.image_size
color_transform = transforms.Grayscale()
self.image_preprocess_train = transforms.Compose([transforms.ToTensor(), color_transform, transforms.Resize(image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.Normalize(mean=self.img_mean, std=self.img_std)])
def train_setup(self, num_batches_per_interval: int):
_logger.info('Resuming from existing checkpoint.')
self.state_dict = {k.replace('module.', ''): v for (k, v) in self.state_dict.items()}
self.model.load_state_dict(self.state_dict)
self.newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(self.special_tokens_finetune))})
self.vocab_size = len(self.tokenizer.trunk)
if self.newly_added_num > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
device = self.device_env.device
self.model.to(device)
if self.device_env.world_size > 1:
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[device], static_graph=True)
self.has_no_sync = hasattr(self.model, 'no_sync')
opt_kwargs = {}
if self.cfg.opt.betas is not None:
opt_kwargs['betas'] = self.cfg.opt.betas
if self.cfg.opt.momentum is not None:
opt_kwargs['momentum'] = self.cfg.opt.momentum
self.optimizer = create_optimizer_v2(self.model, self.cfg.opt.optimizer, lr=self.cfg.opt.learning_rate, eps=self.cfg.opt.eps, layer_decay=self.cfg.opt.layer_decay, **opt_kwargs)
if self.cfg.amp:
self.scaler = timm.utils.NativeScaler()
self.autocast = partial(torch.autocast, device_type=device.type, dtype=self.amp_dtype)
else:
self.scaler = None
self.autocast = nullcontext
self.num_steps_per_interval = num_batches_per_interval // self.cfg.opt.grad_accum_steps
(self.scheduler, num_scheduled_epochs) = create_scheduler_v2(self.optimizer, self.cfg.opt.scheduler, warmup_lr=self.cfg.opt.warmup_learning_rate, warmup_epochs=self.num_warmup_intervals, num_epochs=self.num_intervals, step_on_epochs=False, updates_per_epoch=self.num_steps_per_interval)
self.scheduler.step_update(0)
def text_input_to_target(self, text_input, ignore_id=-100):
target = text_input.clone()
target[target == self.tokenizer.trunk.pad_token_id] = ignore_id
prompt_end_token_id = self.tokenizer.trunk.convert_tokens_to_ids(self.prompt_end_token)
slice_id = torch.nonzero(target == prompt_end_token_id).sum() + 1
target[:slice_id] = ignore_id
return target
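# Each DocVQA sample carries several question/answer renderings in item['labels']; one is
# drawn at random per sample with np.random.choice, tokenized to 512 tokens, and the prompt
# portion is masked out of the target by text_input_to_target.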
def collate_fn(self, batch):
tokenizer_fn = lambda x: self.tokenizer.trunk(x, add_special_tokens=False, return_tensors='pt', max_length=512, padding='max_length', truncation=True).input_ids[0]
images = [item['image'] for item in batch]
q_and_as = [np.random.choice(item['labels']) for item in batch]
inputs_to_stack = []
for text in q_and_as:
inputs_to_stack.append(tokenizer_fn('' + text + self.tokenizer.trunk.eos_token))
text_inputs = torch.stack(inputs_to_stack)
targets = torch.stack([self.text_input_to_target(text) for text in text_inputs])
transform = self.image_preprocess_train
images = torch.stack([transform(img) for img in images])
text_inputs = text_inputs[:, :-1]
targets = targets[:, 1:]
return {'image': images, 'label': text_inputs, 'text_target': targets}
def train_step(self, sample: Dict[str, Any]) -> Dict[str, Any]:
image_input = sample['image']
label = sample['label']
text_target = sample['text_target']
result = {}
image_input = image_input.to(self.device_env.device, non_blocking=True)
label = label.to(self.device_env.device, non_blocking=True)
text_target = text_target.to(self.device_env.device, non_blocking=True)
accum_steps = self.cfg.opt.grad_accum_steps
need_update = (self.interval_batch_idx + 1) % accum_steps == 0
def _forward():
with self.autocast():
output = self.model(image_input, label)
logits = output['logits']
loss = self.loss(logits.view(-1, self.vocab_size), text_target.view(-1))
if accum_steps > 1:
loss /= accum_steps
return loss
def _backward(_loss):
if self.scaler is not None:
self.scaler(_loss, self.optimizer, clip_grad=self.cfg.opt.clip_grad_value, clip_mode=self.cfg.opt.clip_grad_mode, parameters=self.model.parameters(), need_update=need_update)
else:
_loss.backward()
if need_update:
if self.cfg.opt.clip_grad_value is not None:
timm.utils.dispatch_clip_grad(self.model.parameters(), value=self.cfg.opt.clip_grad_value, mode=self.cfg.opt.clip_grad_mode)
self.optimizer.step()
if self.has_no_sync and (not need_update):
with self.model.no_sync():
loss = _forward()
_backward(loss)
else:
loss = _forward()
_backward(loss)
self.batch_idx += 1
self.interval_batch_idx += 1
if self.step % 100 == 0:
self.monitor.log_step('finetune', step_idx=self.step, step_end_idx=self.num_intervals * self.num_steps_per_interval, interval=self.interval_idx, loss=loss.item(), lr=self.get_current_lr(), metrics=None, eval_data=None)
if not need_update:
return result
self.step += 1
self.scheduler.step_update(self.step)
self.optimizer.zero_grad()
return result
def state_dict(self):
state_dicts = {}
state_dicts['model'] = self.model.state_dict()
state_dicts['tokenizer'] = self.tokenizer.state_dict()
return state_dicts
# File: pixparse-main/src/pixparse/task/task_cruller_finetune_xent.py
import logging
from contextlib import nullcontext
from dataclasses import dataclass, field, asdict
from functools import partial
from typing import Optional, List, Any
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import timm
import timm.utils
from timm.optim import create_optimizer_v2
from timm.scheduler import create_scheduler_v2
from pixparse.framework import TaskTrainCfg, TaskTrain, DeviceEnv, Monitor
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerHF, TokenizerCfg
from pixparse.data import preprocess_ocr_anno, preprocess_text_anno
from timm.layers import SelectAdaptivePool2d
from typing import Dict, List
from collections import OrderedDict
_logger = logging.getLogger(__name__)
class GetCLSToken(nn.Module):
def forward(self, x):
return x[:, 0, :]
@dataclass
class TaskCrullerFinetuneXentCfg(TaskTrainCfg):
model_name: Optional[str] = None
model: ModelCfg = field(default_factory=ModelCfg)
tokenizer: TokenizerCfg = field(default_factory=TokenizerCfg)
def __post_init__(self):
if self.model_name:
model = get_model_config(self.model_name)
if model is None:
_logger.warning(f'Model config for {self.model_name} was not found, using defaults.')
else:
self.model = model
else:
self.model_name = 'custom'
class TaskCrullerFinetuneXent(TaskTrain):
def __init__(self, cfg: TaskCrullerFinetuneXentCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.amp_dtype = None
if cfg.dtype is not None:
self.amp_dtype = torch.bfloat16 if cfg.dtype in ('bfloat16', 'bf16') else torch.float16
self.task_start_token = ''
self.prompt_end_token = self.task_start_token
self.max_position_embeddings = cfg.model.text_decoder.max_length
self.text_anno_fn = False
self.tokenizer = TokenizerHF(cfg.tokenizer)
self.state_dict = OrderedDict()
self.resume = False
special_tokens = ['', self.task_start_token, self.prompt_end_token]
newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(special_tokens))})
self.vocab_size = len(self.tokenizer.trunk)
preproc_fn = preprocess_text_anno if self.text_anno_fn else preprocess_ocr_anno
self.anno_preprocess_train = partial(preproc_fn, tokenizer=self.tokenizer.trunk, max_position_embeddings=self.max_position_embeddings, task_start_token=self.task_start_token, prompt_end_token=self.prompt_end_token)
self.model = Cruller(cfg.model)
if newly_added_num > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
self.loss = nn.CrossEntropyLoss(ignore_index=-100)
self.has_no_sync = False
self.num_image_chs = 1 if cfg.model.image_encoder.image_fmt == 'L' else 3
img_mean = self.model.image_encoder.trunk.pretrained_cfg['mean']
img_std = self.model.image_encoder.trunk.pretrained_cfg['std']
self.img_mean = sum(img_mean) / len(img_mean) if cfg.model.image_encoder.image_fmt == 'L' else img_mean
self.img_std = sum(img_std) / len(img_std) if cfg.model.image_encoder.image_fmt == 'L' else img_std
self.image_preprocess_train = transforms.Compose([transforms.ToTensor(), transforms.Resize(cfg.model.image_encoder.image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.Normalize(mean=self.img_mean, std=self.img_std)])
def train_setup(self, num_batches_per_interval: int):
if self.resume:
_logger.info('Resuming from existing checkpoint.')
self.state_dict = {k.replace('module.', ''): v for (k, v) in self.state_dict.items()}
self.model.load_state_dict(self.state_dict)
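# Replace the seq2seq head with a plain classifier: image encoder -> CLS token -> Linear.
# The 768 input width assumes a ViT-Base-like encoder output, and the 16 outputs presumably
# correspond to the RVL-CDIP document classes.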
self.model = nn.Sequential(OrderedDict([('encoder', self.model.image_encoder), ('token_pool', GetCLSToken()), ('final_fc', nn.Linear(768, 16))]))
_logger.info(f'Local rank for this process: {self.device_env.local_rank}')
device = torch.device(f'cuda:{self.device_env.local_rank}')
self.model.to(device)
if self.device_env.world_size > 1:
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[device], static_graph=True)
self.has_no_sync = hasattr(self.model, 'no_sync')
opt_kwargs = {}
if self.cfg.opt.betas is not None:
opt_kwargs['betas'] = self.cfg.opt.betas
if self.cfg.opt.momentum is not None:
opt_kwargs['momentum'] = self.cfg.opt.momentum
self.optimizer = create_optimizer_v2(self.model, self.cfg.opt.optimizer, lr=self.cfg.opt.learning_rate, eps=self.cfg.opt.eps, layer_decay=self.cfg.opt.layer_decay, **opt_kwargs)
if self.cfg.amp:
self.scaler = timm.utils.NativeScaler()
self.autocast = partial(torch.autocast, device_type=device.type, dtype=self.amp_dtype)
else:
self.scaler = None
self.autocast = nullcontext
self.num_steps_per_interval = num_batches_per_interval // self.cfg.opt.grad_accum_steps
(self.scheduler, num_scheduled_epochs) = create_scheduler_v2(self.optimizer, self.cfg.opt.scheduler, warmup_lr=self.cfg.opt.warmup_learning_rate, warmup_epochs=self.num_warmup_intervals, num_epochs=self.num_intervals, step_on_epochs=False, updates_per_epoch=self.num_steps_per_interval)
self.scheduler.step_update(0)
def collate_fn(self, batch):
images = [item['image'] for item in batch]
labels = [item['label'] for item in batch]
transform = self.image_preprocess_train
images = torch.stack([transform(img) for img in images])
labels = torch.tensor(labels, dtype=torch.int64)
return {'image': images, 'label': labels}
def train_interval_start(self):
self.optimizer.zero_grad()
self.interval_batch_idx = 0
def train_interval_end(self):
self.monitor.log_phase('finetune', self.interval_idx)
self.interval_idx += 1
def train_step(self, sample: Dict[str, Any]) -> Dict[str, Any]:
image_input = sample['image']
label = sample['label']
result = {}
image_input = image_input.to(self.device_env.device, non_blocking=True)
label = label.to(self.device_env.device, non_blocking=True)
accum_steps = self.cfg.opt.grad_accum_steps
need_update = (self.interval_batch_idx + 1) % accum_steps == 0
def _forward():
with self.autocast():
outputs = self.model(image_input)
loss = self.loss(outputs, label)
if accum_steps > 1:
loss /= accum_steps
return loss
def _backward(_loss):
if self.scaler is not None:
self.scaler(_loss, self.optimizer, clip_grad=self.cfg.opt.clip_grad_value, clip_mode=self.cfg.opt.clip_grad_mode, parameters=self.model.parameters(), need_update=need_update)
else:
_loss.backward()
if need_update:
if self.cfg.opt.clip_grad_value is not None:
timm.utils.dispatch_clip_grad(self.model.parameters(), value=self.cfg.opt.clip_grad_value, mode=self.cfg.opt.clip_grad_mode)
self.optimizer.step()
if self.has_no_sync and (not need_update):
with self.model.no_sync():
loss = _forward()
_backward(loss)
else:
loss = _forward()
_backward(loss)
self.batch_idx += 1
self.interval_batch_idx += 1
if self.step % self.eval_frequency == 0:
self.monitor.log_step('finetune', step_idx=self.step, step_end_idx=self.num_intervals * self.num_steps_per_interval, interval=self.interval_idx, loss=loss.item(), lr=self.get_current_lr(), metrics=None, eval_data=None)
if not need_update:
return result
self.step += 1
self.scheduler.step_update(self.step)
self.optimizer.zero_grad()
return result
def eval_step(self, sample: Dict[str, Any]) -> Dict[str, Any]:
pass
def get_current_lr(self):
lrl = [param_group['lr'] for param_group in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
return lr
# File: pixparse-main/src/pixparse/task/task_cruller_pretrain.py
import logging
from contextlib import nullcontext
from dataclasses import dataclass, field, asdict
from functools import partial
from typing import Optional, List, Any
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import timm
import timm.utils
from timm.optim import create_optimizer_v2
from timm.scheduler import create_scheduler_v2
from pixparse.framework import TaskTrainCfg, TaskTrain, DeviceEnv, Monitor
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerHF, TokenizerCfg
from pixparse.data import preprocess_ocr_anno, preprocess_text_anno
from pixparse.utils.ocr_utils import get_ocr_metrics
_logger = logging.getLogger(__name__)
@dataclass
class TaskCrullerPretrainCfg(TaskTrainCfg):
model_name: Optional[str] = None
model: ModelCfg = field(default_factory=ModelCfg)
tokenizer: TokenizerCfg = field(default_factory=TokenizerCfg)
def __post_init__(self):
if self.model_name:
model = get_model_config(self.model_name)
if model is None:
_logger.warning(f'Model config for {self.model_name} was not found, using defaults.')
else:
self.model = model
else:
self.model_name = 'custom'
class TaskCrullerPretrain(TaskTrain):
def __init__(self, cfg: TaskCrullerPretrainCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.amp_dtype = None
if cfg.dtype is not None:
self.amp_dtype = torch.bfloat16 if cfg.dtype in ('bfloat16', 'bf16') else torch.float16
self.task_start_token = ''
self.prompt_end_token = self.task_start_token
self.max_position_embeddings = cfg.model.text_decoder.max_length
self.text_anno_fn = False
self.tokenizer = TokenizerHF(cfg.tokenizer)
special_tokens = ['', self.task_start_token, self.prompt_end_token]
newly_added_num = self.tokenizer.trunk.add_special_tokens({'additional_special_tokens': sorted(set(special_tokens))})
self.vocab_size = len(self.tokenizer.trunk)
preproc_fn = preprocess_text_anno if self.text_anno_fn else preprocess_ocr_anno
self.anno_preprocess_train = partial(preproc_fn, tokenizer=self.tokenizer.trunk, max_position_embeddings=self.max_position_embeddings, task_start_token=self.task_start_token, prompt_end_token=self.prompt_end_token)
self.model = Cruller(cfg.model)
if newly_added_num > 0:
self.model.text_decoder.trunk.resize_token_embeddings(len(self.tokenizer.trunk))
self.loss = nn.CrossEntropyLoss(ignore_index=-100)
self.has_no_sync = False
self.num_image_chs = 1 if cfg.model.image_encoder.image_fmt == 'L' else 3
img_mean = self.model.image_encoder.trunk.pretrained_cfg['mean']
img_std = self.model.image_encoder.trunk.pretrained_cfg['std']
self.img_mean = sum(img_mean) / len(img_mean) if cfg.model.image_encoder.image_fmt == 'L' else img_mean
self.img_std = sum(img_std) / len(img_std) if cfg.model.image_encoder.image_fmt == 'L' else img_std
self.image_preprocess_train = transforms.Compose([transforms.ToTensor(), transforms.Resize(cfg.model.image_encoder.image_size, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True), transforms.Normalize(mean=self.img_mean, std=self.img_std)])
self.image_preprocess_eval = None
self.train_metrics = {}
self.eval_metrics = {}
self.max_recursion_length = 1000
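# train_setup: move the model to the target device, wrap it in DDP when running multi-GPU,
# build the timm optimizer/scheduler, and size the schedule in optimizer steps
# (batches per interval divided by grad_accum_steps).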
def train_setup(self, num_batches_per_interval: int):
device = self.device_env.device
self.model.to(device)
if self.device_env.world_size > 1:
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[device], static_graph=True)
self.has_no_sync = hasattr(self.model, 'no_sync')
opt_kwargs = {}
if self.cfg.opt.betas is not None:
opt_kwargs['betas'] = self.cfg.opt.betas
if self.cfg.opt.momentum is not None:
opt_kwargs['momentum'] = self.cfg.opt.momentum
self.optimizer = create_optimizer_v2(self.model, self.cfg.opt.optimizer, lr=self.cfg.opt.learning_rate, eps=self.cfg.opt.eps, layer_decay=self.cfg.opt.layer_decay, **opt_kwargs)
if self.cfg.amp:
self.scaler = timm.utils.NativeScaler()
self.autocast = partial(torch.autocast, device_type=device.type, dtype=self.amp_dtype)
else:
self.scaler = None
self.autocast = nullcontext
self.num_steps_per_interval = num_batches_per_interval // self.cfg.opt.grad_accum_steps
(self.scheduler, num_scheduled_epochs) = create_scheduler_v2(self.optimizer, self.cfg.opt.scheduler, warmup_lr=self.cfg.opt.warmup_learning_rate, warmup_epochs=self.num_warmup_intervals, num_epochs=self.num_intervals, step_on_epochs=False, updates_per_epoch=self.num_steps_per_interval)
self.scheduler.step_update(0)
def train_interval_start(self):
self.optimizer.zero_grad()
self.interval_batch_idx = 0
def train_interval_end(self):
self.monitor.log_phase('train', self.interval_idx)
self.interval_idx += 1
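# train_step: inputs/targets are shifted by one token for teacher forcing (text_input drops
# the last token, text_target drops the first). The loss is scaled by 1/grad_accum_steps,
# and DDP gradient sync is skipped via no_sync() on non-update steps.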
def train_step(self, sample):
(image_input, text_input, text_target) = sample
result = {}
image_input = image_input.to(self.device_env.device, non_blocking=True)
text_input = text_input[:, :-1].to(self.device_env.device, non_blocking=True)
text_target = text_target[:, 1:].to(self.device_env.device, non_blocking=True)
accum_steps = self.cfg.opt.grad_accum_steps
need_update = (self.interval_batch_idx + 1) % accum_steps == 0
def _forward():
with self.autocast():
output = self.model(image_input, text_input)
logits = output['logits']
loss = self.loss(logits.view(-1, self.vocab_size), text_target.view(-1))
if accum_steps > 1:
loss /= accum_steps
return loss
def _backward(_loss):
if self.scaler is not None:
self.scaler(_loss, self.optimizer, clip_grad=self.cfg.opt.clip_grad_value, clip_mode=self.cfg.opt.clip_grad_mode, parameters=self.model.parameters(), need_update=need_update)
else:
_loss.backward()
if need_update:
if self.cfg.opt.clip_grad_value is not None:
timm.utils.dispatch_clip_grad(self.model.parameters(), value=self.cfg.opt.clip_grad_value, mode=self.cfg.opt.clip_grad_mode)
self.optimizer.step()
if self.has_no_sync and (not need_update):
with self.model.no_sync():
loss = _forward()
_backward(loss)
else:
loss = _forward()
_backward(loss)
self.batch_idx += 1
self.interval_batch_idx += 1
if not need_update:
return result
self.step += 1
self.scheduler.step_update(self.step)
self.optimizer.zero_grad()
if self.step % self.eval_frequency == 0:
(metrics, eval_gallery) = self.get_train_ocr_metrics(sample)
self.train_metrics |= metrics
self.monitor.log_step('train', step_idx=self.step, step_end_idx=self.num_intervals * self.num_steps_per_interval, interval=self.interval_idx, loss=loss.item(), lr=self.get_current_lr(), metrics=self.train_metrics, eval_data=eval_gallery)
return result
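# Periodically reconstruct OCR text from the current batch to track CER/WER during
# training; returns empty dicts when generation fails for the batch.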
def get_train_ocr_metrics(self, sample):
metrics = {}
eval_data = {}
(image_input, text_input, text_target) = sample
image_input = image_input.to(self.device_env.device, non_blocking=True)
text_input = text_input[:, :-1].to(self.device_env.device, non_blocking=True)
text_target = text_target[:, 1:].to(self.device_env.device, non_blocking=True)
(ocr_metrics, ocr_reconstructed_sample) = get_ocr_metrics(model=self.model, tokenizer=self.tokenizer, image_input=image_input, text_input=text_target, device_env=self.device_env, max_recursion_length=self.max_recursion_length)
if ocr_metrics and ocr_reconstructed_sample:
metrics['ocr_reconstruction'] = ocr_metrics
eval_data['ocr_reconstruction_data'] = ocr_reconstructed_sample
else:
_logger.info("Can't generate text from current batch. Skipping metrics...")
return (metrics, eval_data)
def state_dict(self):
state_dicts = {}
state_dicts['model'] = self.model.state_dict()
state_dicts['optimizer'] = self.optimizer.state_dict()
if hasattr(self.scheduler, 'state_dict'):
state_dicts['scheduler'] = self.scheduler.state_dict()
if self.scaler is not None:
state_dicts['scaler'] = self.scaler.state_dict()
return state_dicts
def load_state_dict(self, state_dict):
pass
def __repr__(self):
outputs = [f'model: {repr(self.model)}', f'opt: {repr(self.optimizer)}', f'sched: {repr(self.scheduler)}']
return '\n'.join(outputs)
# File: pixparse-main/src/pixparse/task/task_donut_eval_ocr.py
from PIL import Image
import re
from transformers import DonutProcessor, VisionEncoderDecoderModel
import torch
from dataclasses import dataclass
from functools import partial
from pixparse.framework import TaskEvalCfg, TaskEval, DeviceEnv, Monitor
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.data import preprocess_text_anno
from pixparse.utils import get_ocr_metrics
from pixparse.utils.ocr_utils import get_cer_wer_metrics
import jiwer.transforms as tr
import torchvision.transforms as transforms
import numpy as np
@dataclass
class TaskDonutEvalOCRCfg(TaskEvalCfg):
def __post_init__(self):
pass
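# Baseline OCR evaluation using the off-the-shelf Donut checkpoint
# (naver-clova-ix/donut-base-finetuned-cord-v2), so Cruller results can be compared against
# an established document model on the same CER/WER metrics.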
class TaskDonutEvalOCR(TaskEval):
def __init__(self, cfg: TaskDonutEvalOCRCfg, device_env: DeviceEnv, monitor: Monitor=None):
super().__init__(cfg=cfg, device_env=device_env, monitor=monitor)
self.cfg = cfg
self.processor = DonutProcessor.from_pretrained('naver-clova-ix/donut-base-finetuned-cord-v2')
self.model = VisionEncoderDecoderModel.from_pretrained('naver-clova-ix/donut-base-finetuned-cord-v2')
self.task_prompt = '<s_cord-v2>'  # assumed prompt token for the CORD-v2 finetuned Donut checkpoint
self.decoder_input_ids = self.processor.tokenizer(self.task_prompt, add_special_tokens=False, return_tensors='pt').input_ids
self.vocab_size = len(self.processor.tokenizer)
preproc_fn = preprocess_text_anno
self.max_position_embeddings = 768
self.anno_preprocess_eval = partial(preproc_fn, tokenizer=self.processor.tokenizer, max_position_embeddings=self.max_position_embeddings, task_start_token='<s>', prompt_end_token=self.task_prompt)  # '<s>' task-start token is assumed
self.model.eval()
self.has_no_sync = False
self.num_image_chs = 3
self.image_preprocess_eval = lambda x: x
self.cer_transforms = tr.Compose([tr.RemoveSpecificWords('<pad>'), tr.Strip(), tr.ReduceToListOfListOfChars()])  # removal of '<pad>' is assumed
self.wer_transforms = tr.Compose([tr.RemoveSpecificWords('<pad>'), tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToListOfListOfWords()])
self.eval_metrics = {}
self.max_recursion_length = 1000
def setup(self):
device = self.device_env.device
self.model.to(device)
def prepare_for_evaluation(self, loaders):
loaders = {loader_key: loader for (loader_key, loader) in loaders.items() if loader_key in ['eval', 'eval_FUNSD']}
return loaders
def clean_text(self, text: str) -> str:
sequence = text.replace(self.processor.tokenizer.eos_token, '').replace(self.processor.tokenizer.pad_token, '')
cleaned_text = re.sub('<.*?>', '', sequence)
return cleaned_text
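# step: run greedy generation image-by-image with the Donut task prompt, decode the
# reference text from the (de-masked) input ids, truncate each prediction to the length of
# its reference, and compute CER/WER over the surviving (reference, prediction) pairs.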
def step(self, sample):
metrics = {}
(image_input, text_input, text_target) = sample
text_input = [item[0] for item in text_input]
text_input = torch.stack(text_input, dim=0).to(self.device_env.device, non_blocking=True)
text_target = [item[0] for item in text_target]
text_target = torch.stack(text_target, dim=0).to(self.device_env.device, non_blocking=True)
decoder_input_ids = self.processor.tokenizer(self.task_prompt, add_special_tokens=False, return_tensors='pt').input_ids
pixel_values = self.processor([im.convert('RGB') for im in image_input], return_tensors='pt').pixel_values
with torch.inference_mode():
outputs = [self.model.generate(pixel_value.unsqueeze(0).to(self.device_env.device), decoder_input_ids=decoder_input_ids.to(self.device_env.device), max_length=self.max_position_embeddings, early_stopping=True, pad_token_id=self.processor.tokenizer.pad_token_id, eos_token_id=self.processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.processor.tokenizer.unk_token_id]], return_dict_in_generate=True) for pixel_value in pixel_values]
generated_text = [self.clean_text(self.processor.decode(greedy_outputs.sequences[0])) for greedy_outputs in outputs]
text_input[text_input == -100] = self.processor.tokenizer.pad_token_id
raw_decoded_texts = self.processor.tokenizer.batch_decode(text_input)
decoded_texts = [self.clean_text(t) for t in raw_decoded_texts]
filtered = [(ref, pred) for (ref, pred) in zip(decoded_texts, generated_text) if ref and pred]
if not filtered:
return (None, None)
(decoded_texts, ocr_predictions) = zip(*filtered)
decoded_texts = list(decoded_texts)
ocr_predictions = list(ocr_predictions)
ocr_predictions = [text[0:len(reftext)] for (text, reftext) in zip(ocr_predictions, decoded_texts)]
metrics['ocr_reconstruction'] = get_cer_wer_metrics(self.cer_transforms, self.wer_transforms, dict(), ocr_predictions, decoded_texts)
return metrics
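# Unweighted mean of per-batch CER/WER; assumes every entry in `metrics` carries an
# 'ocr_reconstruction' result.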
def average_metrics(self, metrics: dict):
wer_sum = 0
cer_sum = 0
for batch_metrics in metrics.values():
wer_sum += batch_metrics['ocr_reconstruction']['wer']
cer_sum += batch_metrics['ocr_reconstruction']['cer']
num_batches = len(metrics)
average_wer = wer_sum / num_batches
average_cer = cer_sum / num_batches
return {'ocr_reconstruction': {'wer': average_wer, 'cer': average_cer}}
def end(self):
pass
def state_dict(self):
state_dicts = {}
state_dicts['model'] = self.model.state_dict()
return state_dicts
# File: pixparse-main/src/pixparse/task/task_factory.py
import logging
from dataclasses import dataclass, field
from functools import partial
from typing import Optional
import torch
import torchvision.transforms as transforms
from pixparse.framework import TaskEvalCfg, TaskEval, DeviceEnv, Monitor
from pixparse.models import Cruller, ModelCfg, get_model_config
from pixparse.tokenizers import TokenizerHF, TokenizerCfg
from pixparse.data import preprocess_text_anno
from pixparse.utils import get_ocr_metrics
from pixparse.task import (
TaskCrullerEvalOCR, TaskCrullerEvalOCRCfg,
TaskDonutEvalOCR, TaskDonutEvalOCRCfg,
TaskCrullerEvalRVLCDIP, TaskCrullerEvalRVLCDIPCfg,
TaskCrullerEvalCORD, TaskCrullerEvalCORDCfg,
TaskCrullerEvalDOCVQA, TaskCrullerEvalDOCVQACfg,
TaskCrullerPretrain, TaskCrullerPretrainCfg,
TaskCrullerFinetuneRVLCDIP, TaskCrullerFinetuneRVLCDIPCfg,
TaskCrullerFinetuneCORD, TaskCrullerFinetuneCORDCfg,
TaskCrullerFinetuneDOCVQA, TaskCrullerFinetuneDOCVQACfg,
TaskCrullerFinetuneXent, TaskCrullerFinetuneXentCfg,
)
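# Registry-driven factory: maps a task name string to a (task class, task cfg dataclass)
# pair so the train/eval entrypoints can construct tasks from parsed CLI arguments.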
class TaskFactory:
TASK_CLASS_REGISTRY = {
'cruller_eval_ocr': (TaskCrullerEvalOCR, TaskCrullerEvalOCRCfg),
'cruller_eval_rvlcdip': (TaskCrullerEvalRVLCDIP, TaskCrullerEvalRVLCDIPCfg),
'cruller_eval_cord': (TaskCrullerEvalCORD, TaskCrullerEvalCORDCfg),
'cruller_eval_docvqa': (TaskCrullerEvalDOCVQA, TaskCrullerEvalDOCVQACfg),
'donut_eval_ocr': (TaskDonutEvalOCR, TaskDonutEvalOCRCfg),
'cruller_pretrain': (TaskCrullerPretrain, TaskCrullerPretrainCfg),
'cruller_finetune_rvlcdip': (TaskCrullerFinetuneRVLCDIP, TaskCrullerFinetuneRVLCDIPCfg),
'cruller_finetune_cord': (TaskCrullerFinetuneCORD, TaskCrullerFinetuneCORDCfg),
'cruller_finetune_docvqa': (TaskCrullerFinetuneDOCVQA, TaskCrullerFinetuneDOCVQACfg),
'cruller_finetune_xent': (TaskCrullerFinetuneXent, TaskCrullerFinetuneXentCfg),
}
@classmethod
def create_task(cls, task_name: str, task_args, device_env: DeviceEnv, monitor: Monitor):
task_name = task_name.lower()
if task_name not in cls.TASK_CLASS_REGISTRY:
raise ValueError(f'Unknown task type: {task_name}. Available tasks are {list(cls.TASK_CLASS_REGISTRY.keys())}')
task_cls = cls.TASK_CLASS_REGISTRY[task_name][0]
task_cfg = cls.TASK_CLASS_REGISTRY[task_name][1]
task_cfg_instance = task_cfg(**vars(task_args))
task_cls_instance = task_cls(cfg=task_cfg_instance, device_env=device_env, monitor=monitor)
return (task_cls_instance, task_cfg_instance)
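# Example usage (illustrative; argument values are hypothetical):
#   device_env = DeviceEnv()
#   task, task_cfg = TaskFactory.create_task(
#       task_name='cruller_eval_ocr', task_args=args.task, device_env=device_env, monitor=None)
#   loaders = task.prepare_for_evaluation({'eval': eval_loader})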
# File: pixparse-main/src/pixparse/tokenizers/config.py
import copy
import re
from pathlib import Path
from dataclasses import dataclass, field
from typing import Optional, Tuple
from simple_parsing.helpers import Serializable
from pixparse.utils.name_utils import _natural_key, clean_name
_TOKENIZER_CONFIG_PATHS = [Path(__file__).parent / 'configs/']
_TOKENIZER_CONFIGS = {}
@dataclass
class TokenizerCfg(Serializable):
name: str = 'facebook/bart-large'
pretrained: bool = True
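# Scan the bundled configs/ directory once at import time, load each *.json into a
# TokenizerCfg (via simple_parsing's Serializable.load), key it by file stem, and keep the
# registry in natural-sort order.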
def _scan_tokenizer_configs():
global _TOKENIZER_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _TOKENIZER_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
tokenizer_cfg = TokenizerCfg.load(cf)
_TOKENIZER_CONFIGS[cf.stem] = tokenizer_cfg
_TOKENIZER_CONFIGS = {k: v for (k, v) in sorted(_TOKENIZER_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_scan_tokenizer_configs()
def list_tokenizers():
return list(_TOKENIZER_CONFIGS.keys())
def get_tokenizer_config(tokenizer_name):
tokenizer_name = clean_name(tokenizer_name)
cfg = _TOKENIZER_CONFIGS.get(tokenizer_name, None)
return copy.deepcopy(cfg)
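# Example usage (illustrative; the config name is hypothetical and must match a JSON file
# stem under configs/):
#   print(list_tokenizers())
#   cfg = get_tokenizer_config('cruller-base')  # deep copy of the stored cfg, or None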
# File: pixparse-main/src/pixparse/tokenizers/tokenizer_hf.py
from torch import nn as nn
from pixparse.tokenizers.config import TokenizerCfg
from transformers import AutoTokenizer
def create_tokenizer(cfg: TokenizerCfg):
assert cfg.name
extra_kwargs = {}
tokenizer = AutoTokenizer.from_pretrained(cfg.name, **extra_kwargs)
return tokenizer
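# Thin nn.Module wrapper exposing a Hugging Face tokenizer as `.trunk`, mirroring how the
# image and text trunks are attached elsewhere in pixparse so tasks can treat it uniformly.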
class TokenizerHF(nn.Module):
def __init__(self, cfg: TokenizerCfg):
super().__init__()
self.trunk = create_tokenizer(cfg)