from opencompass.multimodal.models.instructblip import (
    InstructBlipMMBenchPromptConstructor, InstructBlipMMBenchPostProcessor)

# dataloader settings
val_pipeline = [
    dict(type='mmpretrain.torchvision/Resize',
         size=(224, 224),
         interpolation=3),  # 3 == PIL/torchvision bicubic
    dict(type='mmpretrain.torchvision/ToTensor'),
    dict(type='mmpretrain.torchvision/Normalize',
         # CLIP ViT image normalization statistics
         mean=(0.48145466, 0.4578275, 0.40821073),
         std=(0.26862954, 0.26130258, 0.27577711)),
    dict(type='mmpretrain.PackInputs',
         algorithm_keys=[
             'question', 'category', 'l2-category', 'context', 'index',
             'options_dict', 'options', 'split'
         ])
]
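
# For reference only (an assumption, not part of the original config): the
# pipeline above corresponds to the following plain-torchvision
# preprocessing; the real transforms are built through mmpretrain's registry.
#
#   from torchvision import transforms
#   preprocess = transforms.Compose([
#       transforms.Resize((224, 224),
#                         interpolation=transforms.InterpolationMode.BICUBIC),
#       transforms.ToTensor(),
#       transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073),
#                            std=(0.26862954, 0.26130258, 0.27577711)),
#   ])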

dataset = dict(type='opencompass.MMBenchDataset',
               data_file='data/mmbench/mmbench_test_20230712.tsv',
               pipeline=val_pipeline)

instruct_blip_dataloader = dict(batch_size=1,
                                num_workers=4,
                                dataset=dataset,
                                # pseudo_collate keeps each batch as a list
                                # of samples instead of stacking tensors
                                collate_fn=dict(type='pseudo_collate'),
                                sampler=dict(type='DefaultSampler',
                                             shuffle=False))
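
# A minimal sketch (assumption, not from the original file) of how this
# dataloader dict can be materialized for debugging with mmengine:
#
#   from mmengine.runner import Runner
#   loader = Runner.build_dataloader(instruct_blip_dataloader)
#   batch = next(iter(loader))
#
# Runner.build_dataloader resolves the dataset/collate_fn/sampler entries
# through mmengine's registries, so opencompass and mmpretrain must be
# importable for their registered types to be found.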

# model settings
instruct_blip_model = dict(
    type='blip2-vicuna-instruct',
    prompt_constructor=dict(type=InstructBlipMMBenchPromptConstructor),
    post_processor=dict(type=InstructBlipMMBenchPostProcessor),
    freeze_vit=True,
    low_resource=False,
    llm_model='/path/to/vicuna-7b/',  # placeholder: local Vicuna-7B weights
    # one-shot in-context example in Vicuna's ###Human/###Assistant format
    sys_prompt=('###Human: What is the capital of China? There are several '
                'options:\nA. Beijing\nB. Shanghai\nC. Guangzhou\n'
                'D. Shenzhen\n###Assistant: A\n'))

# evaluation settings
instruct_blip_evaluator = [
    dict(type='opencompass.DumpResults',
         save_path='work_dirs/instructblip_vicuna7b/'
                   'instructblipvicuna_mmbench.xlsx')
]

instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed'  # placeholder checkpoint path
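
# Usage sketch (assumption, not part of the original config): OpenCompass
# loads files like this one as an mmengine config. To sanity-check the
# parsed result, assuming this file is saved as instructblip_mmbench.py:
#
#   from mmengine.config import Config
#   cfg = Config.fromfile('instructblip_mmbench.py')
#   print(cfg.instruct_blip_model['type'])  # -> 'blip2-vicuna-instruct'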