# Copyright (c) OpenMMLab. All rights reserved.
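"""Gather evaluation metrics of benchmarked models and optionally record them
into an excel sheet.

Example usage (the script name and paths below are illustrative only):

    python gather_benchmark_metric.py work_dirs/benchmark \
        benchmark_configs.txt \
        --out gathered_metrics \
        --excel benchmark.xls --ncol 5
"""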
import argparse
import glob
import os.path as osp

import mmcv
from gather_models import get_final_results

try:
    import xlrd
except ImportError:
    xlrd = None
try:
    import xlutils
    from xlutils.copy import copy
except ImportError:
    xlutils = None


def parse_args():
    parser = argparse.ArgumentParser(
        description='Gather metrics of benchmarked models')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'txt_path', type=str, help='txt path output by benchmark_filter')
    parser.add_argument(
        '--out', type=str, help='output directory of the gathered metrics')
    parser.add_argument(
        '--not-show', action='store_true', help='do not show metrics')
    parser.add_argument(
        '--excel', type=str, help='path of the excel file to record metrics')
    parser.add_argument(
        '--ncol', type=int, help='number of the column to modify or append')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    if args.excel:
        assert args.ncol, '"--ncol" must be specified ' \
                          'together with "--excel"'
        if xlrd is None:
            raise RuntimeError(
                'xlrd is not installed, '
                'please use "pip install xlrd==1.2.0" to install it')
        if xlutils is None:
            raise RuntimeError(
                'xlutils is not installed, '
                'please use "pip install xlutils==2.0.0" to install it')
        readbook = xlrd.open_workbook(args.excel)
        sheet = readbook.sheet_by_name('Sheet1')
        sheet_info = {}
        total_nrows = sheet.nrows
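        # Rows 0-2 are assumed to be header rows; map each config name
        # (first column) to its row index so existing rows can be updated.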
        for i in range(3, sheet.nrows):
            sheet_info[sheet.row_values(i)[0]] = i
        xlrw = copy(readbook)
        table = xlrw.get_sheet(0)

    root_path = args.root
    metrics_out = args.out

    result_dict = {}
    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        for i, config in enumerate(model_cfgs):
            config = config.strip()
            if len(config) == 0:
                continue

            config_name = osp.split(config)[-1]
            config_name = osp.splitext(config_name)[0]
            result_path = osp.join(root_path, config_name)
            if osp.exists(result_path):
                # 1 read config
                cfg = mmcv.Config.fromfile(config)
                total_epochs = cfg.runner.max_epochs
                final_results = cfg.evaluation.metric
                if not isinstance(final_results, list):
                    final_results = [final_results]
                final_results_out = []
                for key in final_results:
                    if 'proposal_fast' in key:
                        final_results_out.append('AR@1000')  # RPN
                    elif 'mAP' not in key:
                        final_results_out.append(key + '_mAP')

                # 2 determine whether total_epochs ckpt exists
                ckpt_path = f'epoch_{total_epochs}.pth'
                if osp.exists(osp.join(result_path, ckpt_path)):
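                    # pick the latest *.log.json; timestamped file names
                    # sort chronologically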
                    log_json_path = list(
                        sorted(glob.glob(osp.join(result_path,
                                                  '*.log.json'))))[-1]

                    # 3 read metric
                    model_performance = get_final_results(
                        log_json_path, total_epochs, final_results_out)
                    if model_performance is None:
                        print(f'log file error: {log_json_path}')
                        continue
                    for performance in model_performance:
                        if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:
                            metric = round(
                                model_performance[performance] * 100, 1)
                            model_performance[performance] = metric
                    result_dict[config] = model_performance

                    # update and append excel content
                    if args.excel:
                        if 'AR@1000' in model_performance:
                            metrics = f'{model_performance["AR@1000"]}' \
                                      f'(AR@1000)'
                        elif 'segm_mAP' in model_performance:
                            metrics = f'{model_performance["bbox_mAP"]}/' \
                                      f'{model_performance["segm_mAP"]}'
                        else:
                            metrics = f'{model_performance["bbox_mAP"]}'

                        row_num = sheet_info.get(config, None)
                        if row_num:
                            table.write(row_num, args.ncol, metrics)
                        else:
                            table.write(total_nrows, 0, config)
                            table.write(total_nrows, args.ncol, metrics)
                            total_nrows += 1

                else:
                    print(f'{config}: checkpoint {ckpt_path} does not exist')
            else:
                print(f'{config}: result path does not exist')

        # 4 save or print results
        if metrics_out:
            mmcv.mkdir_or_exist(metrics_out)
            mmcv.dump(result_dict,
                      osp.join(metrics_out, 'model_metric_info.json'))
        if not args.not_show:
            print('===================================')
            for config_name, metrics in result_dict.items():
                print(config_name, metrics)
            print('===================================')
        if args.excel:
            filename, suffix = osp.splitext(args.excel)
            xlrw.save(f'{filename}_o{suffix}')
            print(f'>>> Output {filename}_o{suffix}')