datasets = [
    [
        dict(
            abbr='siqa',
            eval_cfg=dict(
                evaluator=dict(
                    type='opencompass.openicl.icl_evaluator.EDAccEvaluator'),
                pred_role='BOT'),
            infer_cfg=dict(
                inferencer=dict(
                    type='opencompass.openicl.icl_inferencer.GenInferencer'),
                prompt_template=dict(
                    template=dict(round=[
                        dict(
                            prompt=
                            '{context}\nQuestion: {question}\nA. {answerA}\nB. {answerB}\nC. {answerC}\nAnswer:',
                            role='HUMAN'),
                    ]),
                    type=
                    'opencompass.openicl.icl_prompt_template.PromptTemplate'),
                retriever=dict(
                    type='opencompass.openicl.icl_retriever.ZeroRetriever')),
            path='./data/siqa',
            reader_cfg=dict(
                input_columns=[
                    'context',
                    'question',
                    'answerA',
                    'answerB',
                    'answerC',
                ],
                output_column='all_labels',
                test_split='validation'),
            type='opencompass.datasets.siqaDataset_V2'),
    ],
]
models = [
    dict(
        abbr='my_api',
        api_key='',
        batch_size=8,
        max_out_len=100,
        max_seq_len=2048,
        meta_template=dict(round=[
            dict(api_role='HUMAN', role='HUMAN'),
            dict(api_role='BOT', generate=True, role='BOT'),
        ]),
        path='my_api',
        run_cfg=dict(num_gpus=1, num_procs=1),
        type='opencompass.models.my_api.MyAPIModel',
        url='http://127.0.0.1:12345/testing'),
]
work_dir = './outputs/default/20240306_170301'
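
# A minimal sketch of how such a dumped config can be inspected programmatically,
# using mmengine's Config class (the config system OpenCompass builds on).
# The file path below is an assumption derived from the work_dir above; the actual
# dump location inside your output directory may differ.
from mmengine.config import Config

cfg = Config.fromfile(
    './outputs/default/20240306_170301/configs/20240306_170301.py')  # hypothetical path

print(cfg.models[0]['url'])          # -> http://127.0.0.1:12345/testing
print(cfg.datasets[0][0]['abbr'])    # -> siqa
print(cfg.work_dir)                  # -> ./outputs/default/20240306_170301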