"""OAB Exams dataset"""
import datasets
import pandas as pd
import re
_CITATION = """@misc{delfino2017passing,
title={Passing the Brazilian OAB Exam: data preparation and some experiments},
author={Pedro Delfino and Bruno Cuconato and Edward Hermann Haeusler and Alexandre Rademaker},
year={2017},
eprint={1712.05128},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
This dataset contains the bar exams administered by the Ordem dos Advogados do Brasil (OAB) from 2010 to 2018.
In Brazil, all legal professionals must demonstrate their knowledge of the law and its application by passing the OAB exams, the national bar exams. The OAB exams therefore provide an excellent benchmark for legal information systems, since passing them would arguably signal that a system has acquired a capacity for legal reasoning comparable to that of a human lawyer.
"""
_HOMEPAGE = "https://github.com/legal-nlp/oab-exams"
BASE_URL = "https://raw.githubusercontent.com/legal-nlp/oab-exams/master/official/raw/"
FILES = [
'2010-01.txt',
'2010-02.txt',
'2011-03.txt',
'2011-04.txt',
'2011-05.txt',
'2012-06.txt',
'2012-06a.txt',
'2012-07.txt',
'2012-08.txt',
'2012-09.txt',
'2013-10.txt',
'2013-11.txt',
'2013-12.txt',
'2014-13.txt',
'2014-14.txt',
'2014-15.txt',
'2015-16.txt',
'2015-17.txt',
'2015-18.txt',
'2016-19.txt',
'2016-20.txt',
'2016-20a.txt',
'2016-21.txt',
'2017-22.txt',
'2017-23.txt',
'2017-24.txt',
'2018-25.txt'
]
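# File names follow "<year>-<exam number>.txt"; exam_id and exam_year are derived from
# them in _generate_examples. The "a" suffix (e.g. "2012-06a") appears to denote an
# alternate (re-applied) version of that exam.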
def join_lines(lines):
    """Join raw file lines into one string: blank lines become paragraph breaks
    (newlines) and consecutive non-blank lines are joined with single spaces."""
    texts = []
    for line in lines:
        # A blank line marks a paragraph break (repeated blanks are collapsed).
        if line.strip() == "" and len(texts) > 0 and texts[-1] != "\n":
            texts.append("\n")
        else:
            # Insert a space unless at the start of the text or right after a break.
            if len(texts) > 0 and texts[-1] != "\n":
                texts.append(" ")
            texts.append(line.strip())
    return "".join(texts).strip()
class OABExams(datasets.GeneratorBasedBuilder):
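    """Multiple-choice questions from the Brazilian OAB bar exams (2010-2018), exposed as a single train split."""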
VERSION = datasets.Version("1.1.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"question_number": datasets.Value("int32"),
"exam_id": datasets.Value("string"),
"exam_year": datasets.Value("string"),
"question_type": datasets.Value("string"),
"nullified": datasets.Value("bool"),
"question": datasets.Value("string"),
"choices": datasets.Sequence(feature={
"text": datasets.Value("string"),
"label": datasets.Value("string")
}),
"answerKey": datasets.Value("string"),
}),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
links = [BASE_URL + file for file in FILES]
downloaded_files = dl_manager.download(links)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepaths": downloaded_files,
"filenames": FILES
}
)
]
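    # Expected layout of each raw exam file, as inferred from the parsing below:
    #
    #   Questão <n> [NULL]
    #   AREA [<area name>]
    #   <question statement, possibly spanning several lines>
    #   OPTIONS
    #
    #   A) ...            (or "A:CORRECT) ..." for the correct choice)
    #   B) ...
    #   ...
    #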
def _generate_examples(self, filepaths, filenames):
for filepath, filename in zip(filepaths, filenames):
            exam_id = filename.replace(".txt", "")
            # Keep the year as a string to match the "exam_year" feature declared in _info().
            exam_year = filename.split("-")[0]
questions_temp = []
with open(filepath, encoding="utf-8") as f:
lines = f.readlines()
for i, line in enumerate(lines):
                # A header line like "Questão 1" or "Questão 80 NULL" starts a new question.
                if re.match(r"Questão \d{1,2}(\sNULL)?", line.strip()):
nullified = 'NULL' in line
question_number = int(line.strip().split(" ")[1])
question_id = exam_id + "_" + str(question_number)
questions_temp.append(
{
"question_id": question_id,
"question_number": question_number,
"exam_id": exam_id,
"exam_year": exam_year,
"lines": [line],
"nullified": nullified
}
)
else:
questions_temp[-1]["lines"].append(line)
for question_temp in questions_temp:
question_lines = question_temp["lines"]
area_index = 2
if question_lines[1].startswith("AREA"):
area_index = 1
area_line = question_lines[area_index].strip()
question_type = None if area_line == "AREA" else area_line.split(" ")[1]
index_options = None
for i, line in enumerate(question_lines):
if line.strip() == "OPTIONS":
index_options = i
break
                if index_options is None:
                    # No "OPTIONS" marker found: report the malformed block and skip it.
                    print(question_temp)
                    continue
question = join_lines(question_lines[3:index_options])
choices = {
"text": [],
"label": []
}
answerKey = None
                temp_question_text = None
                # Choices start two lines after the "OPTIONS" marker; each choice begins
                # with "A) " .. "E) " (or "A:CORRECT) " for the answer) and may continue
                # over the following lines, which are accumulated in temp_question_text.
                for i, line in enumerate(question_lines[index_options+2:]):
                    if "CORRECT)" in line:
                        answerKey = line[0]
                    if line[0] in ["A", "B", "C", "D", "E"] and (line[1:3] == ") " or line[1:11] == ":CORRECT) "):
if temp_question_text is not None:
choices["text"].append(join_lines(temp_question_text))
temp_question_text = [line[line.find(')')+2:]]
choices["label"].append(line[0])
else:
if temp_question_text is not None:
temp_question_text.append(line)
if temp_question_text is not None:
choices["text"].append(join_lines(temp_question_text))
temp_question_text = None
                # Skip questions that were nullified (marked NULL) in the official exam.
if question_temp["nullified"]:
continue
yield question_temp['question_id'], {
"id": question_temp['question_id'],
"question_number": question_temp['question_number'],
"exam_id": question_temp['exam_id'],
"exam_year": question_temp['exam_year'],
"question_type": question_type,
"nullified": question_temp['nullified'],
"question": question,
"choices": choices,
"answerKey": answerKey
                }
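# Illustrative local smoke test (normally this script is executed indirectly via
# datasets.load_dataset rather than run as a standalone program):
#
#     if __name__ == "__main__":
#         builder = OABExams()
#         builder.download_and_prepare()
#         ds = builder.as_dataset(split="train")
#         print(len(ds), ds[0])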