|
|
|
|
|
|
|
from datasets import load_dataset |
|
|
|
# Load the CoNLL-2003 named-entity-recognition dataset from the Hugging Face Hub.
dataset = load_dataset("conll2003")
|
|
|
|
|
# Inspect the DatasetDict: train/validation/test splits and their features.
dataset
|
|
|
|
|
# Peek at the tokens of the first training example.
dataset['train'][0]['tokens']
|
|
|
|
|
# CoNLL-2003 uses IOB2 tags: B-/I- prefixes mark the beginning/inside of
# person (PER), organization (ORG), location (LOC), and miscellaneous (MISC) spans.
ner_tags = {'O': 0, 'B-PER': 1, 'I-PER': 2, 'B-ORG': 3, 'I-ORG': 4, 'B-LOC': 5, 'I-LOC': 6, 'B-MISC': 7, 'I-MISC': 8}
|
|
|
|
|
|
|
# Invert the mapping so integer tag ids can be decoded back into label strings.
swapped_dict = {v: k for k, v in ner_tags.items()}
|
|
|
|
|
print(swapped_dict) |
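# The hand-typed mapping above can also be read straight off the dataset's
# ClassLabel feature, which avoids transcription mistakes. A minimal sketch
# using the standard `datasets` features API:
label_names = dataset['train'].features['ner_tags'].feature.names
id2label = dict(enumerate(label_names))
print(id2label)  # should match swapped_dict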
|
|
|
|
|
# Decode the first example's integer tags into readable labels.
[swapped_dict[x] for x in dataset['train'][0]['ner_tags']]
|
|
|
|
|
dataset['train'][0] |
|
|
|
|
|
def label_tokens(entry):
    # Attach human-readable NER labels alongside the integer tag ids.
    entry['ner_labels'] = [swapped_dict[x] for x in entry['ner_tags']]
    return entry
|
|
|
|
|
|
|
# DatasetDict.map applies the function to every split at once.
dataset = dataset.map(label_tokens)
|
|
|
|
|
|
|
def tokens_to_sentence(entry):
    # Rejoin the pre-tokenized words into a single whitespace-separated string.
    entry['sentence'] = ' '.join(entry['tokens'])
    return entry
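# A tiny hand-made illustration (toy input, not a dataset row): the plain
# whitespace join leaves a space before punctuation, which is harmless here
# since entities are later extracted word by word.
tokens_to_sentence({'tokens': ['EU', 'rejects', 'German', 'call', '.']})
# -> {'tokens': [...], 'sentence': 'EU rejects German call .'}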
|
|
|
dataset = dataset.map(tokens_to_sentence)
|
|
|
|
|
|
|
def extract_entities(entry):
    # Group BIO-tagged words into entity strings, bucketed by entity type.
    entities = {'PER': [], 'ORG': [], 'LOC': [], 'MISC': []}
    current_entity = {"type": None, "words": []}

    def flush():
        # Commit the entity currently under construction, if any.
        if current_entity["type"] is not None:
            entities[current_entity["type"]].append(' '.join(current_entity["words"]))

    # Iterate the original tokens directly; the sentence was built from them,
    # so this is equivalent to splitting it on whitespace.
    for word, label in zip(entry['tokens'], entry['ner_labels']):
        if label.startswith('B-'):
            # A B- tag always opens a new entity, ending any previous one.
            flush()
            current_entity = {"type": label.split('-')[1], "words": [word]}
        elif label.startswith('I-'):
            if current_entity["type"] == label.split('-')[1]:
                current_entity["words"].append(word)
            else:
                # Malformed sequence (I- without a matching open entity):
                # start a new entity rather than mixing types or dropping the word.
                flush()
                current_entity = {"type": label.split('-')[1], "words": [word]}
        else:
            # An O tag closes any open entity.
            flush()
            current_entity = {"type": None, "words": []}

    flush()

    entry['entities'] = entities
    return entry
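# A quick hand-made sanity check (toy input, not from the dataset) to confirm
# the BIO grouping, including two PER spans separated by an O tag:
_example = {
    'tokens': ['John', 'Smith', 'met', 'Mary', 'at', 'Google', 'in', 'New', 'York'],
    'ner_labels': ['B-PER', 'I-PER', 'O', 'B-PER', 'O', 'B-ORG', 'O', 'B-LOC', 'I-LOC'],
}
extract_entities(_example)['entities']
# -> {'PER': ['John Smith', 'Mary'], 'ORG': ['Google'], 'LOC': ['New York'], 'MISC': []}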
|
|
|
|
|
dataset = dataset.map(extract_entities)
|
|
|
|
|
|
|
|
|
# Spot-check one example: the raw sentence next to its extracted entities.
dataset['train'][10]['sentence'], dataset['train'][10]['entities']
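# Pushing to the Hub needs an authenticated session with write access to the
# target namespace (an assumption about your environment). A minimal sketch
# using the standard huggingface_hub login helper:
from huggingface_hub import notebook_login

notebook_login()  # or run `huggingface-cli login` in a terminal beforehand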
|
|
|
|
|
# Publish the enriched dataset to the Hugging Face Hub.
dataset.push_to_hub("areias/conll2003-generative")
|
|
|
|
|
from collections import Counter |
|
|
|
def get_count(entries):
    # Tally every extracted entity string, bucketed by type.
    counters = {t: Counter() for t in ('PER', 'ORG', 'LOC', 'MISC')}

    for item in entries:
        for entity_type, counter in counters.items():
            counter.update(item['entities'][entity_type])

    for entity_type, counter in counters.items():
        print(f"Total {entity_type} entities:", sum(counter.values()))
|
|
|
|
|
|
|
get_count(dataset['train']) |
|
|
|
|
|
get_count(dataset['test']) |
|
|
|
|
|
get_count(dataset['validation']) |
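# Optional exploration (not part of the original pipeline): the same Counter
# objects can surface the most frequent surface forms, e.g. locations in the
# training split.
loc_counter = Counter()
for item in dataset['train']:
    loc_counter.update(item['entities']['LOC'])
loc_counter.most_common(5)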
|
|
|
|
|
|
|
|
|
|
|
|