bart-base-cnn
Files changed:
- README.md (+23, -0)
- config.json (+69, -0)
- pytorch_model.bin (+3, -0)
- special_tokens_map.json (+1, -0)
- tokenizer.json (+0, -0)
- tokenizer_config.json (+1, -0)
README.md
ADDED
@@ -0,0 +1,23 @@
+---
+language: en
+license: apache-2.0
+datasets:
+- cnn_dailymail
+tags:
+- summarization
+- bart
+---
+# BART base model fine-tuned on CNN DailyMail using Teachable-NLP
+
+- This model is [bart-base](https://huggingface.co/facebook/bart-base) fine-tuned on the [CNN DailyMail dataset](https://huggingface.co/datasets/cnn_dailymail).
+
+The BART model was proposed by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer on 29 Oct 2019. According to the abstract:
+
+BART uses a standard seq2seq/machine translation architecture with a bidirectional encoder (like BERT) and a left-to-right decoder (like GPT).
+
+The pretraining task involves randomly shuffling the order of the original sentences and a novel in-filling scheme, where spans of text are replaced with a single mask token.
+
+BART is particularly effective when fine-tuned for text generation but also works well for comprehension tasks. It matches the performance of RoBERTa with comparable training resources on GLUE and SQuAD, and achieves new state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains of up to 6 ROUGE.
+
+The authors' code can be found here:
+https://github.com/pytorch/fairseq/tree/master/examples/bart
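The card itself ships no usage code, so here is a minimal sketch of running the checkpoint for summarization. The repo id `ainize/bart-base-cnn` is an assumption (substitute the path this model is actually published under); the generation settings mirror the `summarization_cnn` entry in config.json below.

```python
# Minimal usage sketch; "ainize/bart-base-cnn" is an ASSUMED repo id --
# replace it with wherever this checkpoint is actually hosted.
from transformers import BartForConditionalGeneration, BartTokenizer

model_id = "ainize/bart-base-cnn"  # assumption, not confirmed by the card
tokenizer = BartTokenizer.from_pretrained(model_id)
model = BartForConditionalGeneration.from_pretrained(model_id)

article = (
    "The tower is 324 metres tall, about the same height as an 81-storey "
    "building, and was the tallest man-made structure in the world for 41 years."
)

# Encode the article, truncating to the model's 1024-token context window.
inputs = tokenizer(article, max_length=1024, truncation=True, return_tensors="pt")

# Beam-search settings taken from the "summarization_cnn" task_specific_params
# in config.json below.
summary_ids = model.generate(
    inputs["input_ids"],
    num_beams=4,
    length_penalty=2.0,
    min_length=56,
    max_length=142,
    no_repeat_ngram_size=3,
    early_stopping=True,
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```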
config.json
ADDED
@@ -0,0 +1,69 @@
+{
+  "activation_dropout": 0.1,
+  "activation_function": "gelu",
+  "add_bias_logits": false,
+  "add_final_layer_norm": false,
+  "architectures": [
+    "BartModel",
+    "BartForConditionalGeneration",
+    "BartForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "classif_dropout": 0.1,
+  "d_model": 768,
+  "decoder_attention_heads": 12,
+  "decoder_ffn_dim": 3072,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "decoder_start_token_id": 2,
+  "dropout": 0.1,
+  "early_stopping": true,
+  "encoder_attention_heads": 12,
+  "encoder_ffn_dim": 3072,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 2,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2"
+  },
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2
+  },
+  "max_position_embeddings": 1024,
+  "model_type": "bart",
+  "normalize_before": false,
+  "normalize_embedding": true,
+  "num_hidden_layers": 6,
+  "pad_token_id": 1,
+  "scale_embedding": false,
+  "num_beams": 4,
+  "no_repeat_ngram_size": 3,
+  "task_specific_params": {
+    "summarization": {
+      "length_penalty": 1.0,
+      "max_length": 128,
+      "min_length": 12,
+      "num_beams": 4
+    },
+    "summarization_cnn": {
+      "length_penalty": 2.0,
+      "max_length": 142,
+      "min_length": 56,
+      "num_beams": 4
+    },
+    "summarization_xsum": {
+      "length_penalty": 1.0,
+      "max_length": 62,
+      "min_length": 11,
+      "num_beams": 6
+    }
+  },
+  "vocab_size": 50265
+}
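The `task_specific_params` block stores per-task generation presets; depending on the transformers version, pipelines may or may not apply them automatically. A sketch of reading the CNN/DailyMail preset from the config and applying it by hand (reusing `model_id`, `model`, and `inputs` from the earlier sketch):

```python
# Sketch: pass the "summarization_cnn" preset to generate() explicitly
# rather than relying on the library to apply it.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(model_id)  # model_id from the earlier sketch
cnn_preset = config.task_specific_params["summarization_cnn"]
# cnn_preset == {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}
summary_ids = model.generate(inputs["input_ids"], **cnn_preset)
```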
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faa42bee8c4b0f3d11d375f7eddbb63c671b42e26f9e9212aa6ade1203c66988
+size 557985683
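The weights file is checked in as a Git LFS pointer: the repo stores only the object id (a SHA-256 of the file contents) and its size in bytes, while the ~558 MB payload lives in LFS storage. A downloaded copy can be checked against the pointer, for example:

```python
# Sketch: verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

EXPECTED_OID = "faa42bee8c4b0f3d11d375f7eddbb63c671b42e26f9e9212aa6ade1203c66988"
EXPECTED_SIZE = 557985683

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize("pytorch_model.bin") == EXPECTED_SIZE, "size mismatch"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
```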
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json
ADDED
The diff for this file is too large to render; see the raw file.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "special_tokens_map_file": null, "name_or_path": "bart-base"}
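Both tokenizer files use the RoBERTa-style special-token layout BART inherits: `<s>` doubles as BOS/CLS, `</s>` as EOS/SEP, and `<mask>` has `lstrip: true` so it absorbs a preceding space. A quick sanity check, again assuming the repo id from the first sketch:

```python
# Sketch: confirm the loaded tokenizer reflects special_tokens_map.json above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(model_id)  # model_id from the earlier sketch
print(tok.special_tokens_map)
# expected: {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>',
#            'sep_token': '</s>', 'pad_token': '<pad>', 'cls_token': '<s>',
#            'mask_token': '<mask>'}
```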