First version of the your-model-name model and tokenizer.
- SquadQA +1 -0
- main.py +2 -2
- test-squad-trained/config.json +0 -23
- test-squad-trained/pytorch_model.bin +0 -3
- test-squad-trained/special_tokens_map.json +0 -1
- test-squad-trained/tokenizer_config.json +0 -1
- test-squad-trained/vocab.txt +0 -0
SquadQA ADDED
@@ -0,0 +1 @@
+Subproject commit 4c80bc7e918707ebec6205c2c532e646f89bff7f
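The `+Subproject commit …` line is a gitlink: SquadQA is a nested git repository pinned at that commit, not files tracked directly by the parent repo. A minimal sketch of how such an entry is typically recorded, using subprocess the same way main.py does; the remote URL is a hypothetical placeholder, not taken from this diff:

```python
import subprocess

# Hypothetical URL; the actual location of the SquadQA repo is not shown in this diff.
SQUADQA_URL = "https://example.com/SquadQA.git"

# Registering the nested repo as a submodule stores a gitlink entry
# ("Subproject commit <sha>") in the parent repository's tree.
subprocess.call(["git", "submodule", "add", SQUADQA_URL, "SquadQA"])
subprocess.call(["git", "commit", "-m", "Pin SquadQA subproject"])
```

A gitlink can also appear implicitly when `git add --all` (as called in main.py below) sweeps up a directory that is itself a git clone, which may be what happened here.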
main.py CHANGED
@@ -49,8 +49,8 @@ for epoch in range(2):
     optim.step()
 print("Done")
 model.eval()
-model.save_pretrained("
-data.tokenizer.save_pretrained("
+model.save_pretrained("SquadQA")
+data.tokenizer.save_pretrained("SquadQA")
 
 
 subprocess.call(["git", "add","--all"])
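For context, `save_pretrained` writes the model weights and config into the target directory, and the tokenizer call adds the vocab and tokenizer JSON files; the matching `from_pretrained` calls reload everything. A minimal sketch, assuming the DistilBERT question-answering classes implied by the deleted config.json below (the fast-tokenizer variant is an assumption):

```python
from transformers import DistilBertForQuestionAnswering, DistilBertTokenizerFast

# Reload the artifacts written by save_pretrained("SquadQA"):
# config.json + pytorch_model.bin for the model, vocab/JSON files for the tokenizer.
model = DistilBertForQuestionAnswering.from_pretrained("SquadQA")
tokenizer = DistilBertTokenizerFast.from_pretrained("SquadQA")

# Illustrative question/context pair (not from the training data).
inputs = tokenizer("Where was SQuAD created?",
                   "SQuAD was created at Stanford University.",
                   return_tensors="pt")
outputs = model(**inputs)  # outputs.start_logits / outputs.end_logits score the answer span
```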
test-squad-trained/config.json DELETED
@@ -1,23 +0,0 @@
-{
-  "_name_or_path": "distilbert-base-uncased",
-  "activation": "gelu",
-  "architectures": [
-    "DistilBertForQuestionAnswering"
-  ],
-  "attention_dropout": 0.1,
-  "dim": 768,
-  "dropout": 0.1,
-  "hidden_dim": 3072,
-  "initializer_range": 0.02,
-  "max_position_embeddings": 512,
-  "model_type": "distilbert",
-  "n_heads": 12,
-  "n_layers": 6,
-  "pad_token_id": 0,
-  "qa_dropout": 0.1,
-  "seq_classif_dropout": 0.2,
-  "sinusoidal_pos_embds": false,
-  "tie_weights_": true,
-  "transformers_version": "4.3.2",
-  "vocab_size": 30522
-}
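Each key above maps directly onto a `DistilBertConfig` field, which is how `from_pretrained` reconstructs the architecture. A sketch of reading the file back, assuming the directory still exists (this commit deletes it):

```python
from transformers import DistilBertConfig

# from_pretrained locates config.json inside the directory.
config = DistilBertConfig.from_pretrained("test-squad-trained")
assert config.n_layers == 6 and config.n_heads == 12 and config.dim == 768
```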
test-squad-trained/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:423cba4a34bfc72ad38bc33a07f81fd45f433c8e8f15383b8b35c95be8a1b26e
-size 265498527
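These three lines are a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 of the real file and `size` its byte length (about 265 MB here). A minimal sketch of verifying a downloaded file against the pointer:

```python
import hashlib
import os

def lfs_sha256(path, chunk_size=1 << 20):
    """Stream a file through SHA-256 in 1 MiB chunks; the digest matches the LFS oid."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected values taken from the pointer above.
EXPECTED_OID = "423cba4a34bfc72ad38bc33a07f81fd45f433c8e8f15383b8b35c95be8a1b26e"
EXPECTED_SIZE = 265498527

path = "test-squad-trained/pytorch_model.bin"
assert os.path.getsize(path) == EXPECTED_SIZE
assert lfs_sha256(path) == EXPECTED_OID
```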
test-squad-trained/special_tokens_map.json DELETED
@@ -1 +0,0 @@
-{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
test-squad-trained/tokenizer_config.json DELETED
@@ -1 +0,0 @@
-{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "name_or_path": "distilbert-base-uncased"}
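Both JSON files above (and vocab.txt below) are regenerated by every `tokenizer.save_pretrained` call; `from_pretrained` merges them back into a single tokenizer object. A sketch, again assuming the directory has not yet been deleted:

```python
from transformers import DistilBertTokenizer

# Reads vocab.txt plus tokenizer_config.json and special_tokens_map.json.
tok = DistilBertTokenizer.from_pretrained("test-squad-trained")
assert tok.mask_token == "[MASK]" and tok.model_max_length == 512
```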
test-squad-trained/vocab.txt DELETED
The diff for this file is too large to render.