calpt committed
Commit ece95bf
1 Parent(s): 3682e3d

Add adapter gpt2_lingaccept_cola_pfeiffer version 1

README.md ADDED
@@ -0,0 +1,55 @@
+ ---
+ tags:
+ - gpt2
+ - text-classification
+ - adapter-transformers
+ - adapterhub:lingaccept/cola
+ license: "apache-2.0"
+ ---
+
+ # Adapter `gpt2_lingaccept_cola_pfeiffer` for gpt2
+
+ An adapter for gpt2 in the Pfeiffer architecture, trained on the CoLA dataset for 10 epochs with a learning rate of 1e-4.
+
+ **This adapter was created for usage with the [Adapters](https://github.com/Adapter-Hub/adapters) library.**
+
+ ## Usage
+
+ First, install `adapters`:
+
+ ```
+ pip install -U adapters
+ ```
+
+ Now, the adapter can be loaded and activated like this:
+
+ ```python
+ from adapters import AutoAdapterModel
+
+ model = AutoAdapterModel.from_pretrained("gpt2")
+ adapter_name = model.load_adapter("AdapterHub/gpt2_lingaccept_cola_pfeiffer")
+ model.set_active_adapters(adapter_name)
+ ```
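+
+ To run inference with the loaded adapter, here is a minimal sketch (an editorial illustration, not part of the original model card; the label mapping is taken from `head_config.json` below and the example sentence is hypothetical):
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("gpt2")
+
+ # Label mapping as declared in head_config.json
+ id2label = {0: "unacceptable", 1: "acceptable"}
+
+ inputs = tokenizer("The boy quickly ran home.", return_tensors="pt")
+ with torch.no_grad():
+     logits = model(**inputs).logits  # `model` from the snippet above
+ print(id2label[logits.argmax(dim=-1).item()])
+ ```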
+
+ ## Architecture & Training
+
+ - Adapter architecture: pfeiffer
+ - Prediction head: classification
+ - Dataset: [CoLA](https://nyu-mll.github.io/CoLA/)
+
+ ## Author Information
+
+ - Author name(s): Hannah Sterz
+ - Author email: [email protected]
+ - Author links: [Twitter](https://twitter.com/@h_sterz)
+
+ ## Citation
+
+ ```bibtex
+
+ ```
+
+ *This adapter has been auto-imported from https://github.com/Adapter-Hub/Hub/blob/master/adapters/ukp/gpt2_lingaccept_cola_pfeiffer.yaml*.
adapter_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "config": {
+     "adapter_residual_before_ln": false,
+     "cross_adapter": false,
+     "dropout": 0.0,
+     "factorized_phm_W": true,
+     "factorized_phm_rule": false,
+     "hypercomplex_nonlinearity": "glorot-uniform",
+     "init_weights": "bert",
+     "inv_adapter": null,
+     "inv_adapter_reduction_factor": null,
+     "is_parallel": false,
+     "learn_phm": true,
+     "leave_out": [],
+     "ln_after": false,
+     "ln_before": false,
+     "mh_adapter": false,
+     "non_linearity": "relu",
+     "original_ln_after": true,
+     "original_ln_before": true,
+     "output_adapter": true,
+     "phm_bias": true,
+     "phm_c_init": "normal",
+     "phm_dim": 4,
+     "phm_init_range": 0.0001,
+     "phm_layer": false,
+     "phm_rank": 1,
+     "reduction_factor": 16,
+     "residual_before_ln": true,
+     "scaling": 1.0,
+     "shared_W_phm": false,
+     "shared_phm_rule": true,
+     "use_gating": false
+   },
+   "hidden_size": 768,
+   "model_class": "GPT2AdapterModel",
+   "model_name": "gpt2",
+   "model_type": "gpt2",
+   "name": "cola",
+   "version": "0.2.0"
+ }
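For reference, a minimal sketch of how an adapter with the same key settings could be added from scratch with the `adapters` library. This is an editorial illustration, not part of the commit; it assumes the current `SeqBnConfig` class (the successor name for the Pfeiffer bottleneck config) and mirrors only the distinguishing fields above:

```python
from adapters import AutoAdapterModel, SeqBnConfig

model = AutoAdapterModel.from_pretrained("gpt2")

# Sequential bottleneck after the FFN block only (mh_adapter: false,
# output_adapter: true), reduction factor 16, ReLU non-linearity --
# matching the adapter_config.json stored in this commit.
config = SeqBnConfig(reduction_factor=16, non_linearity="relu")
model.add_adapter("cola", config=config)
```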
head_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "config": {
+     "activation_function": null,
+     "bias": false,
+     "dropout_prob": null,
+     "head_type": "classification",
+     "label2id": {
+       "acceptable": 1,
+       "unacceptable": 0
+     },
+     "layers": 1,
+     "num_labels": 2,
+     "use_pooler": false
+   },
+   "hidden_size": 768,
+   "model_class": "GPT2AdapterModel",
+   "model_name": "gpt2",
+   "model_type": "gpt2",
+   "name": "cola",
+   "version": "0.2.0"
+ }
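A matching prediction head could be recreated like this; again an editorial sketch, assuming the `add_classification_head` helper of the `adapters` library and its keyword names. The adapter loaded in the README snippet already ships this head, so this is only needed when rebuilding the setup from scratch:

```python
# Mirrors head_config.json: a single linear layer, two labels, no pooler.
model.add_classification_head(
    "cola",
    num_labels=2,
    layers=1,
    activation_function=None,
    use_pooler=False,
    id2label={0: "unacceptable", 1: "acceptable"},
)
```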
pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c668b9b54242fca60ecd62b04c9a90cc83c012a921b89646a3c0b097779f4b47
+ size 3594918
pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0c876a6ae15fdec8a4ed6f9eb8cbd4df9dd39c23e06dcb8c9bba3547e30e6ff
+ size 7443