Aaditya1 committed on
Commit
a599152
1 Parent(s): 373715d

Create configuration_graphormer.pyx

Files changed (1)
  1. configuration_graphormer.pyx +216 -0
configuration_graphormer.pyx ADDED
@@ -0,0 +1,216 @@
+ # coding=utf-8
+ # Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Graphormer model configuration"""
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+     # pcqm4mv1 now deprecated
+     "graphormer-base": "https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2/resolve/main/config.json",
+     # See all Graphormer models at https://huggingface.co/models?filter=graphormer
+ }
+
+
+ class GraphormerConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`~GraphormerModel`]. It is used to instantiate a
+     Graphormer model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a configuration similar to that of the Graphormer
+     [graphormer-base-pcqm4mv2](https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         num_classes (`int`, *optional*, defaults to 1):
+             Number of target classes or labels, set to n for binary classification of n tasks.
+         num_atoms (`int`, *optional*, defaults to 512*9):
+             Number of node types in the graphs.
+         num_edges (`int`, *optional*, defaults to 512*3):
+             Number of edge types in the graphs.
+         num_in_degree (`int`, *optional*, defaults to 512):
+             Number of in-degree types in the input graphs.
+         num_out_degree (`int`, *optional*, defaults to 512):
+             Number of out-degree types in the input graphs.
+         num_spatial (`int`, *optional*, defaults to 512):
+             Number of spatial position types in the input graphs.
+         num_edge_dis (`int`, *optional*, defaults to 128):
+             Number of edge distances in the input graphs.
+         multi_hop_max_dist (`int`, *optional*, defaults to 5):
+             Maximum distance of multi-hop edges between two nodes.
+         spatial_pos_max (`int`, *optional*, defaults to 1024):
+             Maximum distance between nodes in the graph attention bias matrices, used during preprocessing and
+             collation.
+         edge_type (`str`, *optional*, defaults to `"multi_hop"`):
+             Type of edge relation chosen.
+         max_nodes (`int`, *optional*, defaults to 512):
+             Maximum number of nodes which can be parsed for the input graphs.
+         share_input_output_embed (`bool`, *optional*, defaults to `False`):
+             Shares the embedding layer between encoder and decoder - careful, `True` is not implemented.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers.
+         embedding_dim (`int`, *optional*, defaults to 768):
+             Dimension of the embedding layer in the encoder.
+         ffn_embedding_dim (`int`, *optional*, defaults to 768):
+             Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads in the encoder.
+         self_attention (`bool`, *optional*, defaults to `True`):
+             Model is self attentive (`False` is not implemented).
+         activation_fn (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         dropout (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_dropout (`float`, *optional*, defaults to 0.1):
+             The dropout probability for the attention weights.
+         layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+             for more details.
+         bias (`bool`, *optional*, defaults to `True`):
+             Uses bias in the attention module - unsupported at the moment.
+         embed_scale (`float`, *optional*, defaults to `None`):
+             Scaling factor for the node embeddings.
+         num_trans_layers_to_freeze (`int`, *optional*, defaults to 0):
+             Number of transformer layers to freeze.
+         encoder_normalize_before (`bool`, *optional*, defaults to `False`):
+             Normalize the features before encoding the graph, i.e. apply the layer norm before each encoder block.
+         pre_layernorm (`bool`, *optional*, defaults to `False`):
+             Apply layernorm before self attention and the feed forward network. Without this, post layernorm will be
+             used.
+         apply_graphormer_init (`bool`, *optional*, defaults to `False`):
+             Apply a custom Graphormer initialization to the model before training.
+         freeze_embeddings (`bool`, *optional*, defaults to `False`):
+             Freeze the embedding layer, or train it along with the model.
+         q_noise (`float`, *optional*, defaults to 0.0):
+             Amount of quantization noise (see "Training with Quantization Noise for Extreme Model Compression"). (For
+             more detail, see fairseq's documentation on quant_noise).
+         qn_block_size (`int`, *optional*, defaults to 8):
+             Size of the blocks for subsequent quantization with iPQ (see q_noise).
+         kdim (`int`, *optional*, defaults to `None`):
+             Dimension of the key in the attention, if different from the other values.
+         vdim (`int`, *optional*, defaults to `None`):
+             Dimension of the value in the attention, if different from the other values.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models).
+         traceable (`bool`, *optional*, defaults to `False`):
+             Changes return value of the encoder's inner_state to stacked tensors.
+
+     Example:
+         ```python
+         >>> from transformers import GraphormerForGraphClassification, GraphormerConfig
+
+         >>> # Initializing a Graphormer graphormer-base-pcqm4mv2 style configuration
+         >>> configuration = GraphormerConfig()
+
+         >>> # Initializing a model from the graphormer-base-pcqm4mv2 style configuration
+         >>> model = GraphormerForGraphClassification(configuration)
+
+         >>> # Accessing the model configuration
+         >>> configuration = model.config
+         ```
+     """
+     model_type = "graphormer"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         num_classes: int = 1,
+         num_atoms: int = 512 * 9,
+         num_edges: int = 512 * 3,
+         num_in_degree: int = 512,
+         num_out_degree: int = 512,
+         num_spatial: int = 512,
+         num_edge_dis: int = 128,
+         multi_hop_max_dist: int = 5,  # sometimes is 20
+         spatial_pos_max: int = 1024,
+         edge_type: str = "multi_hop",
+         max_nodes: int = 512,
+         share_input_output_embed: bool = False,
+         num_hidden_layers: int = 12,
+         embedding_dim: int = 768,
+         ffn_embedding_dim: int = 768,
+         num_attention_heads: int = 32,
+         dropout: float = 0.1,
+         attention_dropout: float = 0.1,
+         layerdrop: float = 0.0,
+         encoder_normalize_before: bool = False,
+         pre_layernorm: bool = False,
+         apply_graphormer_init: bool = False,
+         activation_fn: str = "gelu",
+         embed_scale: float = None,
+         freeze_embeddings: bool = False,
+         num_trans_layers_to_freeze: int = 0,
+         traceable: bool = False,
+         q_noise: float = 0.0,
+         qn_block_size: int = 8,
+         kdim: int = None,
+         vdim: int = None,
+         bias: bool = True,
+         self_attention: bool = True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         **kwargs,
+     ):
+         self.num_classes = num_classes
+         self.num_atoms = num_atoms
+         self.num_in_degree = num_in_degree
+         self.num_out_degree = num_out_degree
+         self.num_edges = num_edges
+         self.num_spatial = num_spatial
+         self.num_edge_dis = num_edge_dis
+         self.edge_type = edge_type
+         self.multi_hop_max_dist = multi_hop_max_dist
+         self.spatial_pos_max = spatial_pos_max
+         self.max_nodes = max_nodes
+         self.num_hidden_layers = num_hidden_layers
+         self.embedding_dim = embedding_dim
+         self.hidden_size = embedding_dim
+         self.ffn_embedding_dim = ffn_embedding_dim
+         self.num_attention_heads = num_attention_heads
+         self.dropout = dropout
+         self.attention_dropout = attention_dropout
+         self.layerdrop = layerdrop
+         self.encoder_normalize_before = encoder_normalize_before
+         self.pre_layernorm = pre_layernorm
+         self.apply_graphormer_init = apply_graphormer_init
+         self.activation_fn = activation_fn
+         self.embed_scale = embed_scale
+         self.freeze_embeddings = freeze_embeddings
+         self.num_trans_layers_to_freeze = num_trans_layers_to_freeze
+         self.share_input_output_embed = share_input_output_embed
+         self.traceable = traceable
+         self.q_noise = q_noise
+         self.qn_block_size = qn_block_size
+
+         # These parameters are here for future extensions
+         # at the moment, the model only supports self attention
+         self.kdim = kdim
+         self.vdim = vdim
+         self.self_attention = self_attention
+         self.bias = bias
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             **kwargs,
+         )
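
For context, here is a short usage sketch of the configuration class added in this commit. It is a minimal sketch, assuming `GraphormerConfig` and `GraphormerForGraphClassification` are importable from `transformers` (as the docstring example above suggests) and that the `clefourrier/graphormer-base-pcqm4mv2` checkpoint referenced in `GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP` is reachable on the Hub; the hyperparameter values are illustrative, not recommendations.

```python
# Minimal usage sketch for the configuration above. Assumes
# GraphormerConfig and GraphormerForGraphClassification are exposed
# through `transformers`, as in the docstring example.
from transformers import GraphormerConfig, GraphormerForGraphClassification

# Override a few of the __init__ defaults to build a smaller model.
# The values below are illustrative only.
config = GraphormerConfig(
    num_classes=2,        # two-class graph classification
    num_hidden_layers=6,  # half the default depth of 12
    embedding_dim=256,    # keep divisible by num_attention_heads
    ffn_embedding_dim=256,
    num_attention_heads=8,
)
model = GraphormerForGraphClassification(config)

# Configurations round-trip to disk like any PretrainedConfig.
config.save_pretrained("./graphormer-small")
reloaded = GraphormerConfig.from_pretrained("./graphormer-small")
assert reloaded.num_hidden_layers == 6

# Or start from the checkpoint referenced in
# GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP (requires Hub access).
remote = GraphormerConfig.from_pretrained("clefourrier/graphormer-base-pcqm4mv2")
```

Note that, as in most multi-head attention implementations, `embedding_dim` is typically expected to be divisible by `num_attention_heads` (here 256 / 8 = 32 per head).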