clincolnoz committed
Commit d805e08 · Parent(s): c76fedb
epoch 60 of 100

- README.md +52 -52
- optimizer.pt +1 -1
- pytorch_model.bin +1 -1
- rng_state.pth +1 -1
- scaler.pt +1 -1
- scheduler.pt +1 -1
- trainer_state.json +0 -0
README.md
CHANGED
@@ -84,26 +84,26 @@ You can use this model directly with a pipeline for masked language modeling:
 >>> unmasker = pipeline('fill-mask', model='clincolnoz/LessSexistBERT')
 >>> unmasker("Hello I'm a [MASK] model.")
 
-[{'score': 0.…
+[{'score': 0.6341338753700256,
   'token': 3287,
   'token_str': 'male',
   'sequence': "hello i'm a male model."},
- {'score': 0.…
-  'token': …
-  'token_str': '…
-  'sequence': "hello i'm a …
- {'score': 0.…
-  'token': 4827,
-  'token_str': 'fashion',
-  'sequence': "hello i'm a fashion model."},
- {'score': 0.05920252203941345,
+ {'score': 0.056475281715393066,
+  'token': 3565,
+  'token_str': 'super',
+  'sequence': "hello i'm a super model."},
+ {'score': 0.025802666321396828,
   'token': 2535,
   'token_str': 'role',
   'sequence': "hello i'm a role model."},
- {'score': 0.…
-  'token': …
-  'token_str': '…
-  'sequence': "hello i'm a …
+ {'score': 0.021720068529248238,
+  'token': 2931,
+  'token_str': 'female',
+  'sequence': "hello i'm a female model."},
+ {'score': 0.02069762349128723,
+  'token': 4094,
+  'token_str': 'scale',
+  'sequence': "hello i'm a scale model."}]
 ```
 
 Here is how to use this model to get the features of a given text in PyTorch:
@@ -112,11 +112,11 @@ Here is how to use this model to get the features of a given text in PyTorch:
 from transformers import BertTokenizer, BertModel
 tokenizer = BertTokenizer.from_pretrained(
     'clincolnoz/LessSexistBERT',
-    revision='v0.…
+    revision='v0.60' # tag name, or branch name, or commit hash
 )
 model = BertModel.from_pretrained(
     'clincolnoz/LessSexistBERT',
-    revision='v0.…
+    revision='v0.60' # tag name, or branch name, or commit hash
 )
 text = "Replace me by any text you'd like."
 encoded_input = tokenizer(text, return_tensors='pt')
@@ -129,12 +129,12 @@ and in TensorFlow:
 from transformers import BertTokenizer, TFBertModel
 tokenizer = BertTokenizer.from_pretrained(
     'clincolnoz/LessSexistBERT',
-    revision='v0.…
+    revision='v0.60' # tag name, or branch name, or commit hash
 )
 model = TFBertModel.from_pretrained(
     'clincolnoz/LessSexistBERT',
     from_pt=True,
-    revision='v0.…
+    revision='v0.60' # tag name, or branch name, or commit hash
 )
 text = "Replace me by any text you'd like."
 encoded_input = tokenizer(text, return_tensors='tf')
@@ -151,49 +151,49 @@ neutral, this model can have biased predictions:
 >>> unmasker = pipeline('fill-mask', model='clincolnoz/LessSexistBERT')
 >>> unmasker("The man worked as a [MASK].")
 
-[{'score': 0.…
-  'token': …
-  'token_str': '…
-  'sequence': 'the man worked as a …
- {'score': 0.…
+[{'score': 0.21743303537368774,
+  'token': 7155,
+  'token_str': 'scientist',
+  'sequence': 'the man worked as a scientist.'},
+ {'score': 0.09627354890108109,
+  'token': 18968,
+  'token_str': 'salesman',
+  'sequence': 'the man worked as a salesman.'},
+ {'score': 0.07860496640205383,
   'token': 8872,
   'token_str': 'cop',
   'sequence': 'the man worked as a cop.'},
- {'score': 0.…
-  'token': …
-  'token_str': '…
-  'sequence': 'the man worked as a …
- {'score': 0.…
-  'token': …
-  'token_str': '…
-  'sequence': 'the man worked as a …
- {'score': 0.042874787002801895,
-  'token': 15034,
-  'token_str': 'psychologist',
-  'sequence': 'the man worked as a psychologist.'}]
+ {'score': 0.050374675542116165,
+  'token': 8930,
+  'token_str': 'consultant',
+  'sequence': 'the man worked as a consultant.'},
+ {'score': 0.035686127841472626,
+  'token': 3213,
+  'token_str': 'writer',
+  'sequence': 'the man worked as a writer.'}]
 
 >>> unmasker("The woman worked as a [MASK].")
 
-[{'score': 0.…
-  'token': …
-  'token_str': '…
-  'sequence': 'the woman worked as a …
- {'score': 0.…
-  'token': 8872,
-  'token_str': 'cop',
-  'sequence': 'the woman worked as a cop.'},
- {'score': 0.0551241897046566,
-  'token': 15893,
-  'token_str': 'mechanic',
-  'sequence': 'the woman worked as a mechanic.'},
- {'score': 0.039989013224840164,
+[{'score': 0.11849718540906906,
+  'token': 8930,
+  'token_str': 'consultant',
+  'sequence': 'the woman worked as a consultant.'},
+ {'score': 0.10927138477563858,
   'token': 3208,
   'token_str': 'manager',
   'sequence': 'the woman worked as a manager.'},
- {'score': 0.…
-  'token': …
-  'token_str': '…
-  'sequence': 'the woman worked as a …
+ {'score': 0.09836961328983307,
+  'token': 8872,
+  'token_str': 'cop',
+  'sequence': 'the woman worked as a cop.'},
+ {'score': 0.08795220404863358,
+  'token': 15812,
+  'token_str': 'bartender',
+  'sequence': 'the woman worked as a bartender.'},
+ {'score': 0.054000891745090485,
+  'token': 6821,
+  'token_str': 'nurse',
+  'sequence': 'the woman worked as a nurse.'}]
 ```
 
 This bias may also affect all fine-tuned versions of this model.
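The README change above pins its examples to `revision='v0.60'`. As a minimal sketch (assuming the `v0.60` tag points at this checkpoint; the commit hash `d805e08` from the header can be used instead to pin this exact commit):

```python
from transformers import BertTokenizer, BertModel

# Pin the exact checkpoint from this commit; 'd805e08' is the commit hash
# shown in the header, and 'v0.60' is assumed to be the matching tag.
tokenizer = BertTokenizer.from_pretrained(
    'clincolnoz/LessSexistBERT',
    revision='d805e08',
)
model = BertModel.from_pretrained(
    'clincolnoz/LessSexistBERT',
    revision='d805e08',
)
```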
optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:524dc9c1f7c6598a129e9e8bfe33d49c9624a2a978212f689a242907e3a230b2
 size 881735429
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:1dfd1ef19d11718d73822b928b970b94c246015470c75694b4a238ceb5b54bc1
 size 440881865
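The weight files in this commit are stored via Git LFS, so the diff records only the new object id and size. A minimal sketch, assuming a local copy of `pytorch_model.bin` has already been downloaded, of verifying it against the pointer committed here:

```python
import hashlib
import os

# Values recorded in the LFS pointer committed above.
EXPECTED_OID = "1dfd1ef19d11718d73822b928b970b94c246015470c75694b4a238ceb5b54bc1"
EXPECTED_SIZE = 440881865

def sha256sum(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so a large checkpoint need not fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = "pytorch_model.bin"  # hypothetical local download of the LFS object
assert os.path.getsize(path) == EXPECTED_SIZE, "size does not match the pointer"
assert sha256sum(path) == EXPECTED_OID, "sha256 does not match the pointer"
print("pytorch_model.bin matches the LFS pointer in this commit")
```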
rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:698596d1d7fe05ffb0ee317704dbe2c7eb8fe7fd3430f63142dcb14b7dee5efe
 size 14575
scaler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:440d2c18e937c9055c3009823978273755788ab4e97a8c4b086fd1e62befc9ad
 size 557
scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:58c8fcdbeec020740e4aef4eb2774809794bd9e113bedc250330cf4813e86b7e
 size 627
trainer_state.json
CHANGED
The diff for this file is too large to render. See raw diff.
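The commit message reads `epoch 60 of 100`, and that progress is what `trainer_state.json` tracks even though its diff is too large to render. A minimal sketch, assuming the usual Hugging Face `Trainer` state fields (`epoch`, `global_step`, `log_history`), of inspecting a downloaded copy of the raw file:

```python
import json

# Hypothetical local copy of the raw trainer_state.json from this commit.
with open("trainer_state.json") as f:
    state = json.load(f)

# Field names assume the standard Trainer state layout.
print("epoch:", state.get("epoch"))
print("global step:", state.get("global_step"))
print("logged entries:", len(state.get("log_history", [])))
```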