Upload README.md with huggingface_hub
README.md CHANGED
@@ -5,7 +5,7 @@ tags:
 - 奇虎360
 - RAG-reranking
 model-index:
-- name: 360Zhinao-
+- name: 360Zhinao-1_8B-reranking
   results:
   - task:
       type: Reranking
@@ -70,7 +70,7 @@ We have validated the performance of our model on the [mteb-chinese-reranking le
 
 | Model | T2Reranking | MMarcoReranking | CMedQAv1 | CMedQAv2 | Avg |
 |:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|
-| **360Zhinao-
+| **360Zhinao-1_8B-Reranking** | **68.55** | **37.29** | **86.75** | **87.92** | **70.13** |
 | piccolo-large-zh-v2 | 67.15 | 33.39 | 90.14 | 89.31 | 70 |
 | Baichuan-text-embedding | 67.85 | 34.3 | 88.46 | 88.06 | 69.67 |
 | stella-mrl-large-zh-v3.5-1792d | 66.43 | 28.85 | 89.18 | 89.33 | 68.45 |
@@ -95,6 +95,10 @@ cd flash-attention && pip install .
 # No need to install the following if the flash-attn version is above 2.1.1.
 # pip install csrc/rotary
 ```
+You can also use the following command to install flash-attention.
+```bash
+FLASH_ATTENTION_FORCE_BUILD=TRUE ./miniconda3/bin/python -m pip install flash-attn==2.3.6
+```
 
 # Model Introduction
 
@@ -262,7 +266,7 @@ class FlagRerankerCustom:
 
         all_scores = []
         for start_index in tqdm(range(0, len(sentence_pairs), batch_size), desc="Compute Scores",
-                                disable=
+                                disable=False):
             sentences_batch = sentence_pairs[start_index:start_index + batch_size]  # [[q,ans],[q, ans]...]
             inputs = preprocess(sources=sentences_batch, tokenizer=self.tokenizer,max_len=1024,device=self.device)
             scores = self.model(**inputs, return_dict=True).logits.view(-1, ).float()
@@ -274,7 +278,7 @@ class FlagRerankerCustom:
 
 
 if __name__ == "__main__":
-    model_name_or_path = "360Zhinao-
+    model_name_or_path = "360Zhinao-1_8B-Reranking"
     model = FlagRerankerCustom(model_name_or_path, use_fp16=False)
     inputs=[["What Color Is the Sky","Blue"], ["What Color Is the Sky","Pink"],]
     ret = model.compute_score(inputs)
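The commit adds a second install path for flash-attention, pinning `flash-attn==2.3.6` with a forced source build. As a quick sanity check after either install path, the short sketch below (not part of the README; it assumes flash-attn was installed into the interpreter you run it with) confirms the package imports and reports its version.

```python
# Sanity check for the flash-attn install above (assumption: it was installed into
# this Python environment, e.g. ./miniconda3/bin/python from the pinned command).
import flash_attn
from flash_attn import flash_attn_func  # fused attention entry point in flash-attn 2.x

print(flash_attn.__version__)  # expected: 2.3.6 if the pinned pip command was used
```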