Update README.md
Browse files
README.md
CHANGED
@@ -29,6 +29,27 @@ configs:
|
|
29 |
- split: test
|
30 |
path: data/test-*
|
31 |
---
|
32 |
-
|
|
|
|
|
|
|
|
|
|
|
33 |
|
34 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
- split: test
|
30 |
path: data/test-*
|
31 |
---
|
32 |
+
To evaluate this dataset with the CoIR evaluation framework, use the code below:
|
33 |
+
```python
|
34 |
+
import coir
|
35 |
+
from coir.data_loader import get_tasks
|
36 |
+
from coir.evaluation import COIR
|
37 |
+
from coir.models import YourCustomDEModel
|
38 |
|
39 |
+
model_name = "intfloat/e5-base-v2"
|
40 |
+
|
41 |
+
# Load the model
|
42 |
+
model = YourCustomDEModel(model_name=model_name)
|
43 |
+
|
44 |
+
# Get tasks
|
45 |
+
# all tasks: ["codetrans-dl","stackoverflow-qa","apps","codefeedback-mt","codefeedback-st","codetrans-contest","synthetic-
|
46 |
+
# text2sql","cosqa","codesearchnet","codesearchnet-ccr"]
|
47 |
+
tasks = get_tasks(tasks=["codetrans-dl"])
|
48 |
+
|
49 |
+
# Initialize evaluation
|
50 |
+
evaluation = COIR(tasks=tasks,batch_size=128)
|
51 |
+
|
52 |
+
# Run evaluation
|
53 |
+
results = evaluation.run(model, output_folder=f"results/{model_name}")
|
54 |
+
print(results)
|
55 |
+
```
|