Commit 3c831f2 (parent: 9aa3f7e), committed by codelion

Upload _script_for_eval.py

Files changed (1): _script_for_eval.py (+27 -8)
_script_for_eval.py CHANGED
@@ -7,15 +7,15 @@ import datetime
 import subprocess
 import argparse
 import re
+import multiprocessing
+import numpy as np
 from openai import OpenAI
 from openai import OpenAIError
 from tqdm import tqdm
 from functools import partial
-import multiprocessing
 from datasets import load_dataset
-from sklearn.feature_extraction.text import TfidfVectorizer
+from sentence_transformers import SentenceTransformer, CrossEncoder
 from sklearn.metrics.pairwise import cosine_similarity
-import numpy as np

 client = OpenAI()

@@ -41,17 +41,36 @@ def fetch_dataset_examples(prompt, num_examples=0, use_similarity=False):
     dataset = load_dataset("patched-codes/synth-vuln-fixes", split="train")

     if use_similarity:
+        # Load a lightweight model for initial retrieval
+        retrieval_model = SentenceTransformer('all-MiniLM-L6-v2')
+
+        # Load the cross-encoder model for reranking
+        rerank_model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
+
+        # Extract user messages
         user_messages = [
             next(msg['content'] for msg in item['messages'] if msg['role'] == 'user')
             for item in dataset
         ]

-        vectorizer = TfidfVectorizer().fit(user_messages + [prompt])
-        user_vectors = vectorizer.transform(user_messages)
-        prompt_vector = vectorizer.transform([prompt])
+        # Encode the prompt and user messages for initial retrieval
+        prompt_embedding = retrieval_model.encode(prompt, convert_to_tensor=False)
+        corpus_embeddings = retrieval_model.encode(user_messages, convert_to_tensor=False, show_progress_bar=True)
+
+        # Perform initial retrieval
+        similarities = cosine_similarity([prompt_embedding], corpus_embeddings)[0]
+        top_k = min(100, len(dataset))
+        top_indices = similarities.argsort()[-top_k:][::-1]
+
+        # Prepare pairs for reranking
+        rerank_pairs = [[prompt, user_messages[idx]] for idx in top_indices]
+
+        # Rerank using the cross-encoder model
+        rerank_scores = rerank_model.predict(rerank_pairs)

-        similarities = cosine_similarity(prompt_vector, user_vectors)[0]
-        top_indices = np.argsort(similarities)[-num_examples:][::-1]
+        # Sort by reranked score and select top examples
+        reranked_indices = [top_indices[i] for i in np.argsort(rerank_scores)[::-1][:num_examples]]
+        top_indices = reranked_indices
     else:
         top_indices = np.random.choice(len(dataset), num_examples, replace=False)

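For context, a minimal self-contained sketch of the retrieve-then-rerank pattern this commit switches to, run on a toy corpus rather than the synth-vuln-fixes dataset. The model names mirror the diff; the corpus, query, and top_k value here are illustrative assumptions, not part of the script.

# Toy illustration of the two-stage retrieval used in fetch_dataset_examples:
# a bi-encoder narrows the corpus by cosine similarity, then a cross-encoder reranks.
import numpy as np
from sentence_transformers import SentenceTransformer, CrossEncoder
from sklearn.metrics.pairwise import cosine_similarity

corpus = [
    "Fix SQL injection in the login handler",
    "Sanitize user-supplied file paths before opening them",
    "Update README formatting",
]  # stand-in for the dataset's user messages
query = "How do I prevent SQL injection in a login route?"  # stand-in for the prompt

# Stage 1: bi-encoder retrieval (same model as the diff)
retriever = SentenceTransformer('all-MiniLM-L6-v2')
query_emb = retriever.encode(query, convert_to_tensor=False)
corpus_embs = retriever.encode(corpus, convert_to_tensor=False)
sims = cosine_similarity([query_emb], corpus_embs)[0]
top_k = min(2, len(corpus))  # the script uses min(100, len(dataset))
candidates = sims.argsort()[-top_k:][::-1]

# Stage 2: cross-encoder reranking of the retrieved candidates (same model as the diff)
reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
scores = reranker.predict([[query, corpus[i]] for i in candidates])
ranked = [candidates[i] for i in np.argsort(scores)[::-1]]
print([corpus[i] for i in ranked])

The two-stage design keeps the expensive cross-encoder off the full dataset: the bi-encoder scores every example cheaply, and only the top candidates are re-scored as (prompt, message) pairs.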