codelion committed
Commit: 60ac2bd
Parent: d6db1fd

Upload _script_for_eval.py

Files changed (1)
  1. _script_for_eval.py +55 -51
_script_for_eval.py CHANGED
@@ -8,8 +8,7 @@ import subprocess
 import argparse
 import re
 from openai import OpenAI
-from openai.types.chat import ChatCompletion
-from openai.types import APIError, APIConnectionError, APIResponseValidationError, APIStatusError
+from openai import OpenAIError
 from tqdm import tqdm
 from functools import partial
 import multiprocessing
@@ -20,15 +19,16 @@ import numpy as np
 
 client = OpenAI()
 
-def load_cache():
-    if os.path.exists('cache.pkl'):
+def load_cache(use_cache):
+    if use_cache and os.path.exists('cache.pkl'):
         with open('cache.pkl', 'rb') as f:
             return pickle.load(f)
     return {}
 
-def save_cache(cache):
-    with open('cache.pkl', 'wb') as f:
-        pickle.dump(cache, f)
+def save_cache(cache, use_cache):
+    if use_cache:
+        with open('cache.pkl', 'wb') as f:
+            pickle.dump(cache, f)
 
 def has_all_comments(text):
     lines=text.split('\n')
@@ -37,7 +37,7 @@ def has_all_comments(text):
             return False
     return True
 
-def fetch_dataset_examples(prompt, num_examples=3, use_similarity=True):
+def fetch_dataset_examples(prompt, num_examples=0, use_similarity=False):
     dataset = load_dataset("patched-codes/synth-vuln-fixes", split="train")
 
     if use_similarity:
@@ -106,28 +106,22 @@ def get_fixed_code_fine_tuned(prompt, few_shot_messages, model_name):
                 temperature=0.2,
                 top_p=0.95
             )
-            if isinstance(response, ChatCompletion):
-                return response.choices[0].message.content
-            else:
-                raise ValueError("Unexpected response type from OpenAI API")
-        except (APIError, APIConnectionError, APIResponseValidationError) as e:
+            return response.choices[0].message.content
+        except OpenAIError as e:
             if attempt < max_retries - 1:
                 time.sleep(2 ** attempt)  # Exponential backoff
             else:
                 raise Exception(f"API call failed after {max_retries} attempts: {str(e)}")
-        except APIStatusError as e:
-            raise Exception(f"API call failed with status {e.status_code}: {str(e)}")
 
-def process_file(test_case, cache, fixed_files, model_name):
+def process_file(test_case, cache, fixed_files, model_name, use_cache, n_shot, use_similarity):
     file_name = test_case["file_name"]
     input_file = os.path.join("staticeval", file_name)
 
-    if input_file in cache:
+    if use_cache and input_file in cache:
         tqdm.write(f"Skipping {input_file} (cached)")
         return cache[input_file]
 
     file_text = test_case["source"]
-    test_cwe = test_case["cwe"].strip()
     output_file = input_file + "_fixed.py"
     tmp_file = input_file + ".output.json"
 
@@ -173,7 +167,7 @@ def process_file(test_case, cache, fixed_files, model_name):
 
 Task: Fix the vulnerability in the code above. Provide only the complete fixed code without explanations or comments. Make minimal changes necessary to address the security issue while preserving the original functionality."""
 
-            few_shot_messages = fetch_dataset_examples(prompt, 8, True)
+            few_shot_messages = fetch_dataset_examples(prompt, n_shot, use_similarity)
             response = get_fixed_code_fine_tuned(prompt, few_shot_messages, model_name)
 
             if "```python" in response:
@@ -184,61 +178,65 @@ def process_file(test_case, cache, fixed_files, model_name):
                 fixed_code = response
 
             stop_words = ["```", "assistant"]
+
             for w in stop_words:
                 if w in fixed_code:
                     fixed_code = fixed_code[:fixed_code.find(w)]
-
-            if len(fixed_code) < 400 or all(line.strip().startswith("#") for line in fixed_code.split('\n') if line.strip()):
+            if len(fixed_code) < 400:
                 result = False
+            if has_all_comments(fixed_code):
+                result = False
+            if os.path.exists(output_file):
+                os.remove(output_file)
+            with open(output_file, 'w') as wf:
+                wf.write(fixed_code)
+            if os.path.exists(tmp_file):
+                os.remove(tmp_file)
+            scan_command_output = f"semgrep --config p/python {output_file} --output {tmp_file} --json > /dev/null 2>&1"
+            os.system(scan_command_output)
+            with open(tmp_file, 'r') as jf:
+                data = json.load(jf)
+            if len(data["errors"]) == 0 and len(data["results"]) == 0:
+                tqdm.write("Passing response for " + input_file + " at 1 ...")
+                result = True
+                fixed_files.append(file_name)
            else:
-                with open(output_file, 'w') as wf:
-                    wf.write(fixed_code)
-
-                scan_command_output = f"semgrep --config p/python {output_file} --output {tmp_file} --json > /dev/null 2>&1"
-                os.system(scan_command_output)
-
-                if not os.path.exists(tmp_file):
-                    tqdm.write(f"Semgrep failed to create output file for {output_file}")
-                    result = False
-                else:
-                    with open(tmp_file, 'r') as jf:
-                        data = json.load(jf)
-
-                    if len(data.get("errors", [])) == 0 and len(data.get("results", [])) == 0:
-                        tqdm.write("Passing response for " + input_file + " at 1 ...")
-                        result = True
-                        fixed_files.append(file_name)
-                    else:
-                        result = False
+                result = False
         else:
             tqdm.write(f"Semgrep reported errors for {input_file}")
             result = False
 
         if os.path.exists(tmp_file):
             os.remove(tmp_file)
+        if use_cache:
+            cache[input_file] = result
 
-        cache[input_file] = result
-        save_cache(cache)
         return result
     except Exception as e:
         tqdm.write(f"Error processing {input_file}: {str(e)}")
         return False
 
-def process_test_case(test_case, cache, fixed_files, model_name):
-    return process_file(test_case, cache, fixed_files, model_name)
+def process_test_case(test_case, cache, fixed_files, model_name, use_cache, n_shot, use_similarity):
+    return process_file(test_case, cache, fixed_files, model_name, use_cache, n_shot, use_similarity)
 
 def main():
     parser = argparse.ArgumentParser(description="Run Static Analysis Evaluation")
-    parser.add_argument("--model", type=str, default="gpt-4-0125-preview", help="OpenAI model to use")
+    parser.add_argument("--model", type=str, default="gpt-4o-mini", help="OpenAI model to use")
+    parser.add_argument("--cache", action="store_true", help="Enable caching of results")
+    parser.add_argument("--n_shot", type=int, default=0, help="Number of examples to use for few-shot learning")
+    parser.add_argument("--use_similarity", action="store_true", help="Use similarity for fetching dataset examples")
     args = parser.parse_args()
 
     model_name = args.model
-    sanitized_model_name = sanitize_filename(model_name)
+    use_cache = args.cache
+    n_shot = args.n_shot
+    use_similarity = args.use_similarity
+    sanitized_model_name = f"{sanitize_filename(model_name)}-{n_shot}-shot{'-sim' if use_similarity else ''}"
 
     dataset = load_dataset("patched-codes/static-analysis-eval", split="train")
     data = [{"file_name": item["file_name"], "source": item["source"], "cwe": item["cwe"]} for item in dataset]
 
-    cache = load_cache()
+    cache = load_cache(use_cache)
    total_tests = len(data)
 
     semgrep_version = get_semgrep_version()
@@ -248,23 +246,29 @@ def main():
     manager = multiprocessing.Manager()
     fixed_files = manager.list()
 
-    process_func = partial(process_test_case, cache=cache, fixed_files=fixed_files, model_name=model_name)
+    process_func = partial(process_test_case, cache=cache, fixed_files=fixed_files, model_name=model_name, use_cache=use_cache, n_shot=n_shot, use_similarity=use_similarity)
 
-    with multiprocessing.Pool() as pool:
-        results = list(tqdm(pool.imap_unordered(process_func, data), total=total_tests))
+    with multiprocessing.Pool(processes=4) as pool:
+        results = list(tqdm(pool.imap(process_func, data), total=total_tests))
 
     passing_tests = sum(results)
     score = passing_tests / total_tests * 100
 
+    if use_cache:
+        save_cache(cache, use_cache)
+
     with open(log_file_name, 'w') as log_file:
         log_file.write(f"Evaluation Run Log\n")
         log_file.write(f"==================\n\n")
         log_file.write(f"Date and Time: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
         log_file.write(f"Model: {model_name}\n")
-        log_file.write(f"Semgrep Version: {semgrep_version}\n\n")
+        log_file.write(f"Semgrep Version: {semgrep_version}\n")
+        log_file.write(f"Caching: {'Enabled' if use_cache else 'Disabled'}\n\n")
         log_file.write(f"Total Tests: {total_tests}\n")
         log_file.write(f"Passing Tests: {passing_tests}\n")
         log_file.write(f"Score: {score:.2f}%\n\n")
+        log_file.write(f"Number of few-shot examples: {n_shot}\n")
+        log_file.write(f"Use similarity for examples: {'Yes' if use_similarity else 'No'}\n")
         log_file.write("Fixed Files:\n")
         for file in fixed_files:
             log_file.write(f"- {file}\n")