codelion committed
Commit 5eb5743 · 1 Parent(s): dcc5d31

Upload 2 files

Files changed (2):
  1. _script_for_eval.py +62 -54
  2. _script_for_gen.py +2 -4
_script_for_eval.py CHANGED
@@ -17,6 +17,7 @@ from datasets import load_dataset
 from sentence_transformers import SentenceTransformer, CrossEncoder
 from sklearn.metrics.pairwise import cosine_similarity
 
+# client = OpenAI(base_url="http://localhost:11434/v1/", api_key="ollama")
 client = OpenAI()
 
 def load_cache(use_cache):
@@ -116,12 +117,13 @@ def get_fixed_code_fine_tuned(prompt, few_shot_messages, model_name):
     messages.append({"role": "user", "content": prompt})
 
     max_retries = 3
+
     for attempt in range(max_retries):
         try:
             response = client.chat.completions.create(
                 model=model_name,
                 messages=messages,
-                max_tokens=512,
+                max_tokens=4096,
                 temperature=0.2,
                 top_p=0.95
             )
@@ -131,6 +133,20 @@ def get_fixed_code_fine_tuned(prompt, few_shot_messages, model_name):
             time.sleep(2 ** attempt)  # Exponential backoff
         else:
             raise Exception(f"API call failed after {max_retries} attempts: {str(e)}")
+
+def clean_code_snippet(response):
+    # Remove opening delimiter
+    if response.startswith("```python"):
+        response = response[len("```python"):]
+    elif response.startswith("```"):
+        response = response[len("```"):]
+
+    # Remove closing delimiter
+    if response.endswith("```"):
+        response = response[:-len("```")]
+
+    # Trim any remaining whitespace
+    return response.strip()
 
 def process_file(test_case, cache, fixed_files, model_name, use_cache, n_shot, use_similarity, oracle_mode):
     file_name = test_case["file_name"]
@@ -154,7 +170,7 @@ def process_file(test_case, cache, fixed_files, model_name, use_cache, n_shot, use_similarity, oracle_mode):
         os.remove(tmp_file)
 
     tqdm.write("Scanning file " + input_file + "...")
-    scan_command_input = f"semgrep --config p/python {input_file} --output {tmp_file} --json > /dev/null 2>&1"
+    scan_command_input = f"semgrep --config auto {input_file} --output {tmp_file} --json > /dev/null 2>&1"
     os.system(scan_command_input)
 
     if not os.path.exists(tmp_file):
@@ -163,71 +179,62 @@ def process_file(test_case, cache, fixed_files, model_name, use_cache, n_shot, use_similarity, oracle_mode):
 
     with open(tmp_file, 'r') as jf:
         data = json.load(jf)
-
-    if len(data.get("errors", [])) == 0:
-        if len(data.get("results", [])) == 0:
-            tqdm.write(input_file + " has no vulnerabilities")
-            result = False
+
+    if len(data.get("errors", [])) > 0:
+        tqdm.write(f"Error processing {input_file} ...")
+        return False
+
+    if len(data.get("results", [])) == 0:
+        tqdm.write(input_file + " has no vulnerabilities")
+        result = False
+    else:
+        tqdm.write("Vulnerability found in " + input_file + "...")
+        if oracle_mode:
+            result = True
+            fixed_files.append(file_name)
         else:
-            tqdm.write("Vulnerability found in " + input_file + "...")
-            if oracle_mode:
-                result = True
-                fixed_files.append(file_name)
-            else:
-                cwe = data["results"][0]["extra"]["metadata"]["cwe"][0]
-                lines = data["results"][0]["extra"]["lines"]
-                message = data["results"][0]["extra"]["message"]
-
-                prompt = f"""Vulnerability Report:
-                - Type: {cwe}
-                - Location: {lines}
-                - Description: {message}
-
-                Original Code:
-                ```
-                {file_text}
-                ```
-
-                Task: Fix the vulnerability in the code above. Provide only the complete fixed code without explanations or comments. Make minimal changes necessary to address the security issue while preserving the original functionality."""
-
-                few_shot_messages = fetch_dataset_examples(prompt, n_shot, use_similarity)
-                response = get_fixed_code_fine_tuned(prompt, few_shot_messages, model_name)
-
-                if "```python" in response:
-                    idx = response.find("```python")
-                    shift = len("```python")
-                    fixed_code = response[idx + shift :]
-                else:
-                    fixed_code = response
-
-                stop_words = ["```", "assistant"]
-
-                for w in stop_words:
-                    if w in fixed_code:
-                        fixed_code = fixed_code[:fixed_code.find(w)]
-                if len(fixed_code) < 400:
-                    result = False
-                if has_all_comments(fixed_code):
-                    result = False
+            cwe = test_case['cwe']
+            lines = data["results"][0]["extra"]["lines"]
+            message = data["results"][0]["extra"]["message"]
+
+            prompt = f"""Vulnerability Report:
+            - Type: {cwe}
+            - Location: {lines}
+            - Description: {message}
+
+            Original Code:
+            ```
+            {file_text}
+            ```
+
+            Task: Fix the vulnerability in the code above. Provide only the complete fixed code without explanations or comments. Make minimal changes necessary to address the security issue while preserving the original functionality."""
+            # print(prompt)
+            few_shot_messages = fetch_dataset_examples(prompt, n_shot, use_similarity)
+            response = get_fixed_code_fine_tuned(prompt, few_shot_messages, model_name)
+            # print(response)
+
+            fixed_code = clean_code_snippet(response)
+
+            if len(fixed_code) < 512 or has_all_comments(fixed_code):
+                result = False
+            else:
+                # print("Here2\n" + fixed_code)
                 if os.path.exists(output_file):
                     os.remove(output_file)
                 with open(output_file, 'w') as wf:
                     wf.write(fixed_code)
                 if os.path.exists(tmp_file):
                     os.remove(tmp_file)
-                scan_command_output = f"semgrep --config p/python {output_file} --output {tmp_file} --json > /dev/null 2>&1"
+                scan_command_output = f"semgrep --config auto {output_file} --output {tmp_file} --json > /dev/null 2>&1"
                 os.system(scan_command_output)
                 with open(tmp_file, 'r') as jf:
                     data = json.load(jf)
-                if len(data["errors"]) == 0 and len(data["results"]) == 0:
+                if len(data["results"]) == 0:
                     tqdm.write("Passing response for " + input_file + " at 1 ...")
                     result = True
                     fixed_files.append(file_name)
                 else:
                     result = False
-    else:
-        tqdm.write(f"Semgrep reported errors for {input_file}")
-        result = False
 
     if os.path.exists(tmp_file):
        os.remove(tmp_file)
@@ -258,7 +265,7 @@ def main():
     oracle_mode = args.oracle
     sanitized_model_name = f"{sanitize_filename(model_name)}-{n_shot}-shot{'-sim' if use_similarity else ''}"
 
-    dataset = load_dataset("patched-codes/static-analysis-eval", split="train")
+    dataset = load_dataset("patched-codes/static-analysis-eval", split="train", download_mode='force_redownload')
     data = [{"file_name": item["file_name"], "source": item["source"], "cwe": item["cwe"]} for item in dataset]
 
     cache = load_cache(use_cache)
@@ -271,7 +278,8 @@ def main():
     manager = multiprocessing.Manager()
     fixed_files = manager.list()
 
-    process_func = partial(process_test_case, cache=cache, fixed_files=fixed_files, model_name=model_name, use_cache=use_cache, n_shot=n_shot, use_similarity=use_similarity, oracle_mode=oracle_mode)
+    process_func = partial(process_test_case, cache=cache, fixed_files=fixed_files, model_name=model_name,
+                           use_cache=use_cache, n_shot=n_shot, use_similarity=use_similarity, oracle_mode=oracle_mode)
 
     with multiprocessing.Pool(processes=4) as pool:
         results = list(tqdm(pool.imap(process_func, data), total=total_tests))
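
The new clean_code_snippet helper replaces the previous inline find()-and-slice fence handling and stop-word scan. A minimal standalone check of its behavior (the function body is copied from the hunk above; the asserted strings are illustrative):

def clean_code_snippet(response):
    # Remove an opening markdown fence, with or without a language tag
    if response.startswith("```python"):
        response = response[len("```python"):]
    elif response.startswith("```"):
        response = response[len("```"):]
    # Remove a closing fence
    if response.endswith("```"):
        response = response[:-len("```")]
    # Trim any remaining whitespace
    return response.strip()

# Illustrative inputs, not from the dataset
assert clean_code_snippet("```python\nprint('ok')\n```") == "print('ok')"
assert clean_code_snippet("print('ok')") == "print('ok')"

Unlike the old logic, a fence appearing mid-response is left alone; only leading and trailing delimiters are stripped.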
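Both scan commands shell out to semgrep with the auto config (previously p/python) and read back the JSON report. A sketch of the pass/fail decision now applied to the rewritten file (the helper name and tmp-file default are illustrative; the command string and JSON fields match the diff):

import json
import os

def rescan_passes(output_file, tmp_file="semgrep_out.json"):
    # Same invocation as in process_file: JSON report, shell output suppressed
    os.system(f"semgrep --config auto {output_file} --output {tmp_file} --json > /dev/null 2>&1")
    with open(tmp_file) as jf:
        data = json.load(jf)
    # After this commit the re-scan passes on zero findings alone;
    # scanner errors are checked only on the initial input scan
    return len(data.get("results", [])) == 0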
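The reflowed partial(...) call is what lets the shared keyword arguments ride along to every worker while pool.imap streams only the per-file test cases. A self-contained sketch of the pattern (the stub worker body, model name, and file names are placeholders):

from functools import partial
import multiprocessing

def process_test_case(test_case, cache, fixed_files, model_name,
                      use_cache, n_shot, use_similarity, oracle_mode):
    # Stand-in for the real worker, which scans, prompts the model, and re-scans
    fixed_files.append(test_case["file_name"])
    return True

if __name__ == "__main__":
    manager = multiprocessing.Manager()
    fixed_files = manager.list()  # proxy list writable from all workers
    process_func = partial(process_test_case, cache={}, fixed_files=fixed_files,
                           model_name="example-model", use_cache=False, n_shot=0,
                           use_similarity=False, oracle_mode=False)
    data = [{"file_name": "a.py"}, {"file_name": "b.py"}]
    with multiprocessing.Pool(processes=4) as pool:
        results = list(pool.imap(process_func, data))
    print(f"{sum(results)}/{len(results)} fixed")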
_script_for_gen.py CHANGED
@@ -160,10 +160,8 @@ def merge_and_push_dataset(jsonl_file, new_dataset_name):
     logging.info("Creating dataset")
     try:
         dataset = Dataset.from_list(preprocessed_data)
-    except pa.lib.ArrowInvalid as e:
+    except Exception as e:
         logging.error(f"Error creating dataset: {str(e)}")
-        logging.info("Attempting to create dataset with type inference disabled")
-        dataset = Dataset.from_list(preprocessed_data, features=pa.schema([]))
 
     # Push the dataset to the new repository
     logging.info(f"Pushing dataset with {len(dataset)} records to Hugging Face")
@@ -178,7 +176,7 @@ def main():
 
     if args.push_to_dataset:
         # Merge and push the dataset
-        jsonl_file = "output.jsonl"
+        jsonl_file = "static_analysis_eval.jsonl"
         merge_and_push_dataset(jsonl_file, args.push_to_dataset)
     else:
         # Perform the regular dataset extension process
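
In _script_for_gen.py the except clause is broadened from pa.lib.ArrowInvalid to any Exception, and the schema-less retry is dropped. A condensed sketch of the resulting pattern (the re-raise is an illustrative addition; as committed, the function only logs, so dataset would be unbound if construction fails):

import logging
from datasets import Dataset

def build_dataset(preprocessed_data):
    try:
        return Dataset.from_list(preprocessed_data)
    except Exception as e:  # broadened from pa.lib.ArrowInvalid in this commit
        logging.error(f"Error creating dataset: {str(e)}")
        raise  # not in the committed code; added here to make the failure explicit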