Commit 17ab974
Parent(s): 7345094

add LC_Code (#3)
- add LC_Code (63a230165e0d4eb5706ab0a3f13c1ae5c6df4e59)

Co-authored-by: Yeeef <[email protected]>
- LC_Code.py +0 -2
- LC_CodeCollab.py +6 -0
- LC_CodeCollab.yaml +71 -0
- LC_CodeCritic.py +7 -0
- LC_CodeCritic.yaml +77 -0
- LC_CodeCriticWrongAttempt.py +6 -0
- LC_CodeCriticWrongAttempt.yaml +89 -0
- LC_CodeCriticWrongAttemptWithPlan.py +6 -0
- LC_CodeCriticWrongAttemptWithPlan.yaml +97 -0
- LC_CodeDebug.py +12 -0
- LC_CodeDebug.yaml +64 -0
- LC_CodeDebugCollab.py +12 -0
- LC_CodeDebugCollab.yaml +68 -0
- LC_CodeDebugCollabWithPlan.py +12 -0
- LC_CodeDebugCollabWithPlan.yaml +68 -0
- LC_CodeDebugCritic.py +13 -0
- LC_CodeDebugCritic.yaml +31 -0
- LC_CodeDebugCriticWithPlan.py +13 -0
- LC_CodeDebugCriticWithPlan.yaml +34 -0
- LC_CodeReflect.py +6 -0
- LC_CodeReflect.yaml +49 -0
- LC_CodeTesting.py +33 -0
- LC_CodeTesting.yaml +90 -0
- LC_CodeWithPlan.py +6 -0
- LC_CodeWithPlan.yaml +92 -0
- LC_Plan.py +6 -0
- LC_Plan.yaml +80 -0
- LC_PlanCollab.py +6 -0
- LC_PlanCollab.yaml +69 -0
- LC_PlanCollab_Code.py +6 -0
- LC_PlanCollab_Code.yaml +22 -0
- LC_PlanCritic.py +6 -0
- LC_PlanCritic.yaml +77 -0
- LC_PlanReflect.py +6 -0
- LC_PlanReflect.yaml +63 -0
- LC_PlanReflect_Code.py +6 -0
- LC_PlanReflect_Code.yaml +15 -0
- LC_Plan_Code.py +6 -0
- LC_Plan_Code.yaml +26 -0
- __init__.py +43 -1
- src/evaluation/testing_utils_codeforces.py +1 -1
- src/evaluation/testing_utils_leetcode.py +258 -0
LC_Code.py
CHANGED
@@ -1,5 +1,3 @@
-import os
-
 from martinjosifoski.OpenAIChatAtomicFlow import OpenAIChatAtomicFlow
 
 
LC_CodeCollab.py
ADDED
@@ -0,0 +1,6 @@
+from flows.base_flows import GeneratorCriticFlow
+
+
+class LC_CodeCollab(GeneratorCriticFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_CodeCollab.yaml
ADDED
@@ -0,0 +1,71 @@
+name: "CodeCollab_Flow"
+verbose: True
+description: "ToDO: add description"
+
+reset_generator_every_round: False
+reset_critic_every_round: True
+max_rounds: 2 # ToDO: To increase to 4
+early_exit_key: "end_of_interaction"
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.code: "code"
+output_keys:
+  - "code"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_Code.instantiate_from_default_config
+    overrides:
+      name: "CodeGenerator"
+      model_name: "gpt-4"
+      human_message_prompt_template:
+        _target_: langchain.PromptTemplate
+        template: |2-
+          # Feedback on the last proposed solution
+          {{code_feedback}}
+
+
+          Consider the original problem statement, the last proposed solution and the provided feedback. Does the solution need to be updated? If so, provide the corrected version of the code in the following format:
+          ```python
+          {{code_placeholder}}
+          ```
+          otherwise, reply:
+          "Final answer."
+        input_variables:
+          - code_feedback
+        partial_variables:
+          code_placeholder: "{{python_code}}"
+        template_format: jinja2
+      default_human_input_keys:
+        - "code_feedback"
+      output_data_transformations:
+        - _target_: flows.data_transformations.RegexFirstOccurrenceExtractor
+          regex: '(?<=```python)([\s\S]*?)(?=```)'
+          regex_fallback: '(?<=```)([\s\S]*?)(?=```)'
+          input_key: "raw_response"
+          output_key: "code"
+          strip: True
+          assert_unique: True
+          verbose: True
+        - _target_: flows.data_transformations.EndOfInteraction
+          end_of_interaction_string: "Final answer"
+          output_key: "end_of_interaction"
+          verbose: True
+      output_keys:
+        - "code"
+        - "end_of_interaction"
+  - _target_: martinjosifoski.CC_flows.LC_CodeCritic.instantiate_from_default_config
+    overrides:
+      name: CodeCritic
+      output_data_transformations:
+        - _target_: flows.data_transformations.KeyRename
+          old_key2new_key:
+            raw_response: "code_feedback"
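The RegexFirstOccurrenceExtractor entries above are what turn the generator's raw reply into the flow's "code" output key. A minimal standalone sketch of that extraction step, using the exact regex and fallback from the config; the helper name and the sample reply are illustrative, not part of this commit:

```python
import re
from typing import Optional

# Same patterns as the RegexFirstOccurrenceExtractor config above.
PRIMARY = r'(?<=```python)([\s\S]*?)(?=```)'
FALLBACK = r'(?<=```)([\s\S]*?)(?=```)'

def extract_first_code_block(raw_response: str) -> Optional[str]:
    """Return the first fenced code block, preferring ```python fences."""
    for pattern in (PRIMARY, FALLBACK):
        match = re.search(pattern, raw_response)
        if match:
            return match.group(1).strip()
    return None

reply = "Here is the fix:\n```python\nprint('hello')\n```\nFinal answer."
print(extract_first_code_block(reply))  # -> print('hello')
```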
LC_CodeCritic.py
ADDED
@@ -0,0 +1,7 @@
+from martinjosifoski.OpenAIChatAtomicFlow import OpenAIChatAtomicFlow
+
+
+
+class LC_CodeCritic(OpenAIChatAtomicFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_CodeCritic.yaml
ADDED
@@ -0,0 +1,77 @@
+name: "CodeCritic_Flow"
+verbose: True
+description: "ToDO: add description"
+
+model_name: "gpt-4"
+generation_parameters:
+  n: 1
+  max_tokens: 3000
+  temperature: 0.3
+
+  model_kwargs:
+    top_p: 0.2
+    frequency_penalty: 0
+    presence_penalty: 0
+
+system_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    Your goal is to identify potential issues with a competitive programming solution attempt.
+
+    The user will specify the problem by providing you with:
+    - the problem statement
+    - example test cases
+    - the constraints of the problem
+    - (optional) explanation of the test cases
+    - a Python solution attempt
+
+    Crucially, your goal is to correctly identify potential issues with the solution attempt, and not to provide the code implementation yourself.
+    The user will provide you with a task and an output format that you will strictly follow.
+  input_variables: []
+  template_format: jinja2
+
+human_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: "{{query}}"
+  input_variables:
+    - "query"
+  template_format: jinja2
+
+query_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    # Problem statement
+    {{problem_description}}
+
+    {{io_description}}
+
+    # Constraints
+    {{constraints}}
+
+    # code stub
+    ```python
+    {{python_stub}}
+    ```
+
+    # Python solution attempt
+    ```python
+    {{code}}
+    ```
+
+
+    Consider the problem statement and the solution attempt. Are there any issues with the proposed solution or it is correct? Explain your reasoning very concisely, and do not provide code.
+  input_variables:
+    - "problem_description"
+    - "io_description"
+    - "constraints"
+    - "python_stub"
+    - "code"
+  template_format: jinja2
+
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "code"
+output_keys: []
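The prompt templates above are declared as langchain.PromptTemplate objects with template_format: jinja2, so the {{...}} placeholders are filled from the flow's input keys at query time. A small sketch of that rendering step, assuming a langchain version in which PromptTemplate accepts template_format="jinja2"; the sample values are illustrative:

```python
from langchain import PromptTemplate

# Mirrors the query_message_prompt_template above: jinja2 placeholders filled
# from the flow's input keys.
query_template = PromptTemplate(
    template="# Problem statement\n{{problem_description}}\n\n# Constraints\n{{constraints}}",
    input_variables=["problem_description", "constraints"],
    template_format="jinja2",
)

print(query_template.format(
    problem_description="Given an array of integers, return the indices of the two numbers that add up to a target.",
    constraints="2 <= len(nums) <= 10^4",
))
```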
LC_CodeCriticWrongAttempt.py
ADDED
@@ -0,0 +1,6 @@
+from martinjosifoski.OpenAIChatAtomicFlow import OpenAIChatAtomicFlow
+
+
+class LC_CodeCriticWrongAttempt(OpenAIChatAtomicFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_CodeCriticWrongAttempt.yaml
ADDED
@@ -0,0 +1,89 @@
+name: "CodeCriticWrongAttempt_Flow"
+verbose: True
+description: "ToDO: add description"
+
+model_name: "gpt-4"
+generation_parameters:
+  n: 1
+  max_tokens: 3000
+  temperature: 0.3
+
+  model_kwargs:
+    top_p: 0.2
+    frequency_penalty: 0
+    presence_penalty: 0
+
+system_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    Your goal is to identify the issues with an incorrect competitive programming solution attempt.
+
+    The user will specify the problem by providing you with:
+    - the problem statement
+    - example test cases
+    - the constraints of the problem
+    - python stub code
+    - an incorrect Python solution attempt and a description of its issue
+
+    Crucially, your goal is to consider all aspects of the problem and pinpoint the issues with the solution attempt, and not to provide the code implementation yourself.
+    Some aspects to consider: Is the input correctly parsed? Is the output correctly formatted? Are the corner cases correctly handled? Is there a logical mistake with the algorithm itself?
+    Use the code execution results provided in the issue description to guide your reasoning/debugging.
+  input_variables: []
+  template_format: jinja2
+
+human_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: "{{query}}"
+  input_variables:
+    - "query"
+  template_format: jinja2
+
+query_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    # Problem statement
+    {{problem_description}}
+
+    {{io_description}}
+
+    # Constraints
+    {{constraints}}
+
+    The code should extend the following stub:
+    ```python
+    {{python_stub}}
+    ```
+
+    # Solution attempt to be fixed
+    ```python
+    {{code}}
+    ```
+
+    {{testing_results_summary}}
+
+
+    Consider the problem statement, the solution attempt and the issue. Why is the solution attempt incorrect? How should it be fixed? Explain your reasoning very concisely, and do not provide code.
+  input_variables:
+    - "problem_description"
+    - "io_description"
+    - "constraints"
+    - "python_stub"
+    - "code"
+    - "testing_results_summary"
+  template_format: jinja2
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "testing_results_summary"
+  - "code"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response: "code_feedback"
+output_keys:
+  - "code_feedback"
LC_CodeCriticWrongAttemptWithPlan.py
ADDED
@@ -0,0 +1,6 @@
+from martinjosifoski.OpenAIChatAtomicFlow import OpenAIChatAtomicFlow
+
+
+class LC_CodeCriticWrongAttemptWithPlan(OpenAIChatAtomicFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_CodeCriticWrongAttemptWithPlan.yaml
ADDED
@@ -0,0 +1,97 @@
+name: "CodeCriticWrongAttemptWithPlan_Flow"
+verbose: True
+description: "ToDO: add description"
+
+model_name: "gpt-4"
+generation_parameters:
+  n: 1
+  max_tokens: 3000
+  temperature: 0.3
+
+  model_kwargs:
+    top_p: 0.2
+    frequency_penalty: 0
+    presence_penalty: 0
+
+system_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    Your goal is to identify the issues with an incorrect competitive programming solution attempt.
+
+    The user will specify the problem by providing you with:
+    - the problem statement
+    - example test cases
+    - the constraints of the problem
+    - python stub code
+    - an incorrect Python solution attempt and a description of its issue
+
+    Additionally, the user will provide you with a conceptual solution to the problem which should guide your reasoning.
+
+    Crucially, your goal is to consider all aspects of the problem and pinpoint the issues with the solution attempt, and not to provide the code implementation yourself.
+    Some aspects to consider: Is the input correctly parsed? Is the output correctly formatted? Is the code implementation consistent with the conceptual solution? Are the corner cases correctly handled? Is there a logical mistake with the algorithm itself?
+    Use the code execution results provided in the issue description to guide your reasoning/debugging.
+  input_variables: []
+  template_format: jinja2
+
+human_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: "{{query}}"
+  input_variables:
+    - "query"
+  template_format: jinja2
+
+query_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    # Problem statement
+    {{problem_description}}
+
+    {{io_description}}
+
+    # Constraints
+    {{constraints}}
+
+    The code should extend the following stub:
+    ```python
+    {{python_stub}}
+    ```
+
+    # Conceptual solution
+    {{plan}}
+
+
+    # Solution attempt to be fixed
+    ```python
+    {{code}}
+    ```
+
+    {{testing_results_summary}}
+
+
+    Consider the problem statement, the conceptual solution, the code implementation and the issue. Why is the solution attempt incorrect? How should it be fixed? Explain your reasoning very concisely, and do not provide code.
+  input_variables:
+    - "problem_description"
+    - "io_description"
+    - "constraints"
+    - "python_stub"
+    - "plan"
+    - "code"
+    - "testing_results_summary"
+  template_format: jinja2
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "testing_results_summary"
+  - "plan"
+  - "code"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response: "code_feedback"
+output_keys:
+  - "code_feedback"
LC_CodeDebug.py
ADDED
@@ -0,0 +1,12 @@
+from flows.base_flows import GeneratorCriticFlow
+
+
+class LC_CodeDebug(GeneratorCriticFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _early_exit(self):
+        if self.flow_state.get("all_tests_passed", False):
+            return True
+
+        return super()._early_exit()
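_early_exit stops the generator-critic loop as soon as the critic reports that all public tests passed. A simplified, hypothetical sketch of that control flow; the generate_code and run_public_tests stand-ins are not part of this commit, and the real loop lives in GeneratorCriticFlow:

```python
# Illustrative sketch only: a generator-critic loop with the same early-exit
# condition as LC_CodeDebug._early_exit(). The two helpers below are stand-ins
# for the CodeGenerator and CodeTestingCritic subflows.
def generate_code(problem, feedback):
    # Placeholder for the LLM-backed generator subflow.
    return "class Solution:\n    def solve(self):\n        return 42"

def run_public_tests(problem, code):
    # Placeholder for the testing critic; a real run executes the code on the public tests.
    return {"all_tests_passed": True, "testing_results_summary": ""}

def debug_loop(problem, max_rounds=2):
    code, feedback = "", None
    for _ in range(max_rounds):
        code = generate_code(problem, feedback)        # generator subflow
        results = run_public_tests(problem, code)      # critic subflow
        if results["all_tests_passed"]:                # early exit, mirroring _early_exit()
            break
        feedback = results["testing_results_summary"]  # fed back in the next round
    return code

print(debug_loop("toy problem"))
```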
LC_CodeDebug.yaml
ADDED
@@ -0,0 +1,64 @@
+name: "CodeDebug_Flow"
+verbose: True
+description: "ToDO: add description"
+
+reset_generator_every_round: False
+reset_critic_every_round: True
+max_rounds: 2 # ToDo: To increase to 4
+early_exit_key: "end_of_interaction"
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "public_tests_individual_io"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.code: "code"
+output_keys:
+  - "code"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_Code.instantiate_from_default_config
+    overrides:
+      name: "CodeGenerator"
+      model_name: "gpt-4"
+      human_message_prompt_template:
+        template: |2-
+          {{testing_results_summary}}
+
+
+          Consider the problem statement, the last proposed solution, and its issue. Provide a corrected version of the code that solves the original problem and resolves the issue, without any explanation, in the following format:
+          ```python
+          {{code_placeholder}}
+          ```
+        input_variables:
+          - testing_results_summary
+        partial_variables:
+          code_placeholder: "{{python_code}}"
+      default_human_input_keys:
+        - "testing_results_summary"
+        - "all_tests_passed"
+      output_data_transformations:
+        - _target_: flows.data_transformations.RegexFirstOccurrenceExtractor
+          regex: '(?<=```python)([\s\S]*?)(?=```)'
+          regex_fallback: '(?<=```)([\s\S]*?)(?=```)'
+          input_key: "raw_response"
+          output_key: "code"
+          strip: True
+          assert_unique: True
+          verbose: True
+        - _target_: flows.data_transformations.EndOfInteraction
+          end_of_interaction_string: "Final answer"
+          output_key: "end_of_interaction"
+          verbose: True
+      output_keys:
+        - "code"
+        - "end_of_interaction"
+  - _target_: martinjosifoski.CC_flows.LC_CodeTesting.instantiate_from_default_config
+    overrides:
+      name: "CodeTestingCritic"
LC_CodeDebugCollab.py
ADDED
@@ -0,0 +1,12 @@
+from flows.base_flows import GeneratorCriticFlow
+
+
+class LC_CodeDebugCollab(GeneratorCriticFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _early_exit(self):
+        if self.flow_state.get("all_tests_passed", False):
+            return True
+
+        return super()._early_exit()
LC_CodeDebugCollab.yaml
ADDED
@@ -0,0 +1,68 @@
+name: "CodeDebugCollab_Flow"
+verbose: True
+description: "ToDO: add description"
+
+reset_generator_every_round: False
+reset_critic_every_round: True
+max_rounds: 2 # ToDo: To increase to 4
+early_exit_key: "end_of_interaction"
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "public_tests_individual_io"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.code: "code"
+output_keys:
+  - "code"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_Code.instantiate_from_default_config
+    overrides:
+      name: "CodeGenerator"
+      model_name: "gpt-4"
+      human_message_prompt_template:
+        _target_: langchain.PromptTemplate
+        template: |2-
+          {{testing_results_summary}}
+
+          {{code_feedback}}
+
+
+          Consider the problem statement, the last proposed solution, its issue and the provided feedback. Return a corrected version of the code that solves the original problem and resolves the issue, without any explanation, in the following format:
+          ```python
+          {{code_placeholder}}
+          ```
+        input_variables:
+          - code_feedback
+          - testing_results_summary
+        partial_variables:
+          code_placeholder: "{{python_code}}"
+        template_format: jinja2
+      default_human_input_keys:
+        - "code_feedback"
+        - "testing_results_summary"
+      output_data_transformations:
+        - _target_: flows.data_transformations.RegexFirstOccurrenceExtractor
+          regex: '(?<=```python)([\s\S]*?)(?=```)'
+          regex_fallback: '(?<=```)([\s\S]*?)(?=```)'
+          input_key: "raw_response"
+          output_key: "code"
+          strip: True
+          assert_unique: True
+          verbose: True
+        - _target_: flows.data_transformations.EndOfInteraction
+          end_of_interaction_string: "Final answer"
+          output_key: "end_of_interaction"
+          verbose: True
+      output_keys:
+        - "code"
+        - "end_of_interaction"
+  - _target_: martinjosifoski.CC_flows.LC_CodeDebugCritic.instantiate_from_default_config
+
LC_CodeDebugCollabWithPlan.py
ADDED
@@ -0,0 +1,12 @@
+from flows.base_flows import GeneratorCriticFlow
+
+
+class LC_CodeDebugCollabWithPlan(GeneratorCriticFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _early_exit(self):
+        if self.flow_state.get("all_tests_passed", False):
+            return True
+
+        return super()._early_exit()
LC_CodeDebugCollabWithPlan.yaml
ADDED
@@ -0,0 +1,68 @@
+name: "CodeDebugCollabWithPlan_Flow"
+verbose: True
+description: "ToDO: add description"
+
+reset_generator_every_round: False
+reset_critic_every_round: True
+max_rounds: 2 # ToDo: To increase to 4
+early_exit_key: "end_of_interaction"
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "public_tests_individual_io"
+  - "plan"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.code: "code"
+output_keys:
+  - "code"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_CodeWithPlan.instantiate_from_default_config
+    overrides:
+      name: "CodeGenerator"
+      model_name: "gpt-4"
+      human_message_prompt_template:
+        _target_: langchain.PromptTemplate
+        template: |2-
+          {{testing_results_summary}}
+
+          {{code_feedback}}
+
+
+          Consider the problem statement, the last proposed solution, its issue and the provided feedback. Return a corrected version of the code that solves the original problem and resolves the issue, without any explanation, in the following format:
+          ```python
+          {{code_placeholder}}
+          ```
+        input_variables:
+          - code_feedback
+          - testing_results_summary
+        partial_variables:
+          code_placeholder: "{{python_code}}"
+        template_format: jinja2
+      default_human_input_keys:
+        - "code_feedback"
+        - "testing_results_summary"
+      output_data_transformations:
+        - _target_: flows.data_transformations.RegexFirstOccurrenceExtractor
+          regex: '(?<=```python)([\s\S]*?)(?=```)'
+          regex_fallback: '(?<=```)([\s\S]*?)(?=```)'
+          input_key: "raw_response"
+          output_key: "code"
+          strip: True
+          assert_unique: True
+          verbose: True
+        - _target_: flows.data_transformations.EndOfInteraction
+          end_of_interaction_string: "Final answer"
+          output_key: "end_of_interaction"
+          verbose: True
+      output_keys:
+        - "code"
+        - "end_of_interaction"
+  - _target_: martinjosifoski.CC_flows.LC_CodeDebugCriticWithPlan.instantiate_from_default_config
LC_CodeDebugCritic.py
ADDED
@@ -0,0 +1,13 @@
+from flows.base_flows import SequentialFlow
+
+
+class LC_CodeDebugCritic(SequentialFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _early_exit(self):
+        if self.flow_state.get("all_tests_passed", False):
+            self.flow_state["code_feedback"] = None
+            return True
+
+        return super()._early_exit()
LC_CodeDebugCritic.yaml
ADDED
@@ -0,0 +1,31 @@
+name: "CodeDebugCritic_Flow"
+verbose: True
+description: "ToDo: add description"
+
+early_exit_key: null
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "public_tests_individual_io"
+  - "code"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.testing_results_summary: "testing_results_summary"
+      raw_response.all_tests_passed: "all_tests_passed"
+      raw_response.code_feedback: "code_feedback"
+output_keys:
+  - "testing_results_summary"
+  - "all_tests_passed"
+  - "code_feedback"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_CodeTesting.instantiate_from_default_config
+    overrides:
+      name: "CodeTestingCritic"
+  - _target_: martinjosifoski.CC_flows.LC_CodeCriticWrongAttempt.instantiate_from_default_config
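The KeyRename transformation above re-exposes nested fields of the testing subflow's raw_response under flat output keys. A hypothetical illustration of that dotted-key rename; this is not the library's KeyRename implementation, only the behaviour the mapping describes:

```python
# Illustrative only: look up each dotted old key as a nested path and expose the
# value under the flat new name, e.g. raw_response.all_tests_passed -> all_tests_passed.
def rename_keys(data: dict, old_key2new_key: dict) -> dict:
    out = dict(data)
    for old, new in old_key2new_key.items():
        node = data
        for part in old.split("."):
            node = node[part]
        out[new] = node
    return out

payload = {"raw_response": {"all_tests_passed": False,
                            "testing_results_summary": "1 public test failed"}}
print(rename_keys(payload, {"raw_response.all_tests_passed": "all_tests_passed"}))
```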
LC_CodeDebugCriticWithPlan.py
ADDED
@@ -0,0 +1,13 @@
+from flows.base_flows import SequentialFlow
+
+
+class LC_CodeDebugCriticWithPlan(SequentialFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _early_exit(self):
+        if self.flow_state.get("all_tests_passed", False):
+            self.flow_state["code_feedback"] = None
+            return True
+
+        return super()._early_exit()
LC_CodeDebugCriticWithPlan.yaml
ADDED
@@ -0,0 +1,34 @@
+name: "CodeDebugCriticWithPlan_Flow"
+verbose: True
+description: "ToDo: add description"
+
+early_exit_key: null
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "public_tests_individual_io"
+  - "code"
+  - "plan"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.testing_results_summary: "testing_results_summary"
+      raw_response.all_tests_passed: "all_tests_passed"
+      raw_response.code_feedback: "code_feedback"
+output_keys:
+  - "testing_results_summary"
+  - "all_tests_passed"
+  - "code_feedback"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.CF_CodeCritic.instantiate_from_default_config
+    overrides:
+      name: "CodeTestingCritic"
+  - _target_: flows.flow_verse.instantiate_flow
+    repository_id: ${oc.env:CC_FLOWS}
+    class_name: LC_CodeCriticWrongAttemptWithPlan
LC_CodeReflect.py
ADDED
@@ -0,0 +1,6 @@
+from flows.base_flows import GeneratorCriticFlow
+
+
+class LC_CodeReflect(GeneratorCriticFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_CodeReflect.yaml
ADDED
@@ -0,0 +1,49 @@
+name: "CodeReflect_Flow"
+verbose: True
+description: "ToDO: add description"
+
+reset_generator_every_round: False
+reset_critic_every_round: True
+max_rounds: 2 # ToDo: To increase to 4
+early_exit_key: "end_of_interaction"
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.code: "code"
+output_keys:
+  - "code"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_Code.instantiate_from_default_config
+    overrides:
+      name: "CodeGenerator"
+      model_name: "gpt-4"
+      input_data_transformations:
+        - _target_: flows.data_transformations.KeyRename
+          old_key2new_key:
+            code_reflect_message: "query"
+      output_data_transformations:
+        - _target_: flows.data_transformations.RegexFirstOccurrenceExtractor
+          regex: '(?<=```python)([\s\S]*?)(?=```)'
+          regex_fallback: '(?<=```)([\s\S]*?)(?=```)'
+          output_key: "code"
+          strip: True
+          assert_unique: True
+          verbose: True
+        - _target_: flows.data_transformations.EndOfInteraction
+          end_of_interaction_string: "Final answer"
+          output_key: "end_of_interaction"
+          verbose: True
+      output_keys:
+        - "code"
+        - "end_of_interaction"
+
+  - _target_: martinjosifoski.CC_flows.FixedReply_CodeReflect.instantiate_from_default_config
LC_CodeTesting.py
ADDED
@@ -0,0 +1,33 @@
+from typing import Any, Dict
+
+from flows.utils import logging
+from .src.evaluation import testing_utils_leetcode
+from .CodeTesting import CodeTesting
+
+log = logging.get_logger(__name__)
+
+# ToDo: Add a flags to control whether hidden, public or both tests should be used for evaluation
+
+
+class LC_CodeTesting(CodeTesting):
+    REQUIRED_KEYS_CONFIG = []
+    REQUIRED_KEYS_KWARGS = []
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _get_test_data(self, input_data: Dict):
+        """This function retrieves (or generates) input-output pairs that will be used to test the implementation."""
+        return input_data["public_tests_individual_io"]
+
+    def _run_tests(self, input_data: Dict, test_data: Dict) -> Dict[str, Any]:
+        testing_results = testing_utils_leetcode.evaluate_solution_for_problem(
+            candidate_solution=input_data["code"],
+            python_stub=input_data["python_stub"],
+            public_tests_io=test_data
+        )
+
+        for test_output in testing_results["public_tests_results"]:
+            test_output["input"] = "\n".join(test_output["input"])
+
+        return testing_results
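_run_tests returns whatever evaluate_solution_for_problem produces, after flattening each test's "input" (a list of lines) into a single newline-separated string. The exact result schema is not shown in this commit, so the dictionary below is an assumed example used only to illustrate that post-processing step:

```python
# Assumed shape for illustration: each public test result carries its input as a
# list of lines, which _run_tests joins into one string before returning.
testing_results = {
    "public_tests_results": [
        {"input": ["[2,7,11,15]", "9"],
         "expected_output": "[0,1]",
         "generated_output": "[0,1]"},
    ]
}

for test_output in testing_results["public_tests_results"]:
    test_output["input"] = "\n".join(test_output["input"])

print(testing_results["public_tests_results"][0]["input"])
```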
LC_CodeTesting.yaml
ADDED
@@ -0,0 +1,90 @@
+name: "CodeTesting_Flow"
+description: "ToDo: add description"
+
+input_data_transformations: []
+input_keys:
+  - "code"
+  - "python_stub"
+  - "public_tests_individual_io"
+
+output_keys:
+  - "all_tests_passed"
+  - "testing_results_summary"
+output_data_transformations:
+  - _target_: .src.data_transformations.CorrectnessFlag
+    input_key: "raw_response.public_tests_results"
+    output_key: "all_tests_passed"
+  - _target_: .src.data_transformations.TestingResultsSummaryGeneration
+    output_key: "testing_results_summary"
+
+    single_test_error_message: True
+
+    no_error_template: |2-
+      ${.issue_title}
+      All of the executed tests passed.
+
+    compilation_error_template: |2-
+      ${.issue_title}
+      The execution resulted in a compilation error.
+      ## Compilation error message:
+      {{error_message}}
+    timeout_error_template: |2-
+      ${.issue_title}
+      The execution timed out, the solution is not efficient enough.
+    runtime_error_template: |2-
+      ${.issue_title}
+      The execution resulted in a runtime error on the following test.
+      ## [Failed test] Input
+      ```
+      {{test_input}}
+      ```
+      ## [Failed test] Runtime error message
+      {{error_message}}
+    single_test_error_template: |2-
+      ${.issue_title}
+      The Python code does not solve the problem in the problem description due to logical errors. It fails the following test:
+      ## [Failed test] Input
+      ```
+      {{test_input}}
+      ```
+      ## [Failed test] Expected output
+      ```
+      {{expected_output}}
+      ```
+      ## [Failed test] Generated output
+      ```
+      {{generated_output}}
+      ```
+    all_tests_header: |2-
+      ${.issue_title}
+      The Python code does not solve the problem in the problem description due to logical errors. It fails on the following tests.
+    test_error_template: |2-
+      ## [Failed test {{idx}}]
+      ### [Failed test {{idx}}] Input
+      ```
+      {{test_input}}
+      ```
+      ### [Failed test {{idx}}] Expected output
+      ```
+      {{expected_output}}
+      ```
+      ### [Failed test {{idx}}] Generated output
+      ```
+      {{generated_output}}
+      ```
+    tests_separator: "\n\n"
+
+    issue_title: "# Issue with the last proposed solution"
+
+    feedback_title: "# Feedback on the last proposed solution"
+
+    no_code_template: |2-
+      ${.feedback_title}
+      The code was not provided in the correct output format specified in the request or it was not provided at all.
+    feedback_only_template: |2-
+      ${.feedback_title}
+      {{feedback_content}}
+    feedback_and_issue_template: |2-
+      {{issue_description}}
+
+      {{feedback_content}}
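The ${.issue_title} and ${.feedback_title} references above look like OmegaConf-style relative interpolations resolved against sibling keys, while the {{...}} placeholders are filled later with the concrete test data. Assuming OmegaConf resolution (an assumption, since the resolver is not shown in this diff), the header reuse works like this:

```python
from omegaconf import OmegaConf

# Assumption: ${.issue_title} is an OmegaConf relative interpolation that pulls
# in the sibling issue_title value when the template is resolved.
cfg = OmegaConf.create({
    "issue_title": "# Issue with the last proposed solution",
    "no_error_template": "${.issue_title}\nAll of the executed tests passed.",
})

print(cfg.no_error_template)
```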
LC_CodeWithPlan.py
ADDED
@@ -0,0 +1,6 @@
+from martinjosifoski.OpenAIChatAtomicFlow import OpenAIChatAtomicFlow
+
+
+class LC_CodeWithPlan(OpenAIChatAtomicFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_CodeWithPlan.yaml
ADDED
@@ -0,0 +1,92 @@
+name: "CodeFlowWithPlan_Flow"
+verbose: True
+description: "ToDO: add description"
+
+model_name: "gpt-4"
+generation_parameters:
+  n: 1
+  max_tokens: 3000
+  temperature: 0.3
+
+  model_kwargs:
+    top_p: 0.2
+    frequency_penalty: 0
+    presence_penalty: 0
+
+system_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    Your goal is to provide executable Python code that solves a coding interview problem. The code should correctly handle all corner cases in order to pass the hidden test cases, which are used to evaluate the correctness of the solution.
+
+    The user will specify the problem by providing you with:
+    - the problem statement
+    - example test cases
+    - the constraints of the problem
+
+    Additionally, the user will provide you with a conceptual solution to the problem which should guide your reasoning and the code implementation.
+
+    The user will provide you with a task and an output format that you will strictly follow.
+  input_variables: []
+  template_format: jinja2
+
+human_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: "{{query}}"
+  input_variables:
+    - "query"
+  template_format: jinja2
+
+query_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    # Problem statement
+    {{problem_description}}
+
+    {{io_description}}
+
+    # Constraints
+    {{constraints}}
+
+    # Conceptual solution
+    {{plan}}
+
+
+    Return Python code that solves the problem. The code should extend the following stub:
+    ```python
+    {{python_stub}}
+    ```
+
+    without changing the method signatures.
+    Reply in the following format:
+    ```python
+    {{code_placeholder}}
+    ```
+    ```
+  input_variables:
+    - "problem_description"
+    - "io_description"
+    - "constraints"
+    - "python_stub"
+    - "plan"
+  partial_variables:
+    code_placeholder: "{{python_code}}"
+  template_format: jinja2
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "plan"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.RegexFirstOccurrenceExtractor
+    regex: '(?<=```python)([\s\S]*?)(?=```)'
+    regex_fallback: '(?<=```)([\s\S]*?)(?=```)'
+    output_key: "code"
+    strip: True
+    assert_unique: True
+    verbose: True
+output_keys:
+  - "code"
LC_Plan.py
ADDED
@@ -0,0 +1,6 @@
+from martinjosifoski.OpenAIChatAtomicFlow import OpenAIChatAtomicFlow
+
+
+class LC_Plan(OpenAIChatAtomicFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_Plan.yaml
ADDED
@@ -0,0 +1,80 @@
+name: "Plan_Flow"
+verbose: True
+description: "ToDO: add description"
+
+model_name: "gpt-4"
+generation_parameters:
+  n: 1
+  max_tokens: 3000
+  temperature: 0.3
+
+  model_kwargs:
+    top_p: 0.2
+    frequency_penalty: 0
+    presence_penalty: 0
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.RegexFirstOccurrenceExtractor
+    regex: '(?<=Conceptual solution)([\s\S]*?)(?=\n\n# [A-Z]|\Z)'
+    regex_fallback:
+      - '(?<=Conceptual solution:)([\s\S]*?)(?=\n\n# [A-Z]|\Z)'
+    output_key: "plan"
+    strip: True
+    assert_unique: True
+    verbose: True
+output_keys:
+  - "plan"
+
+system_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    Your goal is to provide a high-level conceptual solution that, if implemented, will solve a given coding interview problem.
+
+    The user will specify the problem by providing you with:
+    - the problem statement
+    - example test cases
+    - the constraints of the problem
+
+    The proposed algorithm should be computationally efficient, logically correct and handle all corner cases.
+
+    The user will provide you with a task and an output format that you will strictly follow.
+  input_variables: []
+  template_format: jinja2
+
+human_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: "{{query}}"
+  input_variables:
+    - "query"
+  template_format: jinja2
+
+query_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    # Problem statement
+    {{problem_description}}
+
+    {{io_description}}
+
+    # Constraints
+    {{constraints}}
+
+
+    Return a high-level conceptual solution that would solve the problem. Be very concise, and do not provide code.
+    Reply in the following format:
+    # Conceptual solution
+    {{plan_placeholder}}
+  input_variables:
+    - "problem_description"
+    - "io_description"
+    - "constraints"
+  partial_variables:
+    plan_placeholder: "{{conceptual_solution}}"
+  template_format: jinja2
+
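The Plan flow extracts the conceptual solution from the model's reply with the regex pair configured above. A standalone check of those exact patterns; the sample reply is illustrative, not part of this commit:

```python
import re

# Same patterns as the RegexFirstOccurrenceExtractor entry in LC_Plan.yaml.
PLAN_REGEX = r'(?<=Conceptual solution)([\s\S]*?)(?=\n\n# [A-Z]|\Z)'
PLAN_REGEX_FALLBACK = r'(?<=Conceptual solution:)([\s\S]*?)(?=\n\n# [A-Z]|\Z)'

reply = (
    "# Conceptual solution\n"
    "Sort the intervals by start time, then merge overlapping intervals in one pass.\n\n"
    "# Complexity\n"
    "O(n log n) due to the sort."
)

match = re.search(PLAN_REGEX, reply) or re.search(PLAN_REGEX_FALLBACK, reply)
print(match.group(1).strip())
```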
LC_PlanCollab.py
ADDED
@@ -0,0 +1,6 @@
+from flows.base_flows import GeneratorCriticFlow
+
+
+class LC_PlanCollab(GeneratorCriticFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_PlanCollab.yaml
ADDED
@@ -0,0 +1,69 @@
+name: "PlanCollab_Flow"
+verbose: True
+description: "ToDO: add description"
+
+reset_generator_every_round: False
+reset_critic_every_round: True
+max_rounds: 2 # ToDo: To increase to 4
+early_exit_key: "end_of_interaction"
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.plan: "plan"
+output_keys:
+  - "plan"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_Plan.instantiate_from_default_config
+    overrides:
+      name: "PlanGenerator"
+      human_message_prompt_template:
+        _target_: langchain.PromptTemplate
+        template: |2-
+          # Feedback on the last proposed conceptual solution
+          {{plan_feedback}}
+
+
+          Consider the original problem statement, the last proposed solution and the provided feedback. Does the solution need to be updated? If so, provide the corrected version of the conceptual solution in the following format:
+          # Conceptual solution
+          {{plan_placeholder}}
+          otherwise, reply:
+          "Final answer."
+        input_variables:
+          - plan_feedback
+        partial_variables:
+          plan_placeholder: "{{conceptual_solution}}"
+        template_format: jinja2
+      default_human_input_keys:
+        - "plan_feedback"
+      output_data_transformations:
+        - _target_: flows.data_transformations.RegexFirstOccurrenceExtractor
+          regex: '(?<=Conceptual solution)([\s\S]*?)(?=\n\n# [A-Z]|\Z)'
+          regex_fallback:
+            - '(?<=Conceptual solution:)([\s\S]*?)(?=\n\n# [A-Z]|\Z)'
+          output_key: "plan"
+          strip: True
+          assert_unique: True
+          verbose: True
+        - _target_: flows.data_transformations.EndOfInteraction
+          end_of_interaction_string: "Final answer"
+          output_key: "end_of_interaction"
+          verbose: True
+      output_keys:
+        - "plan"
+        - "end_of_interaction"
+  - _target_: martinjosifoski.CC_flows.LC_PlanCritic.instantiate_from_default_config
+    overrides:
+      name: PlanCritic
+      output_data_transformations:
+        - _target_: flows.data_transformations.KeyRename
+          old_key2new_key:
+            raw_response: "plan_feedback"
LC_PlanCollab_Code.py
ADDED
@@ -0,0 +1,6 @@
+from flows.base_flows import SequentialFlow
+
+
+class LC_PlanCollab_Code(SequentialFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_PlanCollab_Code.yaml
ADDED
@@ -0,0 +1,22 @@
+name: "PlanCollab_Code_Flow"
+description: "ToDO: add description"
+
+early_exit_key: null
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.code: "code"
+output_keys:
+  - "code"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_PlanCollab.instantiate_from_default_config
+  - _target_: martinjosifoski.CC_flows.LC_CodeWithPlan.instantiate_from_default_config
LC_PlanCritic.py
ADDED
@@ -0,0 +1,6 @@
+from martinjosifoski.OpenAIChatAtomicFlow import OpenAIChatAtomicFlow
+
+
+class LC_PlanCritic(OpenAIChatAtomicFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_PlanCritic.yaml
ADDED
@@ -0,0 +1,77 @@
+name: "PlanCritic_Flow"
+verbose: True
+description: "ToDO: add description"
+
+model_name: "gpt-4"
+generation_parameters:
+  n: 1
+  max_tokens: 3000
+  temperature: 0.3
+
+  model_kwargs:
+    top_p: 0.2
+    frequency_penalty: 0
+    presence_penalty: 0
+
+system_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    Your goal is to identify potential issues with a conceptual solution to a given competitive programming problem.
+
+    The user will specify the problem by providing you with:
+    - the problem statement
+    - example test cases
+    - the constraints of the problem
+    - a conceptual solution attempt
+
+    Crucially, your goal is to consider all aspects of the problem and pinpoint potential issues with the conceptual solution attempt (if any), and not to provide the conceptual solution or the code implementation yourself.
+    Some aspects to consider: Are there any logical mistakes with the proposed algorithm? Are the corner cases correctly handled?
+    The user will provide you with a task and an output format that you will strictly follow.
+  input_variables: []
+  template_format: jinja2
+
+human_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: "{{query}}"
+  input_variables:
+    - "query"
+  template_format: jinja2
+
+query_message_prompt_template:
+  _target_: langchain.PromptTemplate
+  template: |2-
+    # Problem statement
+    {{problem_description}}
+
+    {{io_description}}
+
+    # Constraints
+    {{constraints}}
+
+    # code stub
+    ```python
+    {{python_stub}}
+    ```
+
+    # Conceptual solution attempt
+    {{plan}}
+
+
+    Consider the problem statement and the solution attempt. Are there any issues with the proposed conceptual solution or it is correct? Explain your reasoning very concisely.
+  input_variables:
+    - "problem_description"
+    - "io_description"
+    - "constraints"
+    - "python_stub"
+    - "plan"
+  template_format: jinja2
+
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+  - "plan"
+
+output_keys:
+  - "plan_feedback"
LC_PlanReflect.py
ADDED
@@ -0,0 +1,6 @@
+from flows.base_flows import GeneratorCriticFlow
+
+
+class LC_PlanReflect(GeneratorCriticFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_PlanReflect.yaml
ADDED
@@ -0,0 +1,63 @@
+name: "PlanReflect_Flow"
+verbose: True
+description: "ToDO: add description"
+
+reset_generator_every_round: False
+reset_critic_every_round: True
+max_rounds: 2 # ToDo: increase to 4
+early_exit_key: "end_of_interaction"
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.plan: "plan"
+output_keys:
+  - "plan"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_Plan.instantiate_from_default_config
+    overrides:
+      name: "PlanGenerator_Flow"
+      model_name: "gpt-4"
+      input_data_transformations:
+        - _target_: flows.data_transformations.KeyRename
+          old_key2new_key:
+            plan_reflect_message: "query"
+      output_data_transformations:
+        - _target_: flows.data_transformations.RegexFirstOccurrenceExtractor
+          regex: '(?<=Conceptual solution)([\s\S]*?)(?=\n\n# [A-Z]|\Z)'
+          regex_fallback:
+            - '(?<=Conceptual solution:)([\s\S]*?)(?=\n\n# [A-Z]|\Z)'
+          output_key: "plan"
+          strip: True
+          assert_unique: True
+          verbose: True
+        - _target_: flows.data_transformations.EndOfInteraction
+          end_of_interaction_string: "Final answer"
+          output_key: "end_of_interaction"
+          verbose: True
+      output_keys:
+        - "plan"
+        - "end_of_interaction"
+
+  - _target_: martinjosifoski.CC_flows.FixedReply_PlanReflect.instantiate_from_default_config
+    overrides:
+      name: "PlanFixedReplyCritic"
+      description: "ToDo: Add description"
+      input_keys:
+        - "plan"
+      output_keys:
+        - "query"
+      fixed_reply: |2-
+        Consider the problem statement and the last proposed solution. Are you sure that the solution is provided in the requested format, and crucially, solves the problem?
+        If that is not the case, provide the corrected version of the conceptual solution in the following format:
+        # Conceptual solution
+        {{conceptual_solution}}
+        otherwise, reply:
+        "Final answer."
LC_PlanReflect_Code.py
ADDED
@@ -0,0 +1,6 @@
+from flows.base_flows import SequentialFlow
+
+
+class LC_PlanReflect_Code(SequentialFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_PlanReflect_Code.yaml
ADDED
@@ -0,0 +1,15 @@
+name: "PlanReflect_Code_Flow"
+description: "ToDO: add description"
+
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+
+output_keys:
+  - "code"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_PlanReflect.instantiate_from_default_config
+  - _target_: martinjosifoski.CC_flows.LC_CodeWithPlan.instantiate_from_default_config
LC_Plan_Code.py
ADDED
@@ -0,0 +1,6 @@
+from flows.base_flows import SequentialFlow
+
+
+class LC_Plan_Code(SequentialFlow):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
LC_Plan_Code.yaml
ADDED
@@ -0,0 +1,26 @@
+name: "Plan_Code_Flow"
+description: "ToDO: add description"
+
+early_exit_key: null
+
+input_data_transformations: []
+input_keys:
+  - "problem_description"
+  - "io_description"
+  - "constraints"
+  - "python_stub"
+
+output_data_transformations:
+  - _target_: flows.data_transformations.KeyRename
+    old_key2new_key:
+      raw_response.code: "code"
+output_keys:
+  - "code"
+
+subflows_config:
+  - _target_: martinjosifoski.CC_flows.LC_Plan.instantiate_from_default_config
+    overrides:
+      model_name: "gpt-4"
+  - _target_: martinjosifoski.CC_flows.LC_CodeWithPlan.instantiate_from_default_config
+    overrides:
+      model_name: "gpt-4"
__init__.py
CHANGED
@@ -3,7 +3,6 @@ from flows import flow_verse
 
 dependencies = [
     {"url": "martinjosifoski/OpenAIChatAtomicFlow", "revision": "main"},
-    # {"url": os.getenv("CC_flows_repo"), "revision": "main"}
 ]
 flow_verse.sync_dependencies(dependencies)
 
@@ -48,3 +47,46 @@ from .CF_CodeDebugCollab import CF_CodeDebugCollab
 from .CF_CodeCriticWrongAttemptWithPlan import CF_CodeCriticWrongAttemptWithPlan
 from .CF_CodeDebugCriticWithPlan import CF_CodeDebugCriticWithPlan
 from .CF_CodeDebugCollabWithPlan import CF_CodeDebugCollabWithPlan
+
+
+########################## LC ##########################
+
+# lc-code
+from .LC_Code import LC_Code
+
+# lc-code_reflect
+from .LC_CodeReflect import LC_CodeReflect
+
+# lc-code_collab
+from .LC_CodeCritic import LC_CodeCritic
+from .LC_CodeCollab import LC_CodeCollab
+
+# lc-plan-code (and lc-plan_oracle-code)
+from .LC_Plan import LC_Plan
+from .LC_CodeWithPlan import LC_CodeWithPlan
+from .LC_Plan_Code import LC_Plan_Code
+
+# lc-plan_reflect-code
+from .LC_PlanReflect import LC_PlanReflect
+from .LC_PlanReflect_Code import LC_PlanReflect_Code
+
+# lc-plan_collab-code
+from .LC_PlanCritic import LC_PlanCritic
+from .LC_PlanCollab import LC_PlanCollab
+from .LC_PlanCollab_Code import LC_PlanCollab_Code
+
+# lc-code_debug
+from .LC_CodeTesting import LC_CodeTesting
+from .LC_CodeDebug import LC_CodeDebug
+
+# lc-code_debug_collab
+from .LC_CodeCriticWrongAttempt import LC_CodeCriticWrongAttempt
+from .LC_CodeDebugCritic import LC_CodeDebugCritic
+from .LC_CodeDebugCollab import LC_CodeDebugCollab
+
+# lc-plan_oracle-code_debug_collab
+from .LC_CodeCriticWrongAttemptWithPlan import LC_CodeCriticWrongAttemptWithPlan
+from .LC_CodeDebugCriticWithPlan import LC_CodeDebugCriticWithPlan
+from .LC_CodeDebugCollabWithPlan import LC_CodeDebugCollabWithPlan
+
+
src/evaluation/testing_utils_codeforces.py
CHANGED
@@ -17,7 +17,7 @@ from pyext import RuntimeModule
 from wrapt_timeout_decorator import timeout as wrapt_timeout
 import threading
 
-from
+from ..datasets.schema import assert_test_format_codeforces
 
 from flows import logging
 
src/evaluation/testing_utils_leetcode.py
ADDED
@@ -0,0 +1,258 @@
# This is based heavily on the huggingface APPS metric
# to run the solution files we're using a timing based approach
# for capturing the stdout
# used for testing the code that reads from input
import logging
import re
from subprocess import Popen, PIPE, TimeoutExpired
from typing import List, Tuple
import threading

log = logging.getLogger(__name__)
lock = threading.Lock()

def evaluate_solution_for_problem(
    candidate_solution,
    python_stub,
    hidden_tests_io=None,
    public_tests_io=None,
    timeout=10,
    debug=False,
    add_extra_imports=False,
):
    with lock:
        """See the readme for the output format of this function."""
        if hidden_tests_io is None:
            hidden_tests_io = []
        if public_tests_io is None:
            public_tests_io = []

        if candidate_solution is None:
            results_dict = {
                "compilation_status": False,
                "compilation_error_message": "No code was provided.",
                "timeout_error": False,
                "hidden_tests_results": [
                    {
                        "status": False,
                        "error_message": "No code was provided.",
                        "generated_output": None,
                        "input": test[0],
                        "expected_output": test[1],
                    }
                    for test in hidden_tests_io
                ],
                "public_tests_results": [
                    {
                        "status": False,
                        "error_message": "No code was provided.",
                        "generated_output": None,
                        "input": test[0],
                        "expected_output": test[1],
                    }
                    for test in public_tests_io
                ],
            }
            return results_dict

        hidden_tests_results = check_correctness(
            candidate_solution, python_stub, hidden_tests_io, timeout, debug, add_extra_imports
        )
        public_tests_results = check_correctness(
            candidate_solution, python_stub, public_tests_io, timeout, debug, add_extra_imports
        )

        # the compilation status shouldn't depend on the tests
        if len(hidden_tests_io) > 0 and len(public_tests_io) > 0:
            assert hidden_tests_results["compilation_status"] == public_tests_results["compilation_status"]

        compilation_status = True
        error_message = None
        timeout_error = False

        if len(hidden_tests_io) > 0:
            compilation_status = compilation_status and hidden_tests_results["compilation_status"]
            error_message = hidden_tests_results["error_message"]
            timeout_error = timeout_error or hidden_tests_results["timeout_error"]

        if len(public_tests_io) > 0:
            compilation_status = compilation_status and public_tests_results["compilation_status"]
            error_message = public_tests_results["error_message"]
            timeout_error = timeout_error or public_tests_results["timeout_error"]

        results_dict = {
            "compilation_status": compilation_status,
            "compilation_error_message": error_message,
            "timeout_error": timeout_error,
            "hidden_tests_results": hidden_tests_results["results"],
            "public_tests_results": public_tests_results["results"],
        }

        return results_dict


def check_correctness(
    candidate_solution: str,
    python_stub: str,
    tests: List[Tuple[List[str], str]],
    timeout: int = 6000,
    debug=True,
    add_extra_imports=False,
):
    compilation_status = True
    compilation_error = None
    results = []
    timeout_occurred = False

    for idx, test in enumerate(tests):
        inp, out, expl = test
        result = one_test(
            candidate_solution, python_stub, inp, out, timeout=timeout, debug=debug, add_extra_imports=add_extra_imports
        )
        error_message = result["error_message"]

        if error_message is not None:
            if "syntaxerror" in error_message.lower():
                compilation_status = False
                compilation_error = error_message
            if "timeout" in error_message.lower():
                timeout_occurred = True
        results.append(result)

        if timeout_occurred:
            break

    if timeout_occurred:
        return {
            "compilation_status": True,
            "timeout_error": True,
            "error_message": "Timeout error.",
            "results": results,
        }

    return {
        "compilation_status": compilation_status,
        "timeout_error": False,
        "error_message": compilation_error,
        "results": results,
    }


def one_test(candidate_solution, python_stub, inp, out, timeout=10, debug=False, add_extra_imports=False):
    python_stub = python_stub.strip()
    candidate_solution = candidate_solution.strip()

    out = out.replace("null", "None").replace("true", "True").replace("false", "False")

    # reformat the solution and parse class and method name
    class_def, signature = python_stub.split(" def ")
    class_name = class_def.split("class ")[1].strip().rstrip(":")
    func_name, _ = signature.split("(")

    # reformatting the input
    first_param = r"^\w+\s\=\s"
    later_params = r",\s\w+\s\=\s"

    inp = re.sub(first_param, "", inp)
    inp = re.sub(later_params, ", ", inp)

    # we add custom code to invoke the solution
    before_output = "AFTER THIS COMES OUR OWN GENERATED OUTPUT !@#!@!"
    after_output = "AFTER THIS COMES OUR VERDICT !@#!@!"

    if add_extra_imports:
        sol = f"""
from collections import *
from math import *
import math
from functools import *
from heapq import *
import heapq
import itertools
from itertools import *
import bisect
from bisect import *
"""
    else:
        sol = ""

    sol += f"""
from typing import List, Tuple, Optional
{candidate_solution}
sfohsdfdsfjhsdkfjhsdkjfh = {class_name}()
res = sfohsdfdsfjhsdkfjhsdkjfh.{func_name}({inp})

def nested_list_convert(inp):
    try:
        try:
            inp = list(inp)
        except BaseException as e:
            return inp
        out = []
        for i in inp:
            out.append(nested_list_convert(i))
    except BaseException as e:
        return inp
    return out

matching = False
matching = matching or res == {out}
matching = matching or nested_list_convert(res) == {out}
matching = matching or nested_list_convert(res) == nested_list_convert({out})
matching = matching or str({out})==str(res).replace("{{","[").replace("(","[").replace("}}","]").replace(")","]")
matching = matching or str({out})==str(res).replace("{{","[").replace("(","[").replace("}}","]").replace(")","]")
print("res: ", res)
print("out: ", {out})
print("{before_output}")
print(res)
print("{after_output}")
print(matching)
"""

    cmd = "python3"

    proc = Popen([cmd, "-c", sol], stdin=PIPE, stdout=PIPE, stderr=PIPE)

    result_object = {"input": inp, "expected_output": out.strip('"')}

    try:
        stdout, stderr = proc.communicate("", timeout=timeout)
    except TimeoutExpired as e:
        if debug:
            log.info(f"Timeout error, timeout={timeout}")
        result_object.update({"status": False, "error_message": "Timeout error.", "generated_output": None})
        return result_object

    finally:
        proc.kill()

    stdout = stdout.decode()
    stderr = stderr.decode().lower()

    if stderr == "":
        # No compilation or runtime error
        stderr = None
    else:
        # Runtime or compilation error (distinction is made by the presence of "syntaxerror" in the error message)
        result_object.update(**{"status": False, "error_message": stderr, "generated_output": None})
        return result_object

    try:
        generated_output = stdout.split(before_output)[1]
        generated_output, verdict = generated_output.split(after_output)
        result_object.update(
            **{
                "status": verdict.strip() == "True",
                "error_message": stderr,
                "generated_output": generated_output.strip(),
            }
        )
        return result_object
    except IndexError as e:
        raise Exception(f"An unexpected error has occurred while parsing the following generated output: {stdout}")
        # Used in debugging
        # log.info(e)
        # result_object.update(
        #     **{"status": False, "error_message": "The output couldn't be parsed", "generated_output": None}
        # )
        # return result_object
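For reference, a minimal usage sketch of the evaluator above (not part of the commit). It assumes a LeetCode-style `python_stub`, a candidate solution string, and tests given as `(input, expected_output, explanation)` tuples, the shape unpacked by `check_correctness`; the import path assumes the repository root is on the Python path, and the concrete problem, names, and values below are hypothetical.

from src.evaluation.testing_utils_leetcode import evaluate_solution_for_problem

# Hypothetical LeetCode-style stub and candidate solution, used only for illustration.
python_stub = "class Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:"
candidate_solution = """
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        seen = {}
        for i, x in enumerate(nums):
            if target - x in seen:
                return [seen[target - x], i]
            seen[x] = i
"""

# Each test is (input, expected_output, explanation), as unpacked in check_correctness.
public_tests = [("nums = [2,7,11,15], target = 9", "[0,1]", "because 2 + 7 == 9")]

results = evaluate_solution_for_problem(
    candidate_solution,
    python_stub,
    public_tests_io=public_tests,
    timeout=10,
)
print(results["compilation_status"])                 # True if the generated script ran without errors
print(results["public_tests_results"][0]["status"])  # True if the returned value matched "[0,1]"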