sohojoe committed on
Commit
eb6999f
•
1 Parent(s): 90a9891

move pretty printing to response_state_manager

Browse files
Files changed (2) hide show
  1. charles_app.py +7 -18
  2. response_state_manager.py +17 -1
charles_app.py CHANGED
@@ -89,7 +89,6 @@ class CharlesApp:
89
 
90
  process_speech_to_text_future = []
91
  human_preview_text = ""
92
- robot_preview_text = ""
93
  additional_prompt = None
94
  previous_prompt = ""
95
  is_talking = False
@@ -128,14 +127,10 @@ class CharlesApp:
128
 
129
  if speaker_finished and len(prompt) > 0 and prompt not in prompts_to_ignore:
130
  print(f"Prompt: {prompt}")
131
- line = ""
132
- for i, response in enumerate(response_state.current_responses):
133
- line += "🤖 " if len(line) == 0 else ""
134
- line += f"[{response_state.speech_chunks_per_response[i]}] {response} \n"
135
- if len(line) > 0:
136
- await add_debug_output(line)
137
  human_preview_text = ""
138
- robot_preview_text = ""
139
  if additional_prompt is not None:
140
  prompt = additional_prompt + ". " + prompt
141
 await add_debug_output(f"👨 {prompt}")
@@ -167,20 +162,14 @@ class CharlesApp:
167
  human_preview_text = f"πŸ‘¨β“ {prompt}"
168
 
169
  for new_response in response_step_obs.llm_responses:
170
- # add_debug_output(f"🤖 {new_response}")
171
  self._prompt_manager.append_assistant_message(new_response)
172
- robot_preview_text = ""
173
- if len(response_step_obs.llm_preview):
174
- robot_preview_text = f"πŸ€–β“ {response_step_obs.llm_preview}"
175
 
176
  list_of_strings = debug_output_history.copy()
177
- line = ""
178
- for i, response in enumerate(response_state.current_responses):
179
- line += "🤖 " if len(line) == 0 else ""
180
- line += f"[{response_state.speech_chunks_per_response[i]}] {response} \n"
181
  if len(robot_preview_text) > 0:
182
- line += robot_preview_text+" \n"
183
- list_of_strings.append(line)
184
  if len(human_preview_text) > 0:
185
  list_of_strings.append(human_preview_text)
186
  if len(list_of_strings) > 10:
 
89
 
90
  process_speech_to_text_future = []
91
  human_preview_text = ""
 
92
  additional_prompt = None
93
  previous_prompt = ""
94
  is_talking = False
 
127
 
128
  if speaker_finished and len(prompt) > 0 and prompt not in prompts_to_ignore:
129
  print(f"Prompt: {prompt}")
130
+ response_preview_text = self._response_state_manager.pretty_print_current_responses()
131
+ if len(response_preview_text) > 0:
132
+ await add_debug_output(response_preview_text)
 
 
 
133
  human_preview_text = ""
 
134
  if additional_prompt is not None:
135
  prompt = additional_prompt + ". " + prompt
136
 await add_debug_output(f"👨 {prompt}")
 
162
  human_preview_text = f"πŸ‘¨β“ {prompt}"
163
 
164
  for new_response in response_step_obs.llm_responses:
 
165
  self._prompt_manager.append_assistant_message(new_response)
 
 
 
166
 
167
  list_of_strings = debug_output_history.copy()
168
+ robot_preview_text = self._response_state_manager.pretty_print_preview_text()
169
+ response_preview_text = self._response_state_manager.pretty_print_current_responses()
 
 
170
  if len(robot_preview_text) > 0:
171
+ response_preview_text += robot_preview_text+" \n"
172
+ list_of_strings.append(response_preview_text)
173
  if len(human_preview_text) > 0:
174
  list_of_strings.append(human_preview_text)
175
  if len(list_of_strings) > 10:
response_state_manager.py CHANGED
@@ -20,6 +20,7 @@ class ResponseState:
20
  self.step = step
21
  self.current_responses = []
22
  self.speech_chunks_per_response = []
 
23
  self.is_speaking = False
24
 
25
  def __str__(self):
@@ -50,13 +51,28 @@ class ResponseStateManager:
50
 
51
  def set_llm_preview(self, llm_preview):
52
  self.response_step_obs.llm_preview = llm_preview
 
53
 
54
  def add_llm_response_and_clear_llm_preview(self, llm_response):
55
  self.response_state.current_responses.append(llm_response)
56
  self.response_state.speech_chunks_per_response.append(0)
57
  self.response_step_obs.llm_responses.append(llm_response)
58
  self.response_step_obs.llm_preview = ''
 
59
 
60
  def add_tts_raw_chunk_id(self, chunk_id, llm_sentence_id):
61
  self.response_state.speech_chunks_per_response[llm_sentence_id] += 1
62
- self.response_step_obs.tts_raw_chunk_ids.append(chunk_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  self.step = step
21
  self.current_responses = []
22
  self.speech_chunks_per_response = []
23
+ self.llm_preview = ''
24
  self.is_speaking = False
25
 
26
  def __str__(self):
 
51
 
52
  def set_llm_preview(self, llm_preview):
53
  self.response_step_obs.llm_preview = llm_preview
54
+ self.response_state.llm_preview = llm_preview
55
 
56
  def add_llm_response_and_clear_llm_preview(self, llm_response):
57
  self.response_state.current_responses.append(llm_response)
58
  self.response_state.speech_chunks_per_response.append(0)
59
  self.response_step_obs.llm_responses.append(llm_response)
60
  self.response_step_obs.llm_preview = ''
61
+ self.response_state.llm_preview = ''
62
 
63
  def add_tts_raw_chunk_id(self, chunk_id, llm_sentence_id):
64
  self.response_state.speech_chunks_per_response[llm_sentence_id] += 1
65
+ self.response_step_obs.tts_raw_chunk_ids.append(chunk_id)
66
+
67
+ def pretty_print_current_responses(self)->str:
68
+ line = ""
69
+ for i, response in enumerate(self.response_state.current_responses):
70
+ line += "🤖 " if len(line) == 0 else ""
71
+ line += f"[{self.response_state.speech_chunks_per_response[i]}] {response} \n"
72
+ return line
73
+
74
+ def pretty_print_preview_text(self)->str:
75
+ robot_preview_text = ""
76
+ if len(self.response_state.llm_preview):
77
+ robot_preview_text = f"πŸ€–β“ {self.response_state.llm_preview}"
78
+ return robot_preview_text