Балаганский Никита Николаевич committed on
Commit
c07b0b4
1 Parent(s): 82298a9

add description to plots

Browse files
Files changed (2) hide show
  1. app.py +6 -0
  2. generator.py +2 -0
app.py CHANGED
@@ -127,6 +127,7 @@ def main():
127
  act_type = "softmax"
128
  st.write(WARNING_TEXT[language])
129
  show_pos_alpha = st.checkbox("Show positive alphas", value=False)
 
130
  st.latex(r"p(x_i|x_{<i}, c) \propto p(x_i|x_{<i})p(c|x_{\leq i})^{\alpha}")
131
  if act_type == "softmax":
132
  alpha = st.slider("α", min_value=-40, max_value=40 if show_pos_alpha else 0, step=1, value=0)
@@ -151,6 +152,10 @@ def main():
151
  figure = go.Figure(data=[scatter, scatter_tip, scatter_tip_lines], layout=layout)
152
  figure.update_layout(paper_bgcolor="#FFFFFF", plot_bgcolor='#FFFFFF', showlegend=False)
153
  st.plotly_chart(figure, use_container_width=True)
 
 
 
 
154
  auth_token = os.environ.get('TOKEN') or True
155
  fp16 = st.checkbox("FP16", value=True)
156
  st.session_state["generated_text"] = None
@@ -245,6 +250,7 @@ def inference(
245
  **kwargs
246
  )
247
  print(f"Output for prompt: {sequences}")
 
248
  return sequences[0]
249
 
250
 
 
127
  act_type = "softmax"
128
  st.write(WARNING_TEXT[language])
129
  show_pos_alpha = st.checkbox("Show positive alphas", value=False)
130
+ st.markdown("""In our method, we reweight the probability of the next token with an external classifier, namely, the Attribute model. If the $\alpha$ parameter is equal to zero, the distribution below collapses into a simple language model without any modification. If $\alpha$ is below zero, then at every generation step the attribute model tries to minimize the probability of the desired attribute. Otherwise, the model is forced to produce text with a higher probability of the attribute.""")
131
  st.latex(r"p(x_i|x_{<i}, c) \propto p(x_i|x_{<i})p(c|x_{\leq i})^{\alpha}")
132
  if act_type == "softmax":
133
  alpha = st.slider("α", min_value=-40, max_value=40 if show_pos_alpha else 0, step=1, value=0)
 
152
  figure = go.Figure(data=[scatter, scatter_tip, scatter_tip_lines], layout=layout)
153
  figure.update_layout(paper_bgcolor="#FFFFFF", plot_bgcolor='#FFFFFF', showlegend=False)
154
  st.plotly_chart(figure, use_container_width=True)
155
+ with st.expander("What is it?"):
156
+ st.write("""
157
+ Text generation with an external classifier requires a huge amount of computation. Therefore, generating text with CAIF can be slow. To overcome this issue, we can apply reweighting not at every step. Our hypothesis is that we can run reweighting only if the entropy of the next token is above a certain threshold. This strategy will reduce the amount of computation. Note that if the entropy threshold is too high, we do not get the desired attribute in the generated text.
158
+ """)
159
  auth_token = os.environ.get('TOKEN') or True
160
  fp16 = st.checkbox("FP16", value=True)
161
  st.session_state["generated_text"] = None
 
250
  **kwargs
251
  )
252
  print(f"Output for prompt: {sequences}")
253
+
254
  return sequences[0]
255
 
256
 
generator.py CHANGED
@@ -110,6 +110,8 @@ class Generator:
110
  })
111
  fig = go.Figure([scatter_data], layout=layout)
112
  plot.plotly_chart(fig, use_container_width=True)
 
 
113
  text.text(current_decoded)
114
 
115
  return (
 
110
  })
111
  fig = go.Figure([scatter_data], layout=layout)
112
  plot.plotly_chart(fig, use_container_width=True)
113
+ with st.expander("What is it?"):
114
+ st.write("You can see how the probability of the desired attribute varies for every generation step.")
115
  text.text(current_decoded)
116
 
117
  return (