jsulz HF staff committed on
Commit
48a9e00
1 Parent(s): eecbbae

little cleanup and lowering the model temp

Browse files
Files changed (1) hide show
  1. app.py +2 -26
app.py CHANGED
@@ -140,28 +140,6 @@ def plt_wordcloud(president, _df):
140
  return fig6
141
 
142
 
143
- def summarization(speech_key, _df):
144
- client = InferenceClient(model="facebook/bart-large-cnn")
145
- chunk_len = 4000
146
- speech = _df[_df["speech_key"] == speech_key]["speech_html"].values[0]
147
- sotu_chunks = int(math.ceil(len(speech) / chunk_len))
148
- response = []
149
- for chunk in range(1, sotu_chunks + 1):
150
- if chunk * 4000 < len(speech):
151
- chunk_text = speech[(chunk - 1) * chunk_len : chunk * chunk_len]
152
- else:
153
- chunk_text = speech[(chunk - 1) * chunk_len :]
154
- try:
155
- summarization_chunk = client.summarization(
156
- chunk_text, parameters={"truncation": "do_not_truncate"}
157
- )
158
- except Exception as e:
159
- print(e)
160
- response.append(summarization_chunk.summary_text)
161
-
162
- return "\n\n".join(response)
163
-
164
-
165
  def streaming(speech_key, _df):
166
  client = InferenceClient(token=os.environ["HF_TOKEN"])
167
  speech = _df[_df["speech_key"] == speech_key]["speech_html"].values[0]
@@ -181,10 +159,8 @@ def streaming(speech_key, _df):
181
  ],
182
  max_tokens=1200,
183
  stream=True,
184
- temperature=0.5,
185
  ):
186
- # yield message.choices[0].delta.content
187
- # print(message)
188
  messages.append(message.choices[0].delta.content)
189
  return "".join(messages)
190
 
@@ -203,7 +179,7 @@ with gr.Blocks() as demo:
203
  )
204
 
205
  gr.Markdown(
206
- "In addition to analyzing the content, this space also leverages the [Qwen/2.5-72B-Instruct](https://deepinfra.com/Qwen/Qwen2.5-72B-Instruct) model to summarize a speech. The model is tasked with providing a concise summary of a speech from a given president. To get a summary, go to the 'Summarize a Speech' tab."
207
  )
208
 
209
  with gr.Tab(label="Summarize a Speech"):
 
140
  return fig6
141
 
142
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
  def streaming(speech_key, _df):
144
  client = InferenceClient(token=os.environ["HF_TOKEN"])
145
  speech = _df[_df["speech_key"] == speech_key]["speech_html"].values[0]
 
159
  ],
160
  max_tokens=1200,
161
  stream=True,
162
+ temperature=0.25,
163
  ):
 
 
164
  messages.append(message.choices[0].delta.content)
165
  return "".join(messages)
166
 
 
179
  )
180
 
181
  gr.Markdown(
182
+ "In addition to analyzing the content, this space also leverages the [Qwen/2.5-72B-Instruct](https://deepinfra.com/Qwen/Qwen2.5-72B-Instruct) model to summarize a speech. The model is tasked with providing a concise summary of a speech from a given president. Pick a speech from the dropdown and click 'Summarize' on the 'Summarize a Speech' tab."
183
  )
184
 
185
  with gr.Tab(label="Summarize a Speech"):