unilm committed
Commit ef01237
1 Parent(s): 579986a

Update app.py

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -17,7 +17,7 @@ def generate(plain_text):
     # Just use 1 beam and get 1 output, this is much, much, much faster than 8 beams and 8 outputs and we're only using the first.
     outputs = prompter_model.generate(input_ids, do_sample=False, max_new_tokens=75, eos_token_id=eos_id, pad_token_id=eos_id, length_penalty=-1.0)
     # Use [input_ids.shape[-1]:] because the decoded tokenised version of plain_text may have a different number of characters to the original
-    res = tokenizer.decode(outputs[0][input_ids.shape[-1]:])
+    res = prompter_tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
     return res
 
 txt = grad.Textbox(lines=1, label="Initial Text", placeholder="Input Prompt")
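For context, a minimal, self-contained sketch of the decode pattern this commit fixes is shown below. It assumes the public "gpt2" checkpoint and an illustrative prompt as stand-ins for the Space's actual prompter model and input; neither is taken from this repo.

# Minimal sketch of the decode-slice pattern, assuming the public "gpt2"
# checkpoint as a hypothetical stand-in for the Space's prompter model.
from transformers import AutoModelForCausalLM, AutoTokenizer

prompter_tokenizer = AutoTokenizer.from_pretrained("gpt2")
prompter_model = AutoModelForCausalLM.from_pretrained("gpt2")
eos_id = prompter_tokenizer.eos_token_id

# Illustrative prompt, not from the repo.
input_ids = prompter_tokenizer("A colorful photo of a castle", return_tensors="pt").input_ids
outputs = prompter_model.generate(input_ids, do_sample=False, max_new_tokens=75,
                                  eos_token_id=eos_id, pad_token_id=eos_id)

# outputs[0] holds the prompt tokens followed by the generated tokens, so
# slicing at input_ids.shape[-1] keeps only the newly generated continuation;
# skip_special_tokens=True drops the trailing EOS from the decoded string.
res = prompter_tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(res)

Decoding with prompter_tokenizer rather than a different tokenizer object keeps the decode consistent with the vocabulary that produced input_ids, and skip_special_tokens=True prevents the EOS token appended by generate from leaking into the returned prompt text.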