asd
app.py CHANGED
@@ -577,23 +577,33 @@ def generate_paraphrases(text, setting, output_format):
 
     if setting == 1:
         num_return_sequences = 3
-
+        temperature = 0.7
+        top_k = 50
+        top_p = 0.9
         max_length = 128
     elif setting == 2:
         num_return_sequences = 3
-
+        temperature = 0.8
+        top_k = 50
+        top_p = 0.9
         max_length = 192
     elif setting == 3:
         num_return_sequences = 3
-
+        temperature = 0.9
+        top_k = 50
+        top_p = 0.9
         max_length = 256
     elif setting == 4:
         num_return_sequences = 3
-
+        temperature = 1.0
+        top_k = 50
+        top_p = 0.9
         max_length = 320
     else:
         num_return_sequences = 3
-
+        temperature = 1.1
+        top_k = 50
+        top_p = 0.9
         max_length = 384
 
     formatted_output = "Original text:\n" + text + "\n\n"
@@ -616,9 +626,11 @@ def generate_paraphrases(text, setting, output_format):
         input_ids=input_ids,
         attention_mask=attention_mask,
         max_length=max_length,
-
-
-
+        num_return_sequences=num_return_sequences,
+        do_sample=True,
+        top_k=top_k,
+        top_p=top_p,
+        temperature=temperature
     )
 
     paraphrases_texts = [paraphraser_tokenizer.decode(beam_output, skip_special_tokens=True, clean_up_tokenization_spaces=True) for beam_output in beam_outputs]
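Taken together, the change makes each setting pick a temperature and max_length and switches generate() to top-k / nucleus sampling. The following is a minimal sketch of that flow, not the Space's exact code: the checkpoint name is a placeholder (app.py loads its own paraphraser model), and the SETTINGS dict is just an illustrative way to hold the per-setting values shown in the diff; the parameter values themselves (num_return_sequences=3, top_k=50, top_p=0.9, the temperatures and max_lengths) mirror the commit.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

MODEL_NAME = "t5-small"  # placeholder; substitute the paraphrase checkpoint app.py actually uses
paraphraser_tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
paraphraser_model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)

# setting -> (temperature, max_length); top_k, top_p and num_return_sequences stay fixed
SETTINGS = {1: (0.7, 128), 2: (0.8, 192), 3: (0.9, 256), 4: (1.0, 320)}

def generate_paraphrases(text, setting):
    temperature, max_length = SETTINGS.get(setting, (1.1, 384))  # else branch from the diff
    encoding = paraphraser_tokenizer(text, return_tensors="pt", truncation=True)
    outputs = paraphraser_model.generate(
        input_ids=encoding["input_ids"],
        attention_mask=encoding["attention_mask"],
        max_length=max_length,
        num_return_sequences=3,
        do_sample=True,   # sample instead of decoding deterministically
        top_k=50,
        top_p=0.9,
        temperature=temperature,
    )
    return [
        paraphraser_tokenizer.decode(o, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        for o in outputs
    ]

With do_sample=True the three returned sequences are drawn stochastically rather than being near-identical beam candidates, and the temperature that rises with the setting (0.7 up to 1.1) trades determinism for diversity as the allowed output length grows; that appears to be the intent of the commit.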