Update README.md
Browse files
README.md
CHANGED
@@ -29,7 +29,7 @@ model = GPT2LMHeadModel.from_pretrained("skytnt/gpt2-japanese-lyric-small")
|
|
29 |
|
30 |
|
31 |
def gen_lyric(prompt_text: str):
|
32 |
-
prompt_text = prompt_text.replace("\n", "\n")
|
33 |
prompt_tokens = tokenizer.tokenize(prompt_text)
|
34 |
prompt_token_ids = tokenizer.convert_tokens_to_ids(prompt_tokens)
|
35 |
prompt_tensor = torch.LongTensor(prompt_token_ids)
|
@@ -53,7 +53,7 @@ def gen_lyric(prompt_text: str):
|
|
53 |
generated_sequence = output_sequences.tolist()[0]
|
54 |
generated_tokens = tokenizer.convert_ids_to_tokens(generated_sequence)
|
55 |
generated_text = tokenizer.convert_tokens_to_string(generated_tokens)
|
56 |
-
generated_text = "\n".join([s.strip() for s in generated_text.split('\n')]).replace(' ', '\u3000').replace(
|
57 |
'</s>', '\n\n---end---')
|
58 |
return generated_text
|
59 |
|
|
|
29 |
|
30 |
|
31 |
def gen_lyric(prompt_text: str):
|
32 |
+
prompt_text = prompt_text.replace("\n", "\\n")
|
33 |
prompt_tokens = tokenizer.tokenize(prompt_text)
|
34 |
prompt_token_ids = tokenizer.convert_tokens_to_ids(prompt_tokens)
|
35 |
prompt_tensor = torch.LongTensor(prompt_token_ids)
|
|
|
53 |
generated_sequence = output_sequences.tolist()[0]
|
54 |
generated_tokens = tokenizer.convert_ids_to_tokens(generated_sequence)
|
55 |
generated_text = tokenizer.convert_tokens_to_string(generated_tokens)
|
56 |
+
generated_text = "\n".join([s.strip() for s in generated_text.split('\\n')]).replace(' ', '\u3000').replace(
|
57 |
'</s>', '\n\n---end---')
|
58 |
return generated_text
|
59 |
|