skytnt committed on
Commit
d316eff
1 Parent(s): b4f04e8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -29,7 +29,7 @@ model = GPT2LMHeadModel.from_pretrained("skytnt/gpt2-japanese-lyric-small")
29
 
30
 
31
  def gen_lyric(prompt_text: str):
32
- prompt_text = prompt_text.replace("\n", "[SEP]")
33
  prompt_tokens = tokenizer.tokenize(prompt_text)
34
  prompt_token_ids = tokenizer.convert_tokens_to_ids(prompt_tokens)
35
  prompt_tensor = torch.LongTensor(prompt_token_ids)
@@ -53,7 +53,7 @@ def gen_lyric(prompt_text: str):
53
  generated_sequence = output_sequences.tolist()[0]
54
  generated_tokens = tokenizer.convert_ids_to_tokens(generated_sequence)
55
  generated_text = tokenizer.convert_tokens_to_string(generated_tokens)
56
- generated_text = "\n".join([s.strip() for s in generated_text.split('[SEP]')]).replace(' ', '\u3000').replace(
57
  '</s>', '\n\n---end---')
58
  return generated_text
59
 
 
29
 
30
 
31
  def gen_lyric(prompt_text: str):
32
+ prompt_text = prompt_text.replace("\n", "\\n")
33
  prompt_tokens = tokenizer.tokenize(prompt_text)
34
  prompt_token_ids = tokenizer.convert_tokens_to_ids(prompt_tokens)
35
  prompt_tensor = torch.LongTensor(prompt_token_ids)
 
53
  generated_sequence = output_sequences.tolist()[0]
54
  generated_tokens = tokenizer.convert_ids_to_tokens(generated_sequence)
55
  generated_text = tokenizer.convert_tokens_to_string(generated_tokens)
56
+ generated_text = "\n".join([s.strip() for s in generated_text.split('\\n')]).replace(' ', '\u3000').replace(
57
  '</s>', '\n\n---end---')
58
  return generated_text
59