torotoki committed
Commit: 7fbb404
Parent: cecf105

Update README.md

Files changed (1): README.md (+11 -2)
README.md CHANGED
@@ -25,8 +25,16 @@ Install the required libraries as follows:
 
 Execute the following python code:
 ```python
-tokenizer = AutoTokenizer.from_pretrained("pfnet/plamo-13b-instruct", trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained("pfnet/plamo-13b-instruct", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained(
+    "pfnet/plamo-13b-instruct",
+    trust_remote_code=True,
+)
+model = AutoModelForCausalLM.from_pretrained(
+    "pfnet/plamo-13b-instruct",
+    trust_remote_code=True,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+)
 
 def completion(prompt: str, max_new_tokens: int = 128) -> str:
     inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
@@ -37,6 +45,7 @@ def completion(prompt: str, max_new_tokens: int = 128) -> str:
         max_new_tokens=max_new_tokens,
         temperature=1,
         top_p=0.95,
+        top_k=50,
         do_sample=True,
     )
     return tokenizer.decode(generated_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
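
For readers skimming the diff, a self-contained sketch of how the updated README snippet fits together is shown below. The imports and the lines between the two hunks (including the first argument passed to `model.generate()`) are not visible in this diff, so those parts are assumptions rather than quotes of the README; only the arguments that appear in the hunks above are taken from the source.

```python
# Minimal sketch of the snippet after this commit, assuming the usual
# transformers imports; everything not visible in the diff hunks is a guess.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "pfnet/plamo-13b-instruct",
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    "pfnet/plamo-13b-instruct",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # load weights in bfloat16 instead of the default float32
    device_map="auto",           # let accelerate place layers on the available devices
)

def completion(prompt: str, max_new_tokens: int = 128) -> str:
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    generated_ids = model.generate(
        inputs.input_ids,  # assumption: the elided README lines pass the token ids here
        max_new_tokens=max_new_tokens,
        temperature=1,
        top_p=0.95,
        top_k=50,
        do_sample=True,
    )
    return tokenizer.decode(
        generated_ids[0],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True,
    )
```

The added keyword arguments are the substance of the commit: `torch_dtype=torch.bfloat16` avoids the default float32 load and so roughly halves memory use, `device_map="auto"` delegates device placement to `accelerate` (which must be installed alongside `transformers`), and `top_k=50` adds top-k filtering on top of the existing nucleus sampling.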