Bearnardd committed
Commit a633eaa
1 Parent(s): 500d30b

highest reward model

Files changed (2)
  1. README.md +3 -3
  2. pytorch_model.bin +1 -1
README.md CHANGED
@@ -24,7 +24,7 @@ You can then generate text as follows:
  ```python
  from transformers import pipeline
 
- generator = pipeline("text-generation", model="Bearnardd//tmp/tmpqxnceiox/Bearnardd/gpt2-imdb")
+ generator = pipeline("text-generation", model="Bearnardd//tmp/tmpj7z4rtn6/Bearnardd/gpt2-imdb")
  outputs = generator("Hello, my llama is cute")
  ```
 
@@ -34,8 +34,8 @@ If you want to use the model for training or to obtain the outputs from the valu
  from transformers import AutoTokenizer
  from trl import AutoModelForCausalLMWithValueHead
 
- tokenizer = AutoTokenizer.from_pretrained("Bearnardd//tmp/tmpqxnceiox/Bearnardd/gpt2-imdb")
- model = AutoModelForCausalLMWithValueHead.from_pretrained("Bearnardd//tmp/tmpqxnceiox/Bearnardd/gpt2-imdb")
+ tokenizer = AutoTokenizer.from_pretrained("Bearnardd//tmp/tmpj7z4rtn6/Bearnardd/gpt2-imdb")
+ model = AutoModelForCausalLMWithValueHead.from_pretrained("Bearnardd//tmp/tmpj7z4rtn6/Bearnardd/gpt2-imdb")
 
  inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
  outputs = model(**inputs, labels=inputs["input_ids"])
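
The README snippet in the diff loads the value-head model but leaves `outputs` unpacked. As a minimal sketch of what the forward pass yields, assuming the repository is reachable under the Hub id `Bearnardd/gpt2-imdb` (the `/tmp/...` prefix in the diff looks like a local staging path) and that `trl`'s `AutoModelForCausalLMWithValueHead` returns a `(lm_logits, loss, value)` tuple, as in recent `trl` releases:

```python
# Sketch only: unpack the value-head estimates next to the LM logits.
# Assumptions: the Hub id "Bearnardd/gpt2-imdb" resolves (the /tmp/... path in
# the diff is a local staging path), and trl's value-head model returns the
# tuple (lm_logits, loss, value).
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead

tokenizer = AutoTokenizer.from_pretrained("Bearnardd/gpt2-imdb")
model = AutoModelForCausalLMWithValueHead.from_pretrained("Bearnardd/gpt2-imdb")

inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
lm_logits, loss, value = model(**inputs, labels=inputs["input_ids"])

print(lm_logits.shape)  # (batch, seq_len, vocab_size) -- next-token logits
print(value.shape)      # (batch, seq_len) -- per-token scalar value estimates
```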
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eccca47a9d62a8cc21c4e232d3fa6e2ea104e10cf213af76f96b101ead0c77f0
+ oid sha256:ed3a08a2388ff8fe19cba8821cc5d48565198a319e8637bb0054a690b7a8812e
  size 510399237