Update README.md
README.md (CHANGED):

@@ -23,7 +23,7 @@ library_name: peft
 !pip install bitsandbytes
 ```
 
-## Try the model in Google Colab
+## Inference: Try the model in Google Colab
 ``` python
 from transformers import AutoModel, AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM, LlamaTokenizerFast
 from peft import PeftModel # 0.5.0
@@ -66,4 +66,18 @@ for sentiment in out_text:
 # negative
 ```
 
+## Training Script: [Our Code](https://github.com/AI4Finance-Foundation/FinGPT/tree/master/fingpt/FinGPT_Benchmark)
+```
+#llama2-13b-nr
+deepspeed -i "localhost:2" train_lora.py \
+--run_name sentiment-llama2-13b-20epoch-64batch \
+--base_model llama2-13b-nr \
+--dataset sentiment-train \
+--max_length 512 \
+--batch_size 64 \
+--learning_rate 1e-4 \
+--num_epochs 20 \
+--from_remote True \
+>train.log 2>&1 &
+```
 - PEFT 0.5.0
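
For context beyond the imports shown in the first hunk, here is a minimal sketch of the inference flow the Colab snippet sets up: load a Llama-2-13B base checkpoint in 8-bit (the reason `bitsandbytes` is installed above), attach the LoRA adapter with `PeftModel`, and prompt for a sentiment label. The repo ids and the prompt below are illustrative assumptions, not values taken from this README.

``` python
import torch
from transformers import LlamaForCausalLM, LlamaTokenizerFast
from peft import PeftModel  # 0.5.0

base_model = "NousResearch/Llama-2-13b-hf"           # assumed base checkpoint
adapter = "FinGPT/fingpt-sentiment_llama2-13b_lora"  # assumed adapter repo id

tokenizer = LlamaTokenizerFast.from_pretrained(base_model)
tokenizer.pad_token = tokenizer.eos_token

# 8-bit loading is what the bitsandbytes install above enables
model = LlamaForCausalLM.from_pretrained(base_model, load_in_8bit=True, device_map="auto")
model = PeftModel.from_pretrained(model, adapter)
model.eval()

prompt = (
    "Instruction: What is the sentiment of this news? "
    "Please choose an answer from {negative/neutral/positive}\n"
    "Input: Operating profit rose to EUR 13.1 mn from EUR 8.7 mn.\n"
    "Answer: "
)
tokens = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**tokens, max_new_tokens=8)
print(tokenizer.decode(out[0], skip_special_tokens=True).split("Answer: ")[1])
# e.g. positive
```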
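
The added training command runs `train_lora.py` through the DeepSpeed launcher on the local GPU with index 2 (`-i "localhost:2"`). It fine-tunes the `llama2-13b-nr` base model on the `sentiment-train` dataset for 20 epochs at batch size 64, learning rate 1e-4, and a 512-token maximum length, then backgrounds the job with stdout and stderr redirected to `train.log`; `--from_remote True` presumably tells the script to fetch the model and dataset from a remote hub rather than local paths. The command line does not show the LoRA setup inside `train_lora.py`, so here is a generic sketch of what such a script builds with `peft` 0.5.0; the rank, alpha, dropout, and target modules are assumptions, not the values used in the linked repository.

``` python
from transformers import LlamaForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

# Base checkpoint named by --base_model
base = LlamaForCausalLM.from_pretrained("llama2-13b-nr")

# Hypothetical LoRA hyperparameters; the real ones live in train_lora.py
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,                                  # assumed adapter rank
    lora_alpha=32,                        # assumed scaling factor
    lora_dropout=0.1,                     # assumed dropout
    target_modules=["q_proj", "v_proj"],  # common choice for Llama attention; an assumption here
)

model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # only the adapter weights are trainable
```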