ozcur committed
Commit c9043a4
Parent: ad47d99

initial commit

README.md ADDED
@@ -0,0 +1,32 @@
+ Stanford Alpaca
+
+ FINETUNED USING THE ORIGINAL REPOSITORY: https://github.com/tatsu-lab/stanford_alpaca
+ NO LORA HAS BEEN USED
+
+ Full fine-tune of the whole model, 3 epochs, on the original Alpaca data.
+
+ CONFIGURATION (default):
+
+ ```shell
+ torchrun --nproc_per_node=4 --master_port=3045 train.py \
+     --model_name_or_path /workspace/llama-7b-hf \
+     --data_path ./alpaca_data.json \
+     --bf16 True \
+     --output_dir /workspace/output \
+     --num_train_epochs 3 \
+     --per_device_train_batch_size 4 \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps 8 \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 200 \
+     --save_total_limit 1 \
+     --learning_rate 2e-5 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --fsdp "full_shard auto_wrap" \
+     --fsdp_transformer_layer_cls_to_wrap 'LLaMADecoderLayer' \
+     --tf32 True --report_to="wandb"
+ ```
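
For reference, a minimal inference sketch (not part of this commit's files): it assumes the checkpoint has been downloaded to a local directory (`/workspace/output` below is a hypothetical path), a `transformers` version with LLaMA support, and `accelerate` for `device_map="auto"`. The prompt template is the no-input format from the stanford_alpaca repo.

```python
# Minimal inference sketch -- NOT part of this repo's files.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_PATH = "/workspace/output"  # hypothetical: wherever this checkpoint lives locally

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH, torch_dtype=torch.float16, device_map="auto"
)

# Prompt template from the stanford_alpaca repo (no-input variant).
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nGive three tips for staying healthy.\n\n### Response:"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```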
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[PAD]": 32000
+ }
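
The single `[PAD]` entry at id 32000 is the extra token the Alpaca training script adds on top of the 32000-token base LLaMA vocabulary; this is why `config.json` below reports `vocab_size: 32001`. A minimal sketch of how such a token is typically added (base-model path hypothetical):

```python
# Sketch: how "[PAD]" ends up at id 32000 (base vocab ids are 0..31999).
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE = "/workspace/llama-7b-hf"  # hypothetical base-model path
tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(BASE)

tokenizer.add_special_tokens({"pad_token": "[PAD]"})  # new token gets id 32000
model.resize_token_embeddings(len(tokenizer))         # embedding rows: 32000 -> 32001
```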
alpaca7b-4bit.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37fbb7f91919b08a203944e4450099f5896c486fb97af3f3b125cc64dcd048d9
+ size 4521138824
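
At roughly 4.5 GB, this `.pt` file is consistent with 4-bit quantized 7B weights. The commit does not say which quantization code produced it, so the matching loader is unknown; assuming the file is a plain `torch.save`d state dict, it can at least be inspected like this:

```python
import torch

# Inspect the quantized checkpoint without building a model.
# Assumption: the file is a torch-saved state dict (key -> tensor);
# actually running inference with it requires the matching 4-bit kernel code.
state = torch.load("alpaca7b-4bit.pt", map_location="cpu")
for name, tensor in list(state.items())[:8]:
    print(name, tuple(tensor.shape), tensor.dtype)
```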
config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "_name_or_path": "decapoda-research/llama-7b-hf",
+   "architectures": [
+     "LLaMAForCausalLM"
+   ],
+   "bos_token_id": 0,
+   "eos_token_id": 1,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_sequence_length": 2048,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "pad_token_id": -1,
+   "rms_norm_eps": 1e-06,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.27.0.dev0",
+   "use_cache": true,
+   "vocab_size": 32001
+ }
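
These are the standard LLaMA-7B dimensions (hidden size 4096, 32 layers, 32 heads), with `vocab_size` raised to 32001 for the added `[PAD]` token. A quick sanity check against the file above (local path hypothetical):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("/workspace/output")  # hypothetical local path
assert config.hidden_size == 4096
assert config.num_hidden_layers == config.num_attention_heads == 32
assert config.vocab_size == 32001  # 32000 base tokens + "[PAD]"
```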
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.27.0.dev0"
+ }
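
The generation defaults were derived from the model config (`_from_model_config: true`). They can be read back with `transformers`' `GenerationConfig` (path hypothetical):

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("/workspace/output")  # hypothetical path
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)
# 0 1 0
```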
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "</s>",
+   "eos_token": "</s>",
+   "pad_token": "[PAD]",
+   "unk_token": "</s>"
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "",
+   "eos_token": "",
+   "model_max_length": 512,
+   "padding_side": "right",
+   "special_tokens_map_file": "/home/ubuntu/.cache/huggingface/hub/models--decapoda-research--llama-7b-hf/snapshots/5f98eefcc80e437ef68d457ad7bf167c2c6a1348/special_tokens_map.json",
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": ""
+ }
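
Note `model_max_length: 512` (the sequence cutoff used for Alpaca training) and right-side padding; the empty `bos_token`/`eos_token`/`unk_token` strings here sit alongside `special_tokens_map.json` above, which maps all three to `</s>`. A quick check of what the loaded tokenizer actually reports (path hypothetical):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("/workspace/output")  # hypothetical local path
print(tok.model_max_length)              # 512 -- Alpaca training cutoff
print(tok.padding_side)                  # "right"
print(tok.pad_token, tok.pad_token_id)   # "[PAD]" 32000
```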
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77e749976892247caff7e8a6860fabeeac4d8b97c8d8a36665746cd35ed5a2bb
+ size 3707