vj1148 committed
Commit
f9010e3
1 Parent(s): 4291035

End of training

Files changed (1)
  1. README.md +5 -5
README.md CHANGED
@@ -2,6 +2,8 @@
 license: llama2
 base_model: codellama/CodeLlama-7b-hf
 tags:
+- trl
+- sft
 - generated_from_trainer
 model-index:
 - name: codellama2-finetuned-codex
@@ -36,8 +38,6 @@ The following hyperparameters were used during training:
 - train_batch_size: 1
 - eval_batch_size: 8
 - seed: 42
-- gradient_accumulation_steps: 4
-- total_train_batch_size: 4
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - training_steps: 100
@@ -50,6 +50,6 @@ The following hyperparameters were used during training:
 ### Framework versions
 
 - Transformers 4.35.2
-- Pytorch 2.1.0+cu118
-- Datasets 2.15.0
-- Tokenizers 0.15.0
+- Pytorch 2.1.0+cu121
+- Datasets 2.16.1
+- Tokenizers 0.15.1
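The `trl` and `sft` tags added in this commit indicate the run used TRL's supervised fine-tuning trainer. Below is a minimal sketch of how the hyperparameters listed in the card could map onto `TrainingArguments` and `SFTTrainer`; it is an assumption-labeled reconstruction, not the author's actual training script. The dataset file (`train.json`), the `dataset_text_field` name, and the `output_dir` are placeholders not given in the card; only the values visible in the diff are taken from it.

```python
from datasets import load_dataset
from transformers import TrainingArguments
from trl import SFTTrainer

# Placeholder dataset; the card does not name the training data.
dataset = load_dataset("json", data_files="train.json", split="train")

args = TrainingArguments(
    output_dir="codellama2-finetuned-codex",  # model name from the card
    per_device_train_batch_size=1,            # train_batch_size: 1
    per_device_eval_batch_size=8,             # eval_batch_size: 8
    seed=42,                                  # seed: 42
    adam_beta1=0.9,                           # optimizer: Adam with betas=(0.9,0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-08,                       # ... and epsilon=1e-08
    lr_scheduler_type="cosine",               # lr_scheduler_type: cosine
    max_steps=100,                            # training_steps: 100
)

trainer = SFTTrainer(
    model="codellama/CodeLlama-7b-hf",        # base_model from the card
    args=args,
    train_dataset=dataset,
    dataset_text_field="text",                # placeholder field name
)
trainer.train()
```

Note that the commit also drops `gradient_accumulation_steps: 4` and `total_train_batch_size: 4` from the card, so the recorded effective train batch size is now just the per-device value of 1.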