QianqianXie1994 committed
Commit be975df
Parent: d27dcfa

Upload 7 files

config.json CHANGED
@@ -1,10 +1,8 @@
 {
-  "_name_or_path": "/workspace/LLMindCraft/saved_models/finetuned_mix_pmcllama_new",
+  "_name_or_path": "meta-llama/Llama-2-70b-hf",
   "architectures": [
     "LlamaForCausalLM"
   ],
-  "attention_bias": false,
-  "attention_dropout": 0.0,
   "bos_token_id": 1,
   "eos_token_id": 2,
   "hidden_act": "silu",
@@ -19,10 +17,9 @@
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
-  "rope_theta": 10000.0,
   "tie_word_embeddings": false,
-  "torch_dtype": "float32",
-  "transformers_version": "4.36.2",
+  "torch_dtype": "float16",
+  "transformers_version": "4.32.0.dev0",
   "use_cache": true,
   "vocab_size": 32000
 }
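
Net effect of this change: "_name_or_path" now points at the base checkpoint meta-llama/Llama-2-70b-hf instead of a local fine-tuning path, and the weights are declared as float16 rather than float32. A minimal loading sketch follows; "path/to/this-repo" is a placeholder for this repository's id, not a name taken from the commit.

# Minimal sketch: load the checkpoint so the dtype stored in
# config.json ("torch_dtype": "float16") is respected.
# "path/to/this-repo" is a placeholder; substitute the actual repo id.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "path/to/this-repo",
    torch_dtype="auto",  # read the dtype from config.json instead of defaulting to float32
)
tokenizer = AutoTokenizer.from_pretrained("path/to/this-repo")
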
generation_config.json CHANGED
@@ -2,9 +2,9 @@
   "bos_token_id": 1,
   "do_sample": true,
   "eos_token_id": 2,
-  "max_length": 4096,
   "pad_token_id": 0,
+  "max_length": 4096,
   "temperature": 0.6,
   "top_p": 0.9,
-  "transformers_version": "4.36.2"
+  "transformers_version": "4.32.0.dev0"
 }
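
Functionally the new file carries the same sampling defaults as before; only the key order and the transformers version change. generate() picks these values up automatically when the model is loaded. A sketch of inspecting them, assuming the same placeholder repo id as above:

# Sketch: transformers loads generation_config.json alongside the model and
# uses its values as generation defaults unless overridden per call.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("path/to/this-repo")
print(gen_config.do_sample)    # True
print(gen_config.temperature)  # 0.6
print(gen_config.top_p)        # 0.9
print(gen_config.max_length)   # 4096
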
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
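
The three entries above declare the standard Llama special tokens as non-normalized added tokens. Once the tokenizer is loaded they surface as attributes, with ids matching config.json. A small sketch, with the placeholder repo id again:

# Sketch: the special tokens declared in special_tokens_map.json are exposed
# as attributes of the loaded tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok.bos_token_id, tok.eos_token_id)           # 1 2, as in config.json
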
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
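
tokenizer.model is tracked with Git LFS, so the three lines above are a pointer file, not the SentencePiece model itself. Under the LFS spec the oid is the SHA-256 digest of the real file, so a downloaded copy can be checked against it; a sketch:

# Sketch: verify a downloaded tokenizer.model against the LFS pointer.
# The pointer's oid is the SHA-256 digest of the actual file contents.
import hashlib, os

path = "tokenizer.model"
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
print(digest == "9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347")
print(os.path.getsize(path) == 499723)  # matches the pointer's size field
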
tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
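
Two settings above drive encoding behavior: "add_bos_token": true prepends <s> to every encoded sequence, while "add_eos_token": false leaves </s> off (the enormous model_max_length is the usual sentinel for "no length limit set"). A sketch of what that looks like in practice, with the same placeholder repo id:

# Sketch: with add_bos_token=true and add_eos_token=false, encoding
# prepends the BOS id (1) and does not append the EOS id (2).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo")
ids = tok("hello world").input_ids
print(ids[0] == tok.bos_token_id)   # True
print(ids[-1] == tok.eos_token_id)  # False
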