NewBreaker committed
Commit 8f59db6
1 Parent(s): ee7aadd

add questions all
.gitattributes CHANGED
@@ -1,34 +1,10 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ model.safetensors filter=lfs diff=lfs merge=lfs -text
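The `filter=lfs` attributes above route matching paths through Git LFS: the repository itself stores only a small text pointer, and the real payload is fetched by the LFS filter on checkout or `git lfs pull`. A minimal sketch (the file name is illustrative) for telling a pointer stub apart from downloaded content:

```python
# Detect whether a checked-out file is still a Git LFS pointer stub.
# Pointers are tiny text files whose first line is the LFS spec URL.
from pathlib import Path

LFS_SPEC = b"version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    with Path(path).open("rb") as f:
        return f.read(len(LFS_SPEC)) == LFS_SPEC

print(is_lfs_pointer("model.safetensors"))  # True until the LFS payload is pulled
```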
 
ChatGLM-6b-int4(origin)/.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
LICENSE → ChatGLM-6b-int4(origin)/LICENSE RENAMED
File without changes
MODEL_LICENSE → ChatGLM-6b-int4(origin)/MODEL_LICENSE RENAMED
File without changes
ChatGLM-6b-int4(origin)/README.md ADDED
@@ -0,0 +1,111 @@
+ ---
+ language: en
+ license: cc-by-4.0
+ datasets:
+ - squad_v2
+ model-index:
+ - name: deepset/roberta-base-squad2
+   results:
+   - task:
+       type: question-answering
+       name: Question Answering
+     dataset:
+       name: squad_v2
+       type: squad_v2
+       config: squad_v2
+       split: validation
+     metrics:
+     - type: exact_match
+       value: 79.9309
+       name: Exact Match
+       verified: true
+       verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMDhhNjg5YzNiZGQ1YTIyYTAwZGUwOWEzZTRiYzdjM2QzYjA3ZTUxNDM1NjE1MTUyMjE1MGY1YzEzMjRjYzVjYiIsInZlcnNpb24iOjF9.EH5JJo8EEFwU7osPz3s7qanw_tigeCFhCXjSfyN0Y1nWVnSfulSxIk_DbAEI5iE80V4EKLyp5-mYFodWvL2KDA
+     - type: f1
+       value: 82.9501
+       name: F1
+       verified: true
+       verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMjk5ZDYwOGQyNjNkMWI0OTE4YzRmOTlkY2JjNjQ0YTZkNTMzMzNkYTA0MDFmNmI3NjA3NjNlMjhiMDQ2ZjJjNSIsInZlcnNpb24iOjF9.DDm0LNTkdLbGsue58bg1aH_s67KfbcmkvL-6ZiI2s8IoxhHJMSf29H_uV2YLyevwx900t-MwTVOW3qfFnMMEAQ
+     - type: total
+       value: 11869
+       name: total
+       verified: true
+       verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMGFkMmI2ODM0NmY5NGNkNmUxYWViOWYxZDNkY2EzYWFmOWI4N2VhYzY5MGEzMTVhOTU4Zjc4YWViOGNjOWJjMCIsInZlcnNpb24iOjF9.fexrU1icJK5_MiifBtZWkeUvpmFISqBLDXSQJ8E6UnrRof-7cU0s4tX_dIsauHWtUpIHMPZCf5dlMWQKXZuAAA
+
+ ---
+
+ # ChatGLM-6B-INT4
+ <p align="center">
+ 👋 Join our <a href="https://join.slack.com/t/chatglm/shared_invite/zt-1th2q5u69-7tURzFuOPanmuHy9hsZnKA" target="_blank">Slack</a> and <a href="https://github.com/THUDM/ChatGLM-6B/blob/main/resources/WECHAT.md" target="_blank">WeChat</a>
+ </p>
+
+ ## Introduction
+ ChatGLM-6B is an open-source dialogue language model that supports bilingual Chinese-English question answering. It is based on the [General Language Model (GLM)](https://github.com/THUDM/GLM) architecture and has 6.2 billion parameters. With model quantization, it can be deployed locally on consumer-grade GPUs (as little as 6GB of VRAM at the INT4 quantization level). ChatGLM-6B uses the same technology as [ChatGLM](https://chatglm.cn) and is optimized for Chinese Q&A and dialogue. After training on roughly 1T tokens of Chinese and English text, backed by supervised fine-tuning, feedback bootstrapping, and reinforcement learning from human feedback, the 6.2-billion-parameter ChatGLM-6B can generate answers that align well with human preferences.
+
+ ChatGLM-6B-INT4 is the quantized version of the ChatGLM-6B weights. Specifically, ChatGLM-6B-INT4 applies INT4 quantization to the 28 GLM blocks in ChatGLM-6B; the embedding and LM head are not quantized. In theory, the quantized model needs only about 6GB of VRAM (or RAM, when running on the CPU) for inference, which makes it feasible to run on embedded devices such as a Raspberry Pi.
+
+ When running on the CPU, a CPU kernel is compiled automatically for your hardware. Make sure GCC and OpenMP are installed (usually preinstalled on Linux; on Windows they must be installed manually) to get the best parallel performance.
+
+ ## Dependencies
+
+ ```shell
+ pip install protobuf transformers==4.27.1 cpm_kernels
+ ```
+
+ ## Usage
+
+ You can use the following code to chat with the ChatGLM-6B model:
+
+ ```ipython
+ >>> from transformers import AutoTokenizer, AutoModel
+ >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+ >>> model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).half().cuda()
+ >>> response, history = model.chat(tokenizer, "你好", history=[])
+ >>> print(response)
+ 你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。
+ >>> response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history)
+ >>> print(response)
+ 晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法:
+
+ 1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。
+ 2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。
+ 3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。
+ 4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。
+ 5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。
+ 6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。
+
+ 如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。
+ ```
+
+ For more usage instructions, including how to run the command-line and web demos and how to use model quantization to save VRAM, please see our [Github Repo](https://github.com/THUDM/ChatGLM-6B).
+
+ ## License
+
+ The code in this repository is open-sourced under the [Apache-2.0](LICENSE) license; use of the ChatGLM-6B model weights must follow the [Model License](MODEL_LICENSE).
+
+ ## Citation
+
+ If you find our work helpful, please consider citing the following papers:
+
+ ```
+ @inproceedings{
+ zeng2023glm-130b,
+ title={{GLM}-130B: An Open Bilingual Pre-trained Model},
+ author={Aohan Zeng and Xiao Liu and Zhengxiao Du and Zihan Wang and Hanyu Lai and Ming Ding and Zhuoyi Yang and Yifan Xu and Wendi Zheng and Xiao Xia and Weng Lam Tam and Zixuan Ma and Yufei Xue and Jidong Zhai and Wenguang Chen and Zhiyuan Liu and Peng Zhang and Yuxiao Dong and Jie Tang},
+ booktitle={The Eleventh International Conference on Learning Representations (ICLR)},
+ year={2023},
+ url={https://openreview.net/forum?id=-Aw0rrrPUF}
+ }
+ ```
+ ```
+ @inproceedings{du2022glm,
+ title={GLM: General Language Model Pretraining with Autoregressive Blank Infilling},
+ author={Du, Zhengxiao and Qian, Yujie and Liu, Xiao and Ding, Ming and Qiu, Jiezhong and Yang, Zhilin and Tang, Jie},
+ booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+ pages={320--335},
+ year={2022}
+ }
+ ```
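The README above notes that the INT4 weights can run on a bare CPU once GCC and OpenMP are available. As a companion to its GPU example, here is a hedged sketch of the CPU path; calling `.float()` instead of `.half().cuda()` is an assumption based on common usage of this repo, not something stated in the diff:

```python
# Hedged sketch: CPU-only inference with the INT4 weights (no CUDA needed).
# Assumes GCC/OpenMP are installed so the quantization kernels can compile.
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).float().eval()

response, history = model.chat(tokenizer, "你好", history=[])
print(response)
```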
ChatGLM-6b-int4(origin)/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "RobertaForQuestionAnswering"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "language": "english",
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "name": "Roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "type_vocab_size": 1,
+   "vocab_size": 50265
+ }
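Note that this added config declares `RobertaForQuestionAnswering`, even though it sits inside the ChatGLM folder. A quick, hedged way to inspect what a config declares without downloading any weights; the local directory path is illustrative:

```python
# Load only config.json and print the architecture it declares.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("ChatGLM-6b-int4(origin)")  # dir containing config.json
print(config.model_type)      # "roberta"
print(config.architectures)   # ["RobertaForQuestionAnswering"]
print(config.vocab_size)      # 50265
```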
configuration_chatglm.py → ChatGLM-6b-int4(origin)/configuration_chatglm.py RENAMED
File without changes
demo_api.py → ChatGLM-6b-int4(origin)/demo_api.py RENAMED
File without changes
ice_text.model → ChatGLM-6b-int4(origin)/ice_text.model RENAMED
File without changes
modeling_chatglm.py → ChatGLM-6b-int4(origin)/modeling_chatglm.py RENAMED
File without changes
ChatGLM-6b-int4(origin)/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35828b49cf23cbae4c27788d4b04fc68c79a276300e09f14d72a49b0b738b4a9
+ size 3893083075
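These three lines are the entire Git-side representation of the ~3.9 GB weight file; the `oid` is the SHA-256 of the real payload. A minimal sketch for verifying a downloaded file against its pointer (the path is illustrative):

```python
# Verify a downloaded weight file against the sha256 recorded in its LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "35828b49cf23cbae4c27788d4b04fc68c79a276300e09f14d72a49b0b738b4a9"
assert sha256_of("pytorch_model.bin") == expected, "corrupt or incomplete download"
```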
quantization.py → ChatGLM-6b-int4(origin)/quantization.py RENAMED
File without changes
quantization_kernels.c → ChatGLM-6b-int4(origin)/quantization_kernels.c RENAMED
File without changes
quantization_kernels_parallel.c → ChatGLM-6b-int4(origin)/quantization_kernels_parallel.c RENAMED
File without changes
tokenization_chatglm.py → ChatGLM-6b-int4(origin)/tokenization_chatglm.py RENAMED
File without changes
ChatGLM-6b-int4(origin)/tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "name_or_path": "THUDM/chatglm-6b-int4",
+   "bos_token": "<sop>",
+   "eos_token": "<eop>",
+   "end_token": "</s>",
+   "gmask_token": "[gMASK]",
+   "mask_token": "[MASK]",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>",
+   "remove_space": false,
+   "do_lower_case": false,
+   "tokenizer_class": "ChatGLMTokenizer",
+   "num_image_tokens": 0,
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_chatglm.ChatGLMTokenizer",
+       null
+     ]
+   }
+ }
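The `auto_map` entry is what lets `AutoTokenizer` resolve to the custom `ChatGLMTokenizer` class shipped in `tokenization_chatglm.py` when `trust_remote_code=True` is passed. A short, hedged sanity check:

```python
# auto_map routes AutoTokenizer to the repo's custom tokenizer class.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
print(type(tok).__name__)            # expected: "ChatGLMTokenizer"
print(tok.bos_token, tok.eos_token)  # "<sop>" "<eop>" per this config
```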
README.md CHANGED
@@ -30,82 +30,116 @@ model-index:
  name: total
  verified: true
  verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMGFkMmI2ODM0NmY5NGNkNmUxYWViOWYxZDNkY2EzYWFmOWI4N2VhYzY5MGEzMTVhOTU4Zjc4YWViOGNjOWJjMCIsInZlcnNpb24iOjF9.fexrU1icJK5_MiifBtZWkeUvpmFISqBLDXSQJ8E6UnrRof-7cU0s4tX_dIsauHWtUpIHMPZCf5dlMWQKXZuAAA
-
  ---

- # ChatGLM-6B-INT4
- <p align="center">
- 👋 Join our <a href="https://join.slack.com/t/chatglm/shared_invite/zt-1th2q5u69-7tURzFuOPanmuHy9hsZnKA" target="_blank">Slack</a> and <a href="https://github.com/THUDM/ChatGLM-6B/blob/main/resources/WECHAT.md" target="_blank">WeChat</a>
- </p>
-
- ## Introduction
- ChatGLM-6B is an open-source dialogue language model that supports bilingual Chinese-English question answering. It is based on the [General Language Model (GLM)](https://github.com/THUDM/GLM) architecture and has 6.2 billion parameters. With model quantization, it can be deployed locally on consumer-grade GPUs (as little as 6GB of VRAM at the INT4 quantization level). ChatGLM-6B uses the same technology as [ChatGLM](https://chatglm.cn) and is optimized for Chinese Q&A and dialogue. After training on roughly 1T tokens of Chinese and English text, backed by supervised fine-tuning, feedback bootstrapping, and reinforcement learning from human feedback, the 6.2-billion-parameter ChatGLM-6B can generate answers that align well with human preferences.
-
- ChatGLM-6B-INT4 is the quantized version of the ChatGLM-6B weights. Specifically, ChatGLM-6B-INT4 applies INT4 quantization to the 28 GLM blocks in ChatGLM-6B; the embedding and LM head are not quantized. In theory, the quantized model needs only about 6GB of VRAM (or RAM, when running on the CPU) for inference, which makes it feasible to run on embedded devices such as a Raspberry Pi.
-
- When running on the CPU, a CPU kernel is compiled automatically for your hardware. Make sure GCC and OpenMP are installed (usually preinstalled on Linux; on Windows they must be installed manually) to get the best parallel performance.
-
- ## Dependencies
-
- ```shell
- pip install protobuf transformers==4.27.1 cpm_kernels
- ```
-
- ## Usage
-
- You can use the following code to chat with the ChatGLM-6B model:
-
- ```ipython
- >>> from transformers import AutoTokenizer, AutoModel
- >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
- >>> model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).half().cuda()
- >>> response, history = model.chat(tokenizer, "你好", history=[])
- >>> print(response)
- 你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。
- >>> response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history)
- >>> print(response)
- 晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法:
-
- 1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。
- 2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。
- 3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。
- 4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。
- 5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。
- 6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。
-
- 如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。
- ```
-
- For more usage instructions, including how to run the command-line and web demos and how to use model quantization to save VRAM, please see our [Github Repo](https://github.com/THUDM/ChatGLM-6B).
-
- ## License
-
- The code in this repository is open-sourced under the [Apache-2.0](LICENSE) license; use of the ChatGLM-6B model weights must follow the [Model License](MODEL_LICENSE).
-
- ## Citation
-
- If you find our work helpful, please consider citing the following papers:
-
- ```
- @inproceedings{
- zeng2023glm-130b,
- title={{GLM}-130B: An Open Bilingual Pre-trained Model},
- author={Aohan Zeng and Xiao Liu and Zhengxiao Du and Zihan Wang and Hanyu Lai and Ming Ding and Zhuoyi Yang and Yifan Xu and Wendi Zheng and Xiao Xia and Weng Lam Tam and Zixuan Ma and Yufei Xue and Jidong Zhai and Wenguang Chen and Zhiyuan Liu and Peng Zhang and Yuxiao Dong and Jie Tang},
- booktitle={The Eleventh International Conference on Learning Representations (ICLR)},
- year={2023},
- url={https://openreview.net/forum?id=-Aw0rrrPUF}
- }
- ```
- ```
- @inproceedings{du2022glm,
- title={GLM: General Language Model Pretraining with Autoregressive Blank Infilling},
- author={Du, Zhengxiao and Qian, Yujie and Liu, Xiao and Ding, Ming and Qiu, Jiezhong and Yang, Zhilin and Tang, Jie},
- booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
- pages={320--335},
- year={2022}
- }
- ```
+ # roberta-base for QA
+
+ This is the [roberta-base](https://huggingface.co/roberta-base) model, fine-tuned using the [SQuAD2.0](https://huggingface.co/datasets/squad_v2) dataset. It's been trained on question-answer pairs, including unanswerable questions, for the task of Question Answering.
+
+ ## Overview
+ **Language model:** roberta-base
+ **Language:** English
+ **Downstream-task:** Extractive QA
+ **Training data:** SQuAD 2.0
+ **Eval data:** SQuAD 2.0
+ **Code:** See [an example QA pipeline on Haystack](https://haystack.deepset.ai/tutorials/first-qa-system)
+ **Infrastructure:** 4x Tesla v100
+
+ ## Hyperparameters
+
+ ```
+ batch_size = 96
+ n_epochs = 2
+ base_LM_model = "roberta-base"
+ max_seq_len = 386
+ learning_rate = 3e-5
+ lr_schedule = LinearWarmup
+ warmup_proportion = 0.2
+ doc_stride = 128
+ max_query_length = 64
+ ```
+
+ ## Using a distilled model instead
+ Please note that we have also released a distilled version of this model called [deepset/tinyroberta-squad2](https://huggingface.co/deepset/tinyroberta-squad2). The distilled model has comparable prediction quality and runs at twice the speed of the base model.
+
+ ## Usage
+
+ ### In Haystack
+ Haystack is an NLP framework by deepset. You can use this model in a Haystack pipeline to do question answering at scale (over many documents). To load the model in [Haystack](https://github.com/deepset-ai/haystack/):
+ ```python
+ from haystack.nodes import FARMReader, TransformersReader
+
+ reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
+ # or
+ reader = TransformersReader(model_name_or_path="deepset/roberta-base-squad2", tokenizer="deepset/roberta-base-squad2")
+ ```
+ For a complete example of ``roberta-base-squad2`` being used for Question Answering, check out the [Tutorials in Haystack Documentation](https://haystack.deepset.ai/tutorials/first-qa-system).
+
+ ### In Transformers
+ ```python
+ from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
+
+ model_name = "deepset/roberta-base-squad2"
+
+ # a) Get predictions
+ nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)
+ QA_input = {
+     'question': 'Why is model conversion important?',
+     'context': 'The option to convert models between FARM and transformers gives freedom to the user and let people easily switch between frameworks.'
+ }
+ res = nlp(QA_input)
+
+ # b) Load model & tokenizer
+ model = AutoModelForQuestionAnswering.from_pretrained(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ ```
+
+ ## Performance
+ Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/).
+
+ ```
+ "exact": 79.87029394424324,
+ "f1": 82.91251169582613,
+
+ "total": 11873,
+ "HasAns_exact": 77.93522267206478,
+ "HasAns_f1": 84.02838248389763,
+ "HasAns_total": 5928,
+ "NoAns_exact": 81.79983179142137,
+ "NoAns_f1": 81.79983179142137,
+ "NoAns_total": 5945
+ ```
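The numbers above come from the official CodaLab script. For a quick local re-check, here is a hedged sketch using the `evaluate` package's `squad_v2` metric; the `no_answer_probability` proxy below is an assumption for brevity, not the official protocol:

```python
# Smoke-test the model on a slice of the SQuAD 2.0 dev set.
import evaluate
from datasets import load_dataset
from transformers import pipeline

nlp = pipeline("question-answering", model="deepset/roberta-base-squad2")
dev = load_dataset("squad_v2", split="validation[:100]")  # small slice only

metric = evaluate.load("squad_v2")
for ex in dev:
    pred = nlp(question=ex["question"], context=ex["context"])
    metric.add(
        prediction={
            "id": ex["id"],
            "prediction_text": pred["answer"],
            "no_answer_probability": 1.0 - pred["score"],  # rough proxy
        },
        reference={"id": ex["id"], "answers": ex["answers"]},
    )
print(metric.compute())  # keys include "exact", "f1", "HasAns_f1", "NoAns_f1", ...
```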
+
+ ## Authors
+ **Branden Chan:** [email protected]
+ **Timo Möller:** [email protected]
+ **Malte Pietsch:** [email protected]
+ **Tanay Soni:** [email protected]
+
+ ## About us
+
+ <div class="grid lg:grid-cols-2 gap-x-4 gap-y-3">
+     <div class="w-full h-40 object-cover mb-2 rounded-lg flex items-center justify-center">
+          <img alt="" src="https://raw.githubusercontent.com/deepset-ai/.github/main/deepset-logo-colored.png" class="w-40"/>
+     </div>
+     <div class="w-full h-40 object-cover mb-2 rounded-lg flex items-center justify-center">
+          <img alt="" src="https://raw.githubusercontent.com/deepset-ai/.github/main/haystack-logo-colored.png" class="w-40"/>
+     </div>
+ </div>
+
+ [deepset](http://deepset.ai/) is the company behind the open-source NLP framework [Haystack](https://haystack.deepset.ai/), which is designed to help you build production-ready NLP systems for question answering, summarization, ranking, and more.
+
+ Some of our other work:
+ - [Distilled roberta-base-squad2 (aka "tinyroberta-squad2")](https://huggingface.co/deepset/tinyroberta-squad2)
+ - [German BERT (aka "bert-base-german-cased")](https://deepset.ai/german-bert)
+ - [GermanQuAD and GermanDPR datasets and models (aka "gelectra-base-germanquad", "gbert-base-germandpr")](https://deepset.ai/germanquad)
+
+ ## Get in touch and join the Haystack community
+
+ <p>For more info on Haystack, visit our <strong><a href="https://github.com/deepset-ai/haystack">GitHub</a></strong> repo and <strong><a href="https://docs.haystack.deepset.ai">Documentation</a></strong>.
+
+ We also have a <strong><a class="h-7" href="https://haystack.deepset.ai/community">Discord community open to everyone!</a></strong></p>
+
+ [Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Discord](https://haystack.deepset.ai/community) | [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) | [Website](https://deepset.ai)
+
+ By the way: [we're hiring!](http://www.deepset.ai/jobs)
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a8d759d881d9c1b39dbf4ee451fb8a8c2d43ccbd180218863a54ffd9b4d2447
+ size 496233457
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac5db66fdcfecb400345d09787b71009d60805ef9883451071669cf951b5e2c7
+ size 496254442
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:35828b49cf23cbae4c27788d4b04fc68c79a276300e09f14d72a49b0b738b4a9
- size 3893083075
+ oid sha256:e0b64ccefc1bcb569b604baea27eb873e5482fdf6eb3ceff1fb5368397db5aed
+ size 496313727
rust_model.ot ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a16ed126bbc8c4cf794406bac0c7946f62d0f175c02dc54d77a00a6255597ed
+ size 498638704
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b672dd16f09f6f805d407800278e60217b9d7c040df1dde5098765a40cdc88a
+ size 496513256
tokenizer_config.json CHANGED
@@ -1,20 +1 @@
- {
-   "name_or_path": "THUDM/chatglm-6b-int4",
-   "bos_token": "<sop>",
-   "eos_token": "<eop>",
-   "end_token": "</s>",
-   "gmask_token": "[gMASK]",
-   "mask_token": "[MASK]",
-   "pad_token": "<pad>",
-   "unk_token": "<unk>",
-   "remove_space": false,
-   "do_lower_case": false,
-   "tokenizer_class": "ChatGLMTokenizer",
-   "num_image_tokens": 0,
-   "auto_map": {
-     "AutoTokenizer": [
-       "tokenization_chatglm.ChatGLMTokenizer",
-       null
-     ]
-   }
- }
+ {"do_lower_case": false, "model_max_length": 512, "full_tokenizer_file": null}
vocab.json ADDED
The diff for this file is too large to render. See raw diff