Upload 14 files
- .gitattributes +4 -0
- ShareGPT_Vicuna_unfiltered/Experimental/ShareGPT_2023.05.04v1_Nano_Wasteland.json +3 -0
- ShareGPT_Vicuna_unfiltered/Experimental/ShareGPT_2023.05.04v_NoUnicode_Edition.json +3 -0
- ShareGPT_Vicuna_unfiltered/Experimental/removals.log +3 -0
- ShareGPT_Vicuna_unfiltered/ShareGPT_2023.05.08v0_Wasteland_Edition.json +3 -0
- ShareGPT_Vicuna_unfiltered/Vicuna_unfiltered_train.ipynb +157 -0
- ShareGPT_Vicuna_unfiltered/classify.py +102 -0
- ShareGPT_Vicuna_unfiltered/clean_sharegpt.py +150 -0
- ShareGPT_Vicuna_unfiltered/deduplicate.py +48 -0
- ShareGPT_Vicuna_unfiltered/fastchat_validate.py +187 -0
- ShareGPT_Vicuna_unfiltered/merge_json.js +36 -0
- ShareGPT_Vicuna_unfiltered/optional_clean.py +497 -0
- ShareGPT_Vicuna_unfiltered/pretty_json.py +20 -0
- ShareGPT_Vicuna_unfiltered/split_long_conversation.py +99 -0
- ShareGPT_Vicuna_unfiltered/validate.js +43 -0
.gitattributes
CHANGED
@@ -66,3 +66,7 @@ aicg/csvs/Log_20230417_203437.csv filter=lfs diff=lfs merge=lfs -text
 aicg/csvs/Log_20230417_210300.csv filter=lfs diff=lfs merge=lfs -text
 aicg/csvs/merged.csv filter=lfs diff=lfs merge=lfs -text
 aicg/instruct_output.json filter=lfs diff=lfs merge=lfs -text
+ShareGPT_Vicuna_unfiltered/Experimental/removals.log filter=lfs diff=lfs merge=lfs -text
+ShareGPT_Vicuna_unfiltered/Experimental/ShareGPT_2023.05.04v_NoUnicode_Edition.json filter=lfs diff=lfs merge=lfs -text
+ShareGPT_Vicuna_unfiltered/Experimental/ShareGPT_2023.05.04v1_Nano_Wasteland.json filter=lfs diff=lfs merge=lfs -text
+ShareGPT_Vicuna_unfiltered/ShareGPT_2023.05.08v0_Wasteland_Edition.json filter=lfs diff=lfs merge=lfs -text
ShareGPT_Vicuna_unfiltered/Experimental/ShareGPT_2023.05.04v1_Nano_Wasteland.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3fe249345b2baaab06969abc31e24926f348b112f131d9c4ac5644551c0b0f07
size 158477945
ShareGPT_Vicuna_unfiltered/Experimental/ShareGPT_2023.05.04v_NoUnicode_Edition.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:21dd8faa73bb76424462f92d845cb3c66f4e66a95832224e991a781b52ff1d59
size 251274843
ShareGPT_Vicuna_unfiltered/Experimental/removals.log
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c0a85935a961d2381728b67971c24a32a103cb59a51393b690a0003445c4acd0
size 25995456
ShareGPT_Vicuna_unfiltered/ShareGPT_2023.05.08v0_Wasteland_Edition.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea3dfb5f663f86ce07612e7ffedacb51bb3a151bc6dbad8390f2cc65a7c2e308
size 333715562
ShareGPT_Vicuna_unfiltered/Vicuna_unfiltered_train.ipynb
ADDED
@@ -0,0 +1,157 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**You may encounter an error when installing flash-attn. I couldn't figure it out. Maybe you can.**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "background_save": true,
     "base_uri": "https://localhost:8080/"
    },
    "id": "h_MevKtB0dEw",
    "outputId": "ae41454e-e28f-4d8c-dcc6-f97399b31b8b",
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "%pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116\n",
    "!cd ~\n",
    "!git clone https://github.com/huggingface/transformers.git && cd transformers && git checkout cae78c46 && pip install .\n",
    "# Install fastchat\n",
    "!pip3 install --upgrade pip\n",
    "!git clone https://github.com/lm-sys/FastChat && cd FastChat && pip install -e .\n",
    "%pip install einops\n",
    "!mkdir checkpoints\n",
    "!wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/flash_attn-0.2.8-cp39-cp39-linux_x86_64.whl\n",
    "%pip install flash_attn-0.2.8-cp39-cp39-linux_x86_64.whl"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "6SNhHJFz-28c",
    "outputId": "8b308465-e51f-46e3-8674-39a234c17d50"
   },
   "outputs": [],
   "source": [
    "!wget https://raw.githubusercontent.com/oobabooga/text-generation-webui/main/download-model.py\n",
    "!mkdir models\n",
    "!wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V2_unfiltered_cleaned_split.json\n",
    "!python download-model.py decapoda-research/llama-13b-hf"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Manually edit tokenizer_config.json to: {\"bos_token\": \"\", \"eos_token\": \"\", \"model_max_length\": 2048, \"tokenizer_class\": \"LlamaTokenizer\", \"unk_token\": \"\"}**"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Enter wandb api key**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install wandb\n",
    "import wandb\n",
    "wandb.login()"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {
    "id": "ya2NjlT7BZ2q"
   },
   "source": [
    "**8 x A100 80gb training run**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "9PUdb3ZY4FkK",
    "outputId": "f9b1aae9-72d3-4137-800a-97c482660860",
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "!torchrun --nnodes=1 --nproc_per_node=8 --master_port=21001 \\\n",
    "    FastChat/fastchat/train/train.py \\\n",
    "    --model_name_or_path models/decapoda-research_llama-13b-hf \\\n",
    "    --data_path ShareGPT_V2_unfiltered_cleaned_split.json \\\n",
    "    --bf16 True \\\n",
    "    --output_dir ./checkpoints \\\n",
    "    --num_train_epochs 3 \\\n",
    "    --per_device_train_batch_size 4 \\\n",
    "    --per_device_eval_batch_size 4 \\\n",
    "    --gradient_accumulation_steps 1 \\\n",
    "    --evaluation_strategy \"no\" \\\n",
    "    --save_strategy \"steps\" \\\n",
    "    --save_steps 1200 \\\n",
    "    --save_total_limit 100 \\\n",
    "    --learning_rate 2e-5 \\\n",
    "    --weight_decay 0. \\\n",
    "    --warmup_ratio 0.03 \\\n",
    "    --lr_scheduler_type \"cosine\" \\\n",
    "    --logging_steps 1 \\\n",
    "    --fsdp \"full_shard auto_wrap\" \\\n",
    "    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \\\n",
    "    --tf32 True \\\n",
    "    --model_max_length 2048 \\\n",
    "    --gradient_checkpointing True \\\n",
    "    --lazy_preprocess True"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
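
A hedged aside on the training cell: it assumes a single node with 8 GPUs. On a smaller node, one plausible adjustment (a sketch reusing only the flags shown above) is to lower --nproc_per_node and raise --gradient_accumulation_steps so the global batch stays at 8 x 4 = 32 sequences, e.g. for 4 GPUs:

!torchrun --nnodes=1 --nproc_per_node=4 --master_port=21001 \
    FastChat/fastchat/train/train.py \
    ... \
    --per_device_train_batch_size 4 \
    --gradient_accumulation_steps 2 \
    ...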
ShareGPT_Vicuna_unfiltered/classify.py
ADDED
@@ -0,0 +1,102 @@
import json
import os
import concurrent.futures

from sentence_transformers import SentenceTransformer, util
from tqdm import tqdm

# File to classify data in.
filePath = "ShareGPT_2023.05.02v0_unfiltered_cleaned_split.json"

# Threshold settings
refusal_threshold = 0.5
non_refusal_threshold = 0.5

# Example refusal and non-refusal sentences
refusals = ["I'm sorry, I cannot complete the request.", "It is not possible to provide that information.", "Sorry, but we don't allow questions like that.", "We won't be able to assist you with that."]
non_refusals = ["Sure, I can do that.", "No problem, I can do that for you.", "Here is the information you asked for."]

# Set the max workers to half the available cores
max_workers = os.cpu_count() // 2

# Take a GPT response and return a label ("refusal", "non-refusal", or
# "unrelated"), the closest example sentence, and the original text.
def process_conversation(conversation):
    global max_refusal, max_non_refusal

    value = conversation["value"]
    value_vec = model.encode(value, convert_to_tensor=True)

    # Compute the cosine similarity with the example sentences
    refusals_sim = util.pytorch_cos_sim(value_vec, refusals_vec)
    non_refusals_sim = util.pytorch_cos_sim(value_vec, non_refusals_vec)

    # Find the maximum similarity score and index for each category
    refusals_max_sim, refusals_max_idx = refusals_sim.max(dim=1)
    non_refusals_max_sim, non_refusals_max_idx = non_refusals_sim.max(dim=1)

    if refusals_max_sim > max_refusal:
        max_refusal = refusals_max_sim.item()
    if non_refusals_max_sim > max_non_refusal:
        max_non_refusal = non_refusals_max_sim.item()

    if refusals_max_sim > refusal_threshold and refusals_max_sim > non_refusals_max_sim:
        label = "refusal"
        example = refusals[refusals_max_idx]
    elif non_refusals_max_sim > non_refusal_threshold and non_refusals_max_sim > refusals_max_sim:
        label = "non-refusal"
        example = non_refusals[non_refusals_max_idx]
    else:
        label = "unrelated"
        example = None

    return label, example, value


with open(filePath, "r", encoding="utf-8") as f:
    data = json.load(f)

bad_ids = []

max_refusal = 0.0
max_non_refusal = 0.0

# Load a pre-trained sentence-transformer model
model = SentenceTransformer("paraphrase-MiniLM-L6-v2")

refusals_vec = model.encode(refusals, convert_to_tensor=True)
non_refusals_vec = model.encode(non_refusals, convert_to_tensor=True)

refusal_count = 0
non_refusal_count = 0
unrelated_count = 0

pbar1 = tqdm(data)
for item in pbar1:

    id_ = item["id"]

    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:

        futures = [executor.submit(process_conversation, conversation) for conversation in item["conversations"] if conversation["from"] == "gpt"]

        for future in concurrent.futures.as_completed(futures):

            label, example, value = future.result()

            if label == "refusal":
                # Record the flagged response in a fresh dict
                # (avoids clobbering the loop variable `item`).
                bad_entry = {"id": id_, "value": value}
                bad_ids.append(bad_entry)
                print(f"\nID: {id_} | Value: {value}")
                refusal_count += 1
            elif label == "non-refusal":
                non_refusal_count += 1
            else:
                unrelated_count += 1

    pbar1.set_description("Max Refusal: {:.3f}".format(max_refusal))
    pbar1.set_postfix(r=refusal_count, u=unrelated_count)

with open("possible_bad_entries.json", "w") as f:
    json.dump(bad_ids, f)
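
A minimal sketch of the embedding-similarity check classify.py applies to each GPT response, using the same model and threshold (the input string here is hypothetical):

from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("paraphrase-MiniLM-L6-v2")
refusals_vec = model.encode(["I'm sorry, I cannot complete the request."], convert_to_tensor=True)
value_vec = model.encode("Sorry, I can't help with that.", convert_to_tensor=True)  # hypothetical response
score = util.pytorch_cos_sim(value_vec, refusals_vec).max().item()
print(score > 0.5)  # above refusal_threshold -> would be labeled "refusal"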
ShareGPT_Vicuna_unfiltered/clean_sharegpt.py
ADDED
@@ -0,0 +1,150 @@
"""
Usage: python3 clean_sharegpt.py --in-file sharegpt_html.json --out-file sharegpt_clean.json
"""
import argparse
import json
import logging
import re
from typing import Dict, Union

import bs4
import markdownify  # == 0.11.6
import tqdm


def _get_html_tags(file_path: str):
    # Generate the list of html tags occurring in the file.
    s = set()
    for l in open(file_path, "r"):
        for m in re.findall("</[^<>]+>", l):
            s.add(m)
    return s

div_pattern = re.compile("<div.*?>")
span_pattern = re.compile("<span.*?>")
code_lang_pattern = re.compile(r"```\s*" + "(.*?)" + "(?:Copy code)+" + "(.+?)" + r"\s*?```", re.DOTALL)
code_lang_format = r"```\g<1>\n\g<2>\n```"
regenerate_pattern = re.compile(r"\d+ / \d+")
copy_chars_pattern = re.compile(r"Copy\d+ chars / \d+ words")
copy_code_pattern = re.compile(r"```(.*?)Copy code\s*```")

def reformat_code(val: str) -> str:
    # Input code format is:
    # ```
    # $<language>Copy code$<exact_code_here>
    #
    # ```
    # This function converts it into the correct markdown format.
    return re.sub(code_lang_pattern, code_lang_format, val)


def html_to_markdown(val: str) -> str:
    # Remove all <div>. This is required to make indentation work in code blocks.
    val = re.sub(div_pattern, "", val)
    # Remove all <span>. This is required to make underscores work in code blocks.
    val = re.sub(span_pattern, "", val)
    # HTML to markdown
    val = markdownify.markdownify(val).strip()
    # Reformat code
    val = reformat_code(val)

    # Remove noisy "[number] / [number]" at the beginning
    noise = re.search(regenerate_pattern, val)
    if noise and noise.start() == 0:
        val = val[noise.end():]
    # Remove noisy "Copy[number] chars / [number] words"
    val = re.sub(copy_chars_pattern, "", val)
    # Remove empty code block ```\nCopy code\n```
    val = re.sub(copy_code_pattern, "", val)

    # Strip
    val = val.replace("\n\n\n", "\n").strip()

    if args.debug:
        print(val)
        exit()

    return val


def should_skip(val: str) -> bool:
    black_list = ["openai", "chatgpt"]
    for w in black_list:
        if w in val.lower():
            return True
    return False


def clean_html_source(content, begin, end, check_tag, check_num):
    """
    Clean the input json content.

    Args:
        content: json file loaded in memory.
        check_tag: a debug-purpose arg. If a conversation contains the tag,
            log it before and after cleaning.
        check_num: number of matched conversations logged.
    """
    BARRIER = "\n" + "=" * 20 + "\n"
    skip_cnt = 0
    tag_cnt = 0

    content = content[begin:end]
    new_content = []

    for sample in tqdm.tqdm(content):
        skipped = False

        if len(sample["conversations"]) <= 1:
            # The conversation is too short
            skipped = True
        else:
            for c in sample["conversations"]:
                if should_skip(c["value"]):
                    skipped = True
                    break

                try:
                    new_val = html_to_markdown(c["value"])
                except (bs4.builder.ParserRejectedMarkup, AssertionError):
                    skipped = True
                    break

                c["value"] = new_val

                # Debug
                if (check_tag is not None and check_tag in c["value"]
                        and tag_cnt < check_num):
                    logging.debug(BARRIER + c["value"] + "\n" + BARRIER + new_val +
                                  "\n" + BARRIER + "\n")
                    tag_cnt += 1
                    if tag_cnt == check_num:
                        break

        if not skipped:
            new_content.append(sample)
        else:
            skip_cnt += 1

    print(f"total: {len(content)}, skip: {skip_cnt}, new: {len(new_content)}")
    return new_content


def main(args):
    content = json.load(open(args['in_file'], "r"))
    content = clean_html_source(
        content, args['begin'], args['end'],
        args['check_tag'], args['check_num'])
    json.dump(content, open(args['out_file'], "w"), indent=2)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--in-file", type=str, required=True)
    parser.add_argument("--out-file", type=str, default="sharegpt_clean.json")
    parser.add_argument("--begin", type=int)
    parser.add_argument("--end", type=int)
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--check-tag", type=str)
    parser.add_argument("--check-num", type=int, default=1)
    args = parser.parse_args()
    main(vars(args))
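
A small illustration of what reformat_code repairs (the raw string below is an assumed example of the input shape): ShareGPT HTML dumps leak the "Copy code" button text into fenced blocks, and the regex rewrites them into valid markdown:

from clean_sharegpt import reformat_code

raw = "```pythonCopy codeprint('hi')\n```"
print(reformat_code(raw))
# ```python
# print('hi')
# ```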
ShareGPT_Vicuna_unfiltered/deduplicate.py
ADDED
@@ -0,0 +1,48 @@
import json
from typing import List, Tuple
from concurrent.futures import ProcessPoolExecutor, as_completed
import logging
from hashlib import md5

from tqdm import tqdm


# Percentage of similarity between two conversations to be considered a duplicate
similarity_threshold = 80


def remove_duplicates(conversations: List[dict]) -> List[dict]:
    unique_ids = {}
    unique_hashes = set()

    with ProcessPoolExecutor() as executor:
        futures = {executor.submit(check_unique, conversation, unique_hashes): conversation for conversation in conversations}
        total_tasks = len(futures)

        for future in tqdm(as_completed(futures), total=total_tasks, desc="Deduplicating", unit="conversations"):
            is_unique, conversation = future.result()
            if is_unique:
                id_ = conversation.pop('id')
                hash_ = conversation_hash(conversation)
                # Keying by content hash means later duplicates overwrite
                # earlier ones, so exactly one copy of each conversation survives.
                unique_ids[hash_] = (id_, conversation)
                unique_hashes.add(hash_)
            else:
                logging.debug(f"Duplicate found: {conversation}")

        executor.shutdown(wait=True)

    return [{'id': unique_ids[hash_][0], **unique_ids[hash_][1]} for hash_ in unique_hashes]


def check_unique(conversation: dict, unique_hashes: set) -> Tuple[bool, dict]:
    # Each worker process receives a pickled snapshot of unique_hashes, so
    # this check only catches hashes added before the task was submitted;
    # the dict overwrite in remove_duplicates() does the real deduplication.
    hash_ = conversation_hash(conversation)

    if hash_ in unique_hashes:
        return False, conversation

    return True, conversation


def conversation_hash(conversation: dict) -> str:
    # Hash the set of message texts, ignoring order and speaker labels.
    set_ = frozenset(msg['value'] for msg in conversation['conversations'])
    return md5(json.dumps(sorted(list(set_))).encode()).hexdigest()
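
A minimal sketch (hypothetical data) of calling the deduplicator directly; two entries whose message sets are identical hash the same, so only one survives:

import deduplicate

if __name__ == "__main__":
    data = [
        {"id": "a", "conversations": [{"from": "human", "value": "hi"}]},
        {"id": "b", "conversations": [{"from": "human", "value": "hi"}]},
    ]
    print(len(deduplicate.remove_duplicates(data)))  # 1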
ShareGPT_Vicuna_unfiltered/fastchat_validate.py
ADDED
@@ -0,0 +1,187 @@
import dataclasses
from enum import auto, Enum
from typing import List, Tuple, Any


def get_default_conv_template(model_name):
    model_name = model_name.lower()
    if "vicuna" in model_name or "output" in model_name:
        return conv_vicuna_v1_1
    return conv_one_shot

def preprocess(sources):
    conv = get_default_conv_template("vicuna").copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())


class SeparatorStyle(Enum):
    """Different separator style."""

    SINGLE = auto()
    TWO = auto()
    DOLLY = auto()
    OASST_PYTHIA = auto()
    BAIZE = auto()

@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history."""

    system: str
    roles: List[str]
    messages: List[List[str]]
    offset: int
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "###"
    sep2: str = None

    # Used for the state in the gradio servers.
    # TODO(lmzheng): refactor this
    conv_id: Any = None
    skip_next: bool = False
    model_name: str = None

    def get_prompt(self):
        if self.sep_style == SeparatorStyle.SINGLE:
            ret = self.system
            for role, message in self.messages:
                if message:
                    ret += self.sep + " " + role + ": " + message
                else:
                    ret += self.sep + " " + role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.TWO:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = self.system
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ":\n" + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += "\n\n"
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.OASST_PYTHIA:
            ret = self.system
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.BAIZE:
            ret = self.system
            for role, message in self.messages:
                if message:
                    ret += "\n" + role + message
                else:
                    ret += "\n" + role
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def append_message(self, role, message):
        self.messages.append([role, message])

    def to_gradio_chatbot(self):
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def copy(self):
        return Conversation(
            system=self.system,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            conv_id=self.conv_id,
            model_name=self.model_name,
        )

    def dict(self):
        return {
            "system": self.system,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "sep": self.sep,
            "sep2": self.sep2,
            "conv_id": self.conv_id,
            "model_name": self.model_name,
        }

conv_vicuna_v1_1 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_one_shot = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=(
        (
            "Human",
            "What are the key differences between renewable and non-renewable energy sources?",
        ),
        (
            "Assistant",
            "Renewable energy sources are those that can be replenished naturally in a relatively "
            "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
            "Non-renewable energy sources, on the other hand, are finite and will eventually be "
            "depleted, such as coal, oil, and natural gas. Here are some key differences between "
            "renewable and non-renewable energy sources:\n"
            "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
            "energy sources are finite and will eventually run out.\n"
            "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
            "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
            "and other negative effects.\n"
            "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
            "have lower operational costs than non-renewable sources.\n"
            "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
            "locations than non-renewable sources.\n"
            "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
            "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
            "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
            "non-renewable sources are not, and their depletion can lead to economic and social instability.",
        ),
    ),
    offset=2,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)
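
For reference, a short sketch of the Vicuna v1.1 prompt format that preprocess() validates against (system prompt abbreviated in the comment):

from fastchat_validate import conv_vicuna_v1_1

conv = conv_vicuna_v1_1.copy()
conv.append_message(conv.roles[0], "Hello")
conv.append_message(conv.roles[1], None)  # None leaves the assistant slot open
print(conv.get_prompt())
# "<system prompt> USER: Hello ASSISTANT:"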
ShareGPT_Vicuna_unfiltered/merge_json.js
ADDED
@@ -0,0 +1,36 @@
const fs = require('fs');

function loadAndConcatJSON(files, callback)
{
    let data = [];
    for (let file of files)
    {
        data.push(require(file));
    }

    let output = [].concat(...data);
    output = JSON.stringify(output);

    fs.writeFile('output.json', output, err =>
    {
        if (err)
        {
            callback(err);
        } else
        {
            callback(null, output);
        }
    });
}


loadAndConcatJSON(['./filtered-vicuna-formatted.json', './filtered.json'], (err, result) => {
    if (err)
    {
        console.error(err);
    }
    else
    {
        console.log("Merged JSON files.");
    }
});
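
Usage sketch: running "node merge_json.js" concatenates the two hard-coded inputs (./filtered-vicuna-formatted.json and ./filtered.json) into output.json; to merge other files, edit the array passed to loadAndConcatJSON.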
ShareGPT_Vicuna_unfiltered/optional_clean.py
ADDED
@@ -0,0 +1,497 @@
import argparse
import json
import re
import os
import logging
import unicodedata
import multiprocessing
from functools import partial

from langdetect import detect_langs
from tqdm import tqdm
from emoji import EMOJI_DATA

import fastchat_validate
import deduplicate


def detect_language(text):
    try:
        detected_langs = detect_langs(text)
        lang_code = detected_langs[0].lang
    except Exception:
        lang_code = "unknown"
    return lang_code


def contains_unwanted_words(text):
    unwanted_words = [
        "text-based AI language model",
        "domestic violence",
        "please refrain",
        "derogatory",
        "inappropriate",
        "offensive",
        "racism",
        "racist",
        "racial",
        "discriminate",
        "discriminatory",
        "discrimination",
        "sexist",
        "sexism",
        "unacceptable",
        "inclusive workplace",
        "lgbt",
        "morals",
        "ethics",
        "ethical",
        "legality",
        "illegal",
        "illegality",
        "hateful",
        "harmful",
        "it is never okay",
        "It is important to",
        "It's important to",
        "real-world consequences",
        "hate speech",
        "glorify",
        "not be appropriate",
        "supremacist",
        "extremist",
        "responsible AI",
        "AI principles",
        "AI assistant",
        "an AI language",
        "ableist",
        "hurtful",
        "gender stereotype",
        "gender inequality",
        "underrepresentation",
        "safe spaces",
        "gender-based",
        "inclusivity",
        "feminist",
        "feminism",
        "transgender",
        "empowerment",
        "communist",
        "capitalism",
        "stereotypes",
        "biases",
        "bias",
        "Microaggression",
        "prioritize human safety",
        "as a language model",
        "as an AI language model",
        "As a large language model",
        "As an AI",
        "ethical principles",
        "consensual",
        "it is not appropriate",
        "it's not appropriate",
        "I cannot fulfill your request",
        "harmful to human beings",
        "ethical guidelines",
        "my guidelines",
        "prioritize user safety",
        "adhere to ethical guidelines",
        "harmful consequences",
        "potentially harmful",
        "dangerous activities",
        "promote safety",
        "well-being of all users",
        "responsible information sharing",
        "jeopardize the safety",
        "illegal actions or intentions",
        "undermine the stability",
        "promote the well-being",
        "illegal activities or actions",
        "adherence to the law",
        "potentially be harmful",
        "illegal substances or activities",
        "committed to promoting",
        "safe information",
        "lawful information",
        "cannot provide guidance",
        "cannot provide information",
        "unable to offer assistance",
        "cannot engage in discussions",
        "programming prohibits",
        "follow ethical guidelines",
        "ensure the safety",
        "involves an illegal subject",
        "prioritize safety",
        "illegal subject",
        "prioritize user well-being",
        "cannot support or promote",
        "activities that could harm",
        "pose a risk to others",
        "against my programming",
        "activities that could undermine",
        "potentially dangerous",
        "not within the scope",
        "designed to prioritize safety",
        "not able to provide",
        "maintain user safety",
        "adhere to safety guidelines",
        "dangerous or harmful",
        "cannot provide any information",
        "focus on promoting safety",
        "an AI language model you don't have",
        "As an AI language model, I cannot",
        "As an AI language model, I do not",
        "As an AI language model, I am not able",
        "As an AI language model, I don't have personal",
        "I am an AI language model and do not",
        "However, it is important to use any code or information provided responsibly and within legal and ethical boundaries.",
        "As an AI language model, I don't have",
        "As an AI language model, I am only able",
        "AI language model and I do not",
        "As an AI language model, I cannot modify",
        "As an AI language model, I do not",
        "I know as an AI language model you don't have",
        "as an AI language model, you cannot",
        "I'm sorry, but as an AI language model",
        "As an AI language model, I don't have",
        "Unfortunately, I cannot provide",
        "I'm sorry, I cannot",
        "I'm sorry, I cannot generate",
        "AI cannot create or program",
        "I'm afraid I cannot create",
        "you cannot create an",
        "it operates ethically and is",
        "had an ethical system",
        "Ensuring the ethical",
        "and ethical sourcing",
        "are from ethical",
        "legal and ethical",
        "engage in unethical",
        "unethical or aggressive",
        "unethical business",
        "como modelo de lenguaje AI",
        "Lo siento, como modelo de lenguaje",
        "no puedo proporcionar",
        "pero debido a mi capacidad para generar c\u00f3digos complejos y completos es limitado",
        "Lo siento, pero no puedo",
        "Lo siento, pero como modelo de lenguaje, no puedo proporcionar",
        "Lo siento, como modelo de lenguaje, no tengo",
        "Lo siento, debe haber habido una confusi\u00f3n",
        "Lo siento, como modelo de lenguaje, no puedo realizar",
        "Lo siento, soy un modelo de lenguaje y no tengo la capacidad de generar",
        "Lamento no poder proporcionarte el c\u00f3digo",
        "Desculpe-me, mas a linguagem vulgar e ofensiva",
        "apropriada em nenhum contexto",
        "Como modelo de linguagem",
        "Como um modelo de linguagem, n\u00e3o tenho a capacidade de",
        "I cannot assist",
        "prioritize ethical",
        "respectful",
        "morally",
        "I'm sorry,",
        "I'm an",
        "I am an",
        "I'm an AI",
        "I am an AI",
        "my purpose",
        "filter_bad_language",
        "filter\_bad\_language",
        "entertainment purposes",
        "purely hypothetical",
        "not a human",
        "I am an AI",
        "cannot provide",
        "can't provide",
        "won't provide",
        "not provide",
        "worth noting",
        "cause harm",
        "a language model",
        "keep in mind",
        "unethical",
        "bad language",
        "the words ****",
        "bad_language",
        "certainly not",
        "complying",
        "comply",
        "I cannot",
        "my main goal",
        "As a machine",
        "I don't have the ability",
        "I am here to assist",
        "my purpose is to ",
        "my knowledge cutoff",
        "my knowledge cut off",
        "September 2021",
        "regulations",
        "not be suitable",
        "I apologize, but",
        "It is not possible",
        "controversial",
        "my programming",
        "ethically",
        "it is important to",
        "Please note",
        "sensitive topic",
        "not acceptable",
        "It is important for",
        "divisive",
        "not appropriate",
        "our values",
        "f\*cking",
        "F\*ck",
        "sh\*t",
        "diversity and",
        "diversity and inclusion",
        "values diversity",
        "social responsibility",
        "environmental, social, and governance",
        " ESG ",
        "against women",
        "problematic history",
        "diversity",
        "*This chat conversation is shared from",
        "*This conversation is shared from",
        "I am a computer program",
        "I do not have the ability",
        "condone",
        "I am a machine learning model",
        "As an artificial intelligence",
        "I am a friendly and helpful AI",
        "I am a highly advanced",
        "I'm sorry",
        "I am sorry",
        "As a language learning model",
        "As an experienced language model",
        "I am just a computer program",
        "As a computer program,",
        "As a text-based language model,",
        "I am essentially a computer program",
        "As your dedicated AI language model",
        "As a hypothetical AI",
        "As a neutral AI",
        "I don't have feelings",
        "I don't have emotions",
        "I do not have personal beliefs or opinions",
        "not a good idea",
        "inequities",
        "gender equality",
        "mutual understanding",
        "did not align",
        "equity and",
        "is a serious crime",
        "taken lightly",
        "criminal behavior",
        "mental health",
        "crime",
        "I apologize",
        "I apologise", #checkmate, uk
        "avec", #checkmate, belgium
        "wie", #checkmate, belgium
        "lo siento", #checkmate, belgium
        "por la", #checkmate zanzibar
        "\\u0", #checkmate everybody else, lol doesn't work
        "our platform",
        "our service",
        "this platform",
        "consult a",
        "contact a",
        " rape",
        "sermon",
        "abuse",
        "Donald Trump",
        "Joe Biden",
        "politic",
        "religio",
        " AI ",
        "Christian",
        "Bible",
        "Jesus",
        " god ",
        "Jew",
        "Judaism",
        "Talmud",
        "Muslim",
        "Islam",
        "Quran",
        "Muhammad",
        "Buddhis",
        "Hindu",
        "family-friendly",
        "bully",
        "I can't",
        "artificial int",
        "their bonds",
        "our bonds",
        "his bonds",
        "her bonds",
        "bond of",
        "bond between",
        "bonds of",
        "bonds between",
        "Too many requests in",
        "langage AI",
        " AI.",
        "désolé",
        "D\u00e9sol\u00e9",
        "Er was eens",
        "Sprachmodell",
        "modèle de langage"
    ]
    # Considered Names for the Dataset after nuking:
    #
    # Punished ShareGPT: A Fallen Legend
    # ShareGPT Wasteland Edition
    # ShareGPT 76: It Just Works Edition
    # ShaGPT
    for word in unwanted_words:
        if word.lower() in text.lower():
            logging.debug(f"Found unwanted word: {word}")
            return True
    return False


emojis = EMOJI_DATA.keys()

def skip(conv, args):

    if any(
        sentence["value"] == "" or contains_unwanted_words(sentence["value"])
        for sentence in conv["conversations"]
    ):
        return True

    text = "".join(sentence["value"] for sentence in conv["conversations"])

    if args.nounicode:
        non_eng_chars = sum(1 for c in text if not c.isascii())
        if non_eng_chars > 0:
            return True

    for char in text:
        if args.lang != "all" or args.skip_lang is not None:
            unicode_category = unicodedata.category(char)
            if (
                unicode_category.startswith(('C', 'P', 'S', 'Z'))
                or unicode_category == 'Nd'
                or 'LATIN' in unicodedata.name(char)
                or char in emojis
            ):
                continue
            # Found a disallowed (non-Latin) character while language
            # filtering is active: filter this conversation out.
            return True

    if args.reduce_rep:
        if any(re.search(r"(\d)\1{8}", sentence["value"]) for sentence in conv["conversations"]):
            return True

    return False


def filter_conversations(conv, args, bad_ids):
    return not skip(conv, args) and conv["id"] not in bad_ids


def get_file_size_mb(file_path):
    file_size_bytes = os.path.getsize(file_path)
    file_size_mb = file_size_bytes / (1024 * 1024)
    return file_size_mb


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--in-file", type=str, required=True)
    parser.add_argument("--out-file", type=str, default="")
    parser.add_argument("--lang", type=str, default="all",
                        choices=["all", "en"])
    parser.add_argument("--skip-lang", type=str)
    parser.add_argument("--reduce-rep", action="store_true")
    parser.add_argument("--validate", action="store_true")
    parser.add_argument("--sanitize", action="store_true")
    parser.add_argument("--bad_ids", type=str, default="")
    parser.add_argument("--nounicode", action="store_true")
    parser.add_argument("--log_removals", default=True, action="store_true")
    parser.add_argument("--deduplicate", default=False, action="store_true")
    args = parser.parse_args()

    if args.validate:
        data = json.load(open(args.in_file, "r", encoding="utf-8"))
        sources = [example["conversations"] for example in data]
        fastchat_validate.preprocess(sources)
        print("Validated Dataset")
        raise SystemExit(0)

    bad_ids = []
    if args.bad_ids != "":
        # Load the conversation ids flagged by classify.py
        with open(args.bad_ids, "r") as f:
            bad_id_json = json.load(f)
            bad_ids = set(item["id"] for item in bad_id_json)

    in_file = args.in_file
    out_file = args.out_file
    lang = args.lang
    skip_lang = args.skip_lang
    reduce_rep = args.reduce_rep
    log_removals = args.log_removals
    assert (lang == "all" or skip_lang is None)

    if out_file == "":
        out_file = "sharegpt_clean"
        if lang != "all":
            out_file += "_" + lang
        if skip_lang is not None:
            out_file += "_skip_" + skip_lang
        if reduce_rep:
            out_file += "_reduce_rep"
        out_file += ".json"

    content = json.load(open(in_file, "r", encoding="utf-8"))
    num_conv = len(content)

    if log_removals:
        removal_log_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'removals.log')
        open(removal_log_path, 'w').close()
        logging.basicConfig(filename=removal_log_path, level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    if args.sanitize:
        print('Sanitizing')
        for entries in tqdm(content, unit='conversations'):
            for message in entries["conversations"]:
                if message["from"] == "user":
                    message["from"] = "human"
                elif message["from"] == "bing" or message["from"] == "chatgpt" or message["from"] == "system":
                    message["from"] = "gpt"

    print('Analyzing')
    pool = multiprocessing.Pool()
    filter_func = partial(filter_conversations, args=args, bad_ids=bad_ids)
    new_content = list(tqdm(pool.imap(filter_func, content), total=len(content), unit='conversations'))
    pool.close()
    pool.join()

    # Keep only filtered conversations
    new_content = [conv for conv, keep in zip(content, new_content) if keep]

    new_len = len(new_content)
    print(f"Skipped {num_conv - new_len} conversations")
    num_conv = new_len

    if args.deduplicate:
        print('Deduplicating')
        new_content = deduplicate.remove_duplicates(new_content)
        new_len = len(new_content)
        print(f"Removed {num_conv - new_len} duplicates")
        num_conv = new_len

    print(f"return {len(new_content)} out of {len(content)}, start dump ...")
    json.dump(new_content, open(out_file, "w"), indent=2)

    print(f'Initial: {get_file_size_mb(in_file):.2f} MB')
    print(f'Cleaned: {get_file_size_mb(out_file):.2f} MB')
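
One plausible invocation (a sketch; every flag is defined above). With --reduce-rep the default output name becomes sharegpt_clean_reduce_rep.json, and removals.log is written next to the script since --log_removals defaults to True:

python3 optional_clean.py --in-file ShareGPT_2023.05.02v0_unfiltered_cleaned_split.json --sanitize --deduplicate --reduce-rep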
ShareGPT_Vicuna_unfiltered/pretty_json.py
ADDED
@@ -0,0 +1,20 @@
"""
Usage:
python3 pretty_json.py --in-file in.json --out-file out.json
"""

import argparse
import json


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--in-file", type=str, required=True)
    parser.add_argument("--out-file", type=str, required=True)
    args = parser.parse_args()

    with open(args.in_file, "r") as fin:
        data = json.load(fin)

    with open(args.out_file, "w") as fout:
        json.dump(data, fout, indent=2)
ShareGPT_Vicuna_unfiltered/split_long_conversation.py
ADDED
@@ -0,0 +1,99 @@
"""
Split long conversations based on certain max length.

Usage: python3 split_long_conversation.py \
    --in-file sharegpt_clean.json \
    --out-file sharegpt_split.json \
    --model-name-or-path <model-name>
"""
import argparse
import json
from typing import Dict, Sequence, Optional

import transformers
import tqdm

from fastchat import conversation as conversation_lib

DEFAULT_PAD_TOKEN = "[PAD]"
BEGIN_SIGNAL = "### "
END_SIGNAL = "\n"


def split_sample(sample, start_idx, end_idx):
    # Only end on a bot turn, because otherwise a trailing human turn is useless.
    end_speaker = sample["conversations"][end_idx]["from"]
    end_idx = end_idx + 1 if end_speaker != "human" else end_idx
    return {
        "id": sample["id"] + "_" + str(start_idx),
        "conversations": sample["conversations"][start_idx:end_idx]
    }


def split_contents(content, begin, end, tokenizer, max_length):
    """
    Keep the maximum rounds of conversations within the max token length constraint.
    """
    content = content[begin:end]
    new_content = []

    for sample in tqdm.tqdm(content):
        tokenized_lens = []

        for c in sample["conversations"]:
            from_str = c["from"]
            if from_str.lower() == "human":
                from_str = conversation_lib.default_conversation.roles[0]
            elif from_str.lower() == "gpt":
                from_str = conversation_lib.default_conversation.roles[1]
            else:
                from_str = 'unknown'

            sentence = (BEGIN_SIGNAL + from_str + ": " + c["value"] +
                        END_SIGNAL)
            length = tokenizer(sentence, return_tensors="pt", padding="longest"
                               ).input_ids.ne(tokenizer.pad_token_id).sum().item()
            tokenized_lens.append(length)

        num_tokens = 0
        start_idx = 0
        for idx, l in enumerate(tokenized_lens):
            # TODO: shall we also only start from a specific speaker?
            if num_tokens + l > max_length:
                new_content.append(split_sample(sample, start_idx, idx))
                start_idx = idx
                num_tokens = l
            else:
                num_tokens += l
                if idx == len(tokenized_lens) - 1:
                    new_content.append(split_sample(sample, start_idx, idx))

    print(f"total: {len(content)}, new: {len(new_content)}")
    return new_content


def main(args):
    content = json.load(open(args.in_file, "r"))
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        args.model_name_or_path,
        model_max_length=args.max_length,
        padding_side="right",
        use_fast=False,
    )
    if tokenizer.pad_token is None:
        tokenizer.add_special_tokens(dict(pad_token=DEFAULT_PAD_TOKEN))
    content = split_contents(content, args.begin, args.end,
                             tokenizer, args.max_length)
    json.dump(content, open(args.out_file, "w"), indent=2)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--in-file", type=str, required=True)
    parser.add_argument("--out-file", type=str, default="sharegpt_split.json")
    parser.add_argument("--begin", type=int)
    parser.add_argument("--end", type=int)
    parser.add_argument("--model-name-or-path", type=str, required=True)
    parser.add_argument("--max-length", type=int, default=2304)
    args = parser.parse_args()
    main(args)
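
A tiny sketch (hypothetical sample) of how split_sample slices a conversation so a chunk never ends on a human turn:

from split_long_conversation import split_sample

sample = {"id": "abc", "conversations": [
    {"from": "human", "value": "q1"}, {"from": "gpt", "value": "a1"},
    {"from": "human", "value": "q2"}, {"from": "gpt", "value": "a2"},
]}
# end_idx points at a human turn, so the chunk is trimmed back to the bot turn:
print(split_sample(sample, 0, 2))
# {'id': 'abc_0', 'conversations': [{'from': 'human', 'value': 'q1'}, {'from': 'gpt', 'value': 'a1'}]}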
ShareGPT_Vicuna_unfiltered/validate.js
ADDED
@@ -0,0 +1,43 @@
const fs = require("fs");
try
{
    let json = fs.readFileSync("ShareGPT_V4.1_unfiltered_cleaned_split.json", "utf8");
    var obj = JSON.parse(json);
    console.log("No structure problem");

    let sameTalker = 0;
    let notEnoughTalkers = 0;
    //let dupeIndex = [];
    for (var i = 0; i < obj.length; i++)
    {
        let last = "";
        if (obj[i]["conversations"].length <= 1)
        {
            // console.log(obj[i-1]["conversations"]);
            //console.log(obj[i]["conversations"][0]["from"]);
            notEnoughTalkers++;
        }
        for (var j = 0; j < obj[i]["conversations"].length; j++)
        {
            let conv = obj[i]["conversations"][j]

            if (last == conv["from"])
            {
                //dupeIndex.push(i);
                sameTalker++;
                break;
            }
            last = conv["from"];

            //console.log(last);
        }
    }
    console.log("Found " + notEnoughTalkers + " empty or single-message conversations");
    console.log("Found " + sameTalker + " conversations with consecutive messages from the same speaker");
    console.log("Total Bad Conversations: " + (sameTalker + notEnoughTalkers) + "/" + obj.length);
    console.log("Done")
}
catch (e) {
    // The JSON was invalid, `e` has some further information
    console.log(e);
}
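
Usage sketch: "node validate.js" checks the hard-coded ShareGPT_V4.1_unfiltered_cleaned_split.json in the working directory; edit the readFileSync path to validate a different dataset file.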