Added aicg
- .gitattributes +14 -0
- aicg/aicg.json +3 -0
- aicg/clean.py +30 -0
- aicg/convert-to-instruct.py +24 -0
- aicg/csv-to-json.py +27 -0
- aicg/csvs/Log_20230415_122514.csv +3 -0
- aicg/csvs/Log_20230415_131942.csv +0 -0
- aicg/csvs/Log_20230415_132110.csv +3 -0
- aicg/csvs/Log_20230415_134333.csv +3 -0
- aicg/csvs/Log_20230417_152213.csv +3 -0
- aicg/csvs/Log_20230417_165354.csv +3 -0
- aicg/csvs/Log_20230417_174922.csv +3 -0
- aicg/csvs/Log_20230417_183445.csv +3 -0
- aicg/csvs/Log_20230417_191610.csv +3 -0
- aicg/csvs/Log_20230417_195418.csv +3 -0
- aicg/csvs/Log_20230417_203437.csv +3 -0
- aicg/csvs/Log_20230417_210300.csv +3 -0
- aicg/csvs/merged.csv +3 -0
- aicg/dedup-clean.py +40 -0
- aicg/getgozzed.txt +0 -0
- aicg/instruct_output.json +3 -0
- aicg/merge.py +23 -0
- aicg/validatecsv.py +39 -0
.gitattributes CHANGED
@@ -52,3 +52,17 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+aicg/aicg.json filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230415_122514.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230415_132110.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230415_134333.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230417_152213.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230417_165354.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230417_174922.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230417_183445.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230417_191610.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230417_195418.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230417_203437.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/Log_20230417_210300.csv filter=lfs diff=lfs merge=lfs -text
+aicg/csvs/merged.csv filter=lfs diff=lfs merge=lfs -text
+aicg/instruct_output.json filter=lfs diff=lfs merge=lfs -text
aicg/aicg.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57845dd4c2c4d7f284a59653a79694ca6e9336df00c96e67e05b5f70297c7ed2
+size 563689805
aicg/clean.py ADDED
@@ -0,0 +1,30 @@
+# Import the os and re modules
+import os
+import re
+
+# Define the directory where the csv files are located
+directory = "."
+
+# Define a function to remove invalid characters from a csv file
+def remove_invalid_chars(file):
+    # Open the file in read mode with UTF-8 encoding and ignore errors
+    with open(file, "r", encoding="utf-8", errors="ignore") as infile:
+        # Read the file content as a string
+        content = infile.read()
+        # Remove any null bytes from the string
+        content = content.replace("\x00", "")
+        # Remove any non-printable characters from the string
+        # content = re.sub(r"[^\x20-\x7E]", "", content)
+    # Open the file in write mode with UTF-8 encoding
+    with open(file, "w", encoding="utf-8") as outfile:
+        # Write the modified content to the file
+        outfile.write(content)
+
+# Loop through each file in the directory
+for file in os.listdir(directory):
+    # Check if the file is a csv file
+    if file.endswith(".csv"):
+        # Remove invalid characters from the csv file and print the result
+        print(f"Removing invalid characters from {file}...")
+        remove_invalid_chars(os.path.join(directory, file))
+        print(f"{file} is cleaned.")
aicg/convert-to-instruct.py ADDED
@@ -0,0 +1,24 @@
+import json
+
+input_file = "aicg.json"
+output_file = "instruct_output.json"
+
+with open(input_file, "r", encoding="utf-8") as infile:
+    input_data = json.load(infile)
+output_data = []
+
+for item in input_data:
+
+    prompt_string = item["prompt string"]
+    response = item["response"]
+
+    output_item = {
+        "input": "",
+        "instruction": prompt_string,
+        "output": response
+    }
+
+    output_data.append(output_item)
+
+with open(output_file, "w", encoding="utf-8") as outfile:
+    json.dump(output_data, outfile, indent=4)
aicg/csv-to-json.py ADDED
@@ -0,0 +1,27 @@
+# Import the csv and json modules
+import csv
+import json
+
+# Define the input and output file names
+csv_file = "merged.csv"
+json_file = "output.json"
+
+# Open the csv file in read mode
+with open(csv_file, "r", encoding="utf-8") as infile:
+    # Create a csv reader object
+    reader = csv.reader(infile)
+    # Get the header row
+    header = next(reader)
+    # Create an empty list to store the data
+    data = []
+    # Loop through the rest of the rows
+    for row in reader:
+        # Create a dictionary for each row using the header as keys
+        record = dict(zip(header, row))
+        # Append the dictionary to the data list
+        data.append(record)
+
+# Open the json file in write mode
+with open(json_file, "w", encoding="utf-8") as outfile:
+    # Dump the data list as json to the output file
+    json.dump(data, outfile, indent=4)
aicg/csvs/Log_20230415_122514.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:375ede0bd2239dfda37b692fcecc359a5e7bcf1d66d2290dc0a702a958c54d3d
+size 30458190
aicg/csvs/Log_20230415_131942.csv ADDED
The diff for this file is too large to render.
aicg/csvs/Log_20230415_132110.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75b1d6379abde0f27a66bc0b70f37ef5a6f9c14cec80b854348f0d28c7fb7827
+size 37042193
aicg/csvs/Log_20230415_134333.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8649306e1ab2cf4915ee387a8587ffc74a5a26900954ad067473eb43de3c585
+size 15184269
aicg/csvs/Log_20230417_152213.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c23e3c36d5df6eeadfea4ea84847357ebe2e7beafe43ab25029806e1fc829fd1
+size 63633917
aicg/csvs/Log_20230417_165354.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd9ab1ef0c80ce5550ebfac1de5f2b8cd90a88fc4d466b718032a415ed61ff51
+size 59353852
aicg/csvs/Log_20230417_174922.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a77d6128705e9107328e139ca2d9c5aed20550d4c2d153936d891d0cce724550
+size 60828096
aicg/csvs/Log_20230417_183445.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66f96c676c5dd68f368d6dd7bf9f4d85e5d27b54a7ca5d4053ca96a8101aec2d
+size 57428666
aicg/csvs/Log_20230417_191610.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06d6e7f1aca98c87b00cc839d9366c0ec7695f3c4db9adf72765708d56a51a56
+size 61793735
aicg/csvs/Log_20230417_195418.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fa499f5a46c3a5c4b9e1287e9b21c6125e49e2c55fa7abe4a854d7c808b7298
+size 63951564
aicg/csvs/Log_20230417_203437.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab2888292b3ebd7e0fb398077882f48d4054da82209d63072eb280ba90e656f6
+size 44427578
aicg/csvs/Log_20230417_210300.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b674b368cf55587a7261e2b77d1ca8d5d0344ec2a7997d84ffaabb47e4e27ab
+size 54080658
aicg/csvs/merged.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8558133a33cf42a5a3d137bf256a3102e3100a638c4f8648b4eec744fd974b87
+size 549763229
aicg/dedup-clean.py ADDED
@@ -0,0 +1,40 @@
+# Import the json and os modules
+import json
+import os
+
+# Define the directory where the json files are located
+directory = "./test"
+
+# Define a function to deduplicate and clean a json file
+def dedup_and_clean(file):
+    # Open the file in read mode
+    with open(file, "r", encoding="utf-8") as infile:
+        # Load the file as a json object
+        data = json.load(infile)
+    # Create an empty list to store the unique and non-empty items
+    new_data = []
+    # Create an empty set to store the seen items
+    seen = set()
+    # Loop through each item in the data
+    for item in data:
+        # Convert the item to a string for hashing
+        item_str = json.dumps(item, sort_keys=True)
+        # Check if the item is not empty and not seen before
+        if item_str != "{}" and item_str not in seen:
+            # Add the item to the new data list
+            new_data.append(item)
+            # Add the item string to the seen set
+            seen.add(item_str)
+    # Open the file in write mode
+    with open(file, "w", encoding="utf-8") as outfile:
+        # Dump the new data list as json to the file
+        json.dump(new_data, outfile, indent=4)
+
+# Loop through each file in the directory
+for file in os.listdir(directory):
+    # Check if the file is a json file
+    if file.endswith(".json"):
+        # Deduplicate and clean the json file and print the result
+        print(f"Deduplicating and cleaning {file}...")
+        dedup_and_clean(os.path.join(directory, file))
+        print(f"{file} is done.")
aicg/getgozzed.txt ADDED
File without changes (empty file).
aicg/instruct_output.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:491db4599e8a94f6afbb1716682d7fbd9eff4e85341686cd8b5483158fabdc5d
+size 282729208
aicg/merge.py ADDED
@@ -0,0 +1,23 @@
+# Import the os and pandas modules
+import os
+import pandas as pd
+
+# Define the directory where the csv files are located
+directory = "."
+
+# Create an empty list to store the data frames
+data_frames = []
+
+# Loop through each file in the directory
+for file in os.listdir(directory):
+    # Check if the file is a csv file
+    if file.endswith(".csv"):
+        # Read the csv file as a data frame and append it to the list
+        data_frame = pd.read_csv(os.path.join(directory, file))
+        data_frames.append(data_frame)
+
+# Concatenate all the data frames into one
+merged_data_frame = pd.concat(data_frames)
+
+# Save the merged data frame as a new csv file
+merged_data_frame.to_csv("merged.csv", index=False, encoding="utf-8")
aicg/validatecsv.py ADDED
@@ -0,0 +1,39 @@
+# Import the csv and os modules
+import csv
+import os
+
+# Define the directory where the csv files are located
+directory = "."
+
+# Define a function to validate a csv file
+def validate_csv(file):
+    # Open the file in read mode
+    with open(file, "r", encoding="utf-8") as f:
+        # Create a csv reader object
+        reader = csv.reader(f)
+        # Get the header row
+        header = next(reader)
+        # Check if the header has the expected number of columns
+        if len(header) != 4:
+            # Return False if not
+            return False
+        # Loop through the rest of the rows
+        for row in reader:
+            # Check if each row has the same number of columns as the header
+            if len(row) != len(header):
+                # Return False if not
+                return False
+    # Return True if no errors are found
+    return True
+
+# Loop through each file in the directory
+for file in os.listdir(directory):
+    # Check if the file is a csv file
+    if file.endswith(".csv"):
+        # Validate the csv file and print the result
+        print(f"Validating {file}...")
+        result = validate_csv(os.path.join(directory, file))
+        if result:
+            print(f"{file} is valid.")
+        else:
+            print(f"{file} is invalid.")