Create code/make_pretty_dataset.py
code/make_pretty_dataset.py ADDED (+67 -0)
import os
import json
import glob
from datetime import datetime

from datasets import Dataset
from huggingface_hub import HfApi

TOKEN = os.environ.get("HF_WRITE_TOKEN")
API = HfApi(token=TOKEN)
REPO_ID = "AIEnergyScore/results_debug"  # "meg/calculate_carbon_runs"
UPLOAD_REPO_ID = "meg/HUGS_energy"

# Download a local snapshot of the results dataset and walk its directory
# tree, which is laid out as <task>/<org>/<model>/<timestamp>/.
output_directory = API.snapshot_download(repo_id=REPO_ID, repo_type="dataset")
print(output_directory)

dataset_results = []
for task in ["text_generation"]:
    org_dirs = glob.glob(f"{output_directory}/{task}/*")
    print(org_dirs)
    for org_dir in org_dirs:
        org = org_dir.split("/")[-1]
        model_dirs = glob.glob(f"{org_dir}/*")
        print(model_dirs)
        for model_dir in model_dirs:
            model = model_dir.split("/")[-1]
            model_runs = glob.glob(f"{model_dir}/*")
            dates = [run.split("/")[-1] for run in model_runs]
            try:
                # Sort the run timestamps as dates, then keep only the most
                # recent run for this model.
                sorted_dates = sorted(
                    datetime.strptime(date, "%Y-%m-%d-%H-%M-%S") for date in dates
                )
                last_date = sorted_dates[-1].strftime("%Y-%m-%d-%H-%M-%S")
                most_recent_run = f"{model_dir}/{last_date}"
                print(most_recent_run)
                try:
                    with open(f"{most_recent_run}/benchmark_report.json") as f:
                        benchmark_report = json.load(f)
                    print(benchmark_report)
                    prefill_data = benchmark_report["prefill"]
                    decode_data = benchmark_report["decode"]
                    preprocess_data = benchmark_report["preprocess"]
                    dataset_results.append(
                        {
                            "task": task,
                            "org": org,
                            "model": model,
                            "hardware": "a10g-large",
                            "date": last_date,
                            "prefill": {
                                "energy": prefill_data["energy"],
                                "efficiency": prefill_data["efficiency"],
                            },
                            "decode": {
                                "energy": decode_data["energy"],
                                "efficiency": decode_data["efficiency"],
                            },
                            "preprocess": {
                                "energy": preprocess_data["energy"],
                                "efficiency": preprocess_data["efficiency"],
                            },
                        }
                    )
                except FileNotFoundError:
                    # No report means the run failed; surface its error log.
                    with open(f"{most_recent_run}/error.log") as f:
                        print(f.read())
            except ValueError:
                # Not a directory named with a timestamp; skip it.
                continue

# Collect all rows into a Dataset and push it to the Hub.
hub_dataset_results = Dataset.from_list(dataset_results)
print(hub_dataset_results)
hub_dataset_results.push_to_hub(UPLOAD_REPO_ID, token=TOKEN)
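For reference, the script assumes only that each benchmark_report.json contains prefill, decode, and preprocess entries, each carrying an energy and an efficiency field; everything else in the report is ignored. Below is a minimal sketch of reading the pushed dataset back, assuming the meg/HUGS_energy repo is readable with the same token — hypothetical usage, not part of this commit:

import os
from datasets import load_dataset

# push_to_hub writes a single "train" split by default.
ds = load_dataset(
    "meg/HUGS_energy",
    split="train",
    token=os.environ.get("HF_WRITE_TOKEN"),
)
# Each row mirrors the dicts built above: task, org, model, hardware, date,
# plus nested prefill/decode/preprocess measurements.
print(ds[0]["model"], ds[0]["prefill"]["energy"])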