maom committed on
Commit
2071122
1 Parent(s): cb51931

add curation scripts

src/00_setup_curation.sh ADDED
@@ -0,0 +1,24 @@
+ # from a base directory
+
+ mkdir data
+ mkdir intermediate
+ mkdir product
+
+ cd product
+ git clone https://huggingface.co/RosettaCommons/MIP
+ cd ..
+
+ # Run each numbered script in product/MIP/src/ in order (starting with this one)
+ #
+ # Tips:
+ # 1) Make sure to set the working directory to the base directory (outside of the HF repo)
+ # 2) While most of the scripts should work end-to-end, I recommend running them interactively
+ # 3) Some stages require more memory than others; all can be done with < 400 GB of memory,
+ #    but perhaps more could be done to reduce the memory requirements
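
Before moving on to the R stages, it can help to confirm the layout this script creates. A minimal sketch in R (not part of the original script; it assumes the working directory is the base directory, per Tip 1 above):

    # Confirm the directories created above exist before running the R scripts.
    stopifnot(dir.exists(c("data", "intermediate", "product/MIP")))
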
src/01_gather_data.R ADDED
@@ -0,0 +1,17 @@
+ # Download data from: https://zenodo.org/records/6611431
+ # Sequence-structure-function relationships in the microbial protein universe
+
+ # 45.4 GB
+ system("cd data && curl -L -o microbiome_immunity_project_dataset.zip 'https://zenodo.org/records/6611431/files/microbiome_immunity_project_dataset.zip?download=1'")
+
+ md5sum_expected <- "b3e021609ffa052d2ab2333dc998964b  data/microbiome_immunity_project_dataset.zip"
+ md5sum <- system(
+     "md5sum data/microbiome_immunity_project_dataset.zip",
+     intern = TRUE)
+ if (md5sum != md5sum_expected) {
+     cat("Expected and obtained md5sum values don't match\n")
+ }
+
+ system("cd data && unzip microbiome_immunity_project_dataset.zip")
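
As a portable alternative to shelling out to md5sum, the checksum can also be verified with base R's tools::md5sum(); a minimal sketch (the expected hash is copied from the script above):

    # Hedged alternative: compute the md5 hash in R rather than via system().
    md5_observed <- tools::md5sum("data/microbiome_immunity_project_dataset.zip")
    if (unname(md5_observed) != "b3e021609ffa052d2ab2333dc998964b") {
        warning("Expected and observed md5sum values don't match")
    }
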
src/02.1_assemble_datasets.R ADDED
@@ -0,0 +1,250 @@
+ #' Assemble a Rosetta models dataset
+ #'
+ #' @param data_path character directory where the .pdb.gz files are located
+ #' @param output_path character output .parquet path
+ #'
+ #' Writes a .parquet file to output_path with columns
+ #'     <id> <key> <value> <pdb>
+ #' where the <key>/<value> pairs are the score entries following
+ #' the TER line in each .pdb.gz file
+ assemble_rosetta_models <- function(
+     data_path,
+     output_path) {
+
+     cat(
+         "data path: ", data_path, "\n",
+         "output path: ", output_path, "\n",
+         sep = "")
+
+     file_index <- 1
+     models <- list.files(
+         path = data_path,
+         full.names = TRUE,
+         pattern = "\\.pdb\\.gz$",
+         recursive = TRUE) |>
+         purrr::map_dfr(.f = function(path) {
+             file_handle <- path |>
+                 file(open = "rb") |>
+                 gzcon()
+
+             if (file_index %% 1000 == 0) {
+                 cat("Reading '", path, "' ", file_index, "\n", sep = "")
+             }
+             file_index <<- file_index + 1
+
+             lines <- file_handle |> readLines()
+             file_handle |> close()
+
+             # index of the TER record (assumes a single chain per file)
+             ter_line_index <- which(lines |> stringr::str_detect("^TER"))
+
+             # score lines follow the TER record; the last line of the file is excluded
+             lines[(ter_line_index + 1):(length(lines) - 1)] |>
+                 paste0(collapse = "\n") |>
+                 readr::read_delim(
+                     delim = " ",
+                     col_names = c("key", "value"),
+                     show_col_types = FALSE) |>
+                 dplyr::mutate(
+                     id = path |>
+                         basename() |>
+                         stringr::str_replace_all(stringr::fixed(".pdb.gz"), ""),
+                     .before = 1) |>
+                 dplyr::mutate(
+                     pdb = lines[1:ter_line_index] |> paste0(collapse = "\n"))
+         })
+     models <- arrow::arrow_table(models)
+     models$pdb <- models$pdb$cast(arrow::string())
+     models |> arrow::write_parquet(output_path)
+ }
+
+
+ # call assemble_rosetta_models for the high_quality and low_quality datasets
+ dataset_tag <- "rosetta_high_quality_models"
+ assemble_rosetta_models(
+     data_path = paste0(
+         "data/microbiome_immunity_project_dataset/dataset/",
+         dataset_tag),
+     output_path = paste0("intermediate/", dataset_tag, ".parquet"))
+
+ dataset_tag <- "rosetta_low_quality_models"
+ assemble_rosetta_models(
+     data_path = paste0(
+         "data/microbiome_immunity_project_dataset/dataset/",
+         dataset_tag),
+     output_path = paste0("intermediate/", dataset_tag, ".parquet"))
+
+ #' Assemble a DMPfold models dataset
+ #'
+ #' @param data_path character directory where the .pdb.gz files are located
+ #' @param output_path character output .parquet path
+ #'
+ #' Writes a .parquet file to output_path with columns
+ #'     <id> <pdb>
+ #'
+ #' Note that DMPfold doesn't write out score lines the way Rosetta does
+ assemble_dmpfold_models <- function(
+     data_path,
+     output_path) {
+
+     cat(
+         "data path: ", data_path, "\n",
+         "output path: ", output_path, "\n",
+         sep = "")
+
+     file_index <- 1
+     models <- list.files(
+         path = data_path,
+         full.names = TRUE,
+         pattern = "\\.pdb\\.gz$",
+         recursive = TRUE) |>
+         purrr::map_dfr(.f = function(path) {
+             file_handle <- path |>
+                 file(open = "rb") |>
+                 gzcon()
+
+             if (file_index %% 1000 == 0) {
+                 cat("Reading '", path, "' ", file_index, "\n", sep = "")
+             }
+             file_index <<- file_index + 1
+
+             lines <- file_handle |> readLines()
+             file_handle |> close()
+
+             # index of the TER record (assumes a single chain per file)
+             ter_line_index <- which(lines |> stringr::str_detect("^TER"))
+
+             data.frame(
+                 id = path |>
+                     basename() |>
+                     stringr::str_replace_all(stringr::fixed(".pdb.gz"), ""),
+                 pdb = lines[1:ter_line_index] |> paste0(collapse = "\n"))
+         })
+     models |>
+         arrow::write_parquet(output_path)
+ }
+
+ # call assemble_dmpfold_models for the high_quality and low_quality datasets
+ dataset_tag <- "dmpfold_high_quality_models"
+ assemble_dmpfold_models(
+     data_path = paste0(
+         "data/microbiome_immunity_project_dataset/dataset/",
+         dataset_tag),
+     output_path = paste0("intermediate/", dataset_tag, ".parquet"))
+
+ dataset_tag <- "dmpfold_low_quality_models"
+ assemble_dmpfold_models(
+     data_path = paste0(
+         "data/microbiome_immunity_project_dataset/dataset/",
+         dataset_tag),
+     output_path = paste0("intermediate/", dataset_tag, ".parquet"))
+
+
+ ####################################
+ ##                                ##
+ ## Assemble Function Predictions ##
+ ##                                ##
+ ####################################
+
+
+ #' Assemble a DeepFRI function prediction dataset
+ #'
+ #' @param data_path character directory where the *_pred_scores.json.gz files are located
+ #' @param output_path character output .parquet path
+ #'
+ #' Writes a .parquet file to output_path with columns
+ #'     <id> <term_ontology> <term_id> <term_name> <Y_hat>
+ #'
+ #' <id>: structure identifier like `MIP_00004873`
+ #' <term_ontology>: term ontology, one of [BP, CC, EC, MF]
+ #' <term_id>: GO or EC term identifier like `GO:0009225`
+ #' <term_name>: description of the term
+ assemble_DeepFRI_function_predictions <- function(
+     data_path,
+     output_path) {
+
+     cat(
+         "data path: ", data_path, "\n",
+         "output path: ", output_path, "\n",
+         sep = "")
+
+     file_index <- 1
+
+     scores <- c("BP", "CC", "EC", "MF") |>
+         purrr::map_dfr(.f = function(ontology) {
+             cat("Reading prediction scores for ontology ", ontology, "\n", sep = "")
+             list.files(
+                 path = data_path,
+                 full.names = TRUE,
+                 pattern = paste0("_", ontology, "_pred_scores\\.json\\.gz$"),
+                 recursive = TRUE) |>
+                 purrr::map_dfr(.f = function(path) {
+                     cat("Reading '", path, "' ", file_index, "\n", sep = "")
+                     file_index <<- file_index + 1
+
+                     data <- jsonlite::fromJSON(txt = path)
+
+                     scores <- as.data.frame(data$Y_hat)
+                     names(scores) <- data$goterms
+                     scores <- scores |>
+                         dplyr::mutate(
+                             id = data$pdb_chains,
+                             .before = 1) |>
+                         tidyr::pivot_longer(
+                             cols = -"id",
+                             names_to = "term_id",
+                             values_to = "Y_hat") |>
+                         dplyr::left_join(
+                             data.frame(
+                                 term_ontology = ontology,
+                                 term_id = data$goterms,
+                                 term_name = data$gonames),
+                             by = "term_id") |>
+                         dplyr::select(
+                             id,
+                             term_ontology,
+                             term_id,
+                             term_name,
+                             Y_hat)
+                 })
+         })
+
+     scores |>
+         arrow::write_parquet(output_path)
+ }
+
+ # call assemble_DeepFRI_function_predictions for all four datasets
+ dataset_tag <- "rosetta_high_quality_function_predictions"
+ assemble_DeepFRI_function_predictions(
+     data_path = paste0(
+         "data/microbiome_immunity_project_dataset/dataset/",
+         dataset_tag),
+     output_path = paste0("intermediate/", dataset_tag, ".parquet"))
+
+ dataset_tag <- "rosetta_low_quality_function_predictions"
+ assemble_DeepFRI_function_predictions(
+     data_path = paste0(
+         "data/microbiome_immunity_project_dataset/dataset/",
+         dataset_tag),
+     output_path = paste0("intermediate/", dataset_tag, ".parquet"))
+
+ dataset_tag <- "dmpfold_high_quality_function_predictions"
+ assemble_DeepFRI_function_predictions(
+     data_path = paste0(
+         "data/microbiome_immunity_project_dataset/dataset/",
+         dataset_tag),
+     output_path = paste0("intermediate/", dataset_tag, ".parquet"))
+
+ dataset_tag <- "dmpfold_low_quality_function_predictions"
+ assemble_DeepFRI_function_predictions(
+     data_path = paste0(
+         "data/microbiome_immunity_project_dataset/dataset/",
+         dataset_tag),
+     output_path = paste0("intermediate/", dataset_tag, ".parquet"))
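
To spot-check an assembled output without materializing the large pdb column, only the id column needs to be read back; a minimal sketch against one of the outputs written above:

    # Count models in one assembled dataset; col_select avoids loading the pdb text.
    ids <- arrow::read_parquet(
        "intermediate/dmpfold_high_quality_models.parquet",
        col_select = "id")
    cat("n models: ", nrow(ids), "\n", sep = "")
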
src/02.2_check_assembled_datasets.R ADDED
@@ -0,0 +1,64 @@
+ # check consistency between the models and the function predictions
+ source("product/MIP/src/summarize_map.R")
+
+
+ check_function_prediction_pivot <- function(dataset_tag, verbose = FALSE) {
+
+     if (verbose) {
+         cat("Checking function prediction pivot for dataset ", dataset_tag, "\n", sep = "")
+     }
+     dataset_long <- arrow::read_parquet(
+         paste0("intermediate/", dataset_tag, "_function_predictions.parquet"))
+
+     dataset_wide <- dataset_long |>
+         dplyr::select(-term_name) |>
+         tidyr::pivot_wider(
+             id_cols = id,
+             names_from = term_id,
+             values_from = Y_hat)
+
+     # number of missing id x term cells after pivoting; 0 is expected
+     # if every model has a score for every term
+     sum(is.na(dataset_wide))
+ }
+
+ check_function_prediction_pivot("rosetta_high_quality")
+ check_function_prediction_pivot("rosetta_low_quality")
+ check_function_prediction_pivot("dmpfold_high_quality")
+ check_function_prediction_pivot("dmpfold_low_quality")
+
+
+ check_id_consistency <- function(dataset_tag, verbose = FALSE) {
+     if (verbose) {
+         cat("Loading model ids...\n")
+     }
+     ids_model <- arrow::read_parquet(
+         paste0("intermediate/", dataset_tag, "_models.parquet"),
+         col_select = "id")
+
+     if (verbose) {
+         cat("Loading function prediction ids...\n")
+     }
+     ids_anno <- arrow::read_parquet(
+         paste0("intermediate/", dataset_tag, "_function_predictions.parquet"),
+         col_select = "id") |>
+         dplyr::distinct(id)
+
+     problems <- dplyr::full_join(
+         ids_model |>
+             dplyr::mutate(model_id = id),
+         ids_anno |>
+             dplyr::mutate(anno_id = id),
+         by = "id") |>
+         summarize_map(
+             x_cols = model_id,
+             y_cols = anno_id,
+             verbose = verbose)
+     problems
+ }
+
+ check_id_consistency("rosetta_high_quality")
+ check_id_consistency("rosetta_low_quality")
+ check_id_consistency("dmpfold_high_quality")
+ check_id_consistency("dmpfold_low_quality")
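
For non-interactive runs, the checks above can be hardened into assertions; a minimal sketch, assuming a clean dataset yields a pivot NA count of 0 and an empty problems list (expectations about this data, not guarantees of these functions):

    # Fail loudly instead of relying on visual inspection of the results.
    for (tag in c("rosetta_high_quality", "rosetta_low_quality",
                  "dmpfold_high_quality", "dmpfold_low_quality")) {
        stopifnot(check_function_prediction_pivot(tag) == 0)
        stopifnot(length(check_id_consistency(tag)) == 0)
    }
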
src/03.1_upload_data.py ADDED
@@ -0,0 +1,156 @@
+ # install the dependencies from the command line:
+ #
+ #     pip install huggingface_hub
+ #     pip install datasets
+ #
+ # then log into the Hugging Face Hub:
+ #
+ #     huggingface-cli login
+ #
+ # This will prompt you for an access token.
+
+
+ import datasets
+
+ ##### rosetta_high_quality_models #######
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "rosetta_high_quality_models",
+     data_dir = "./intermediate",
+     data_files = {"train": "rosetta_high_quality_models.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     split = "train",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "RosettaCommons/MIP",
+     config_name = "rosetta_high_quality_models",
+     data_dir = "rosetta_high_quality_models/data")
+
+
+ ##### rosetta_low_quality_models #######
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "rosetta_low_quality_models",
+     data_dir = "./intermediate",
+     data_files = {"train": "rosetta_low_quality_models.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     split = "train",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "RosettaCommons/MIP",
+     config_name = "rosetta_low_quality_models",
+     data_dir = "rosetta_low_quality_models/data")
+
+
+ ##### dmpfold_high_quality_models #######
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "dmpfold_high_quality_models",
+     data_dir = "./intermediate",
+     data_files = {"train": "dmpfold_high_quality_models.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     split = "train",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "RosettaCommons/MIP",
+     config_name = "dmpfold_high_quality_models",
+     data_dir = "dmpfold_high_quality_models/data")
+
+
+ ##### dmpfold_low_quality_models #######
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "dmpfold_low_quality_models",
+     data_dir = "./intermediate",
+     data_files = {"train": "dmpfold_low_quality_models.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     split = "train",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "RosettaCommons/MIP",
+     config_name = "dmpfold_low_quality_models",
+     data_dir = "dmpfold_low_quality_models/data")
+
+
+ ##########################
+ ## Function Predictions ##
+ ##########################
+
+ #### rosetta_high_quality_function_predictions
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "rosetta_high_quality_function_predictions",
+     data_dir = "./intermediate",
+     data_files = {"train": "rosetta_high_quality_function_predictions.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     split = "train",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "RosettaCommons/MIP",
+     config_name = "rosetta_high_quality_function_predictions",
+     data_dir = "rosetta_high_quality_function_predictions/data")
+
+ #### rosetta_low_quality_function_predictions
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "rosetta_low_quality_function_predictions",
+     data_dir = "./intermediate",
+     data_files = {"train": "rosetta_low_quality_function_predictions.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     split = "train",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "RosettaCommons/MIP",
+     config_name = "rosetta_low_quality_function_predictions",
+     data_dir = "rosetta_low_quality_function_predictions/data")
+
+ #### dmpfold_high_quality_function_predictions
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "dmpfold_high_quality_function_predictions",
+     data_dir = "./intermediate",
+     data_files = {"train": "dmpfold_high_quality_function_predictions.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     split = "train",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "RosettaCommons/MIP",
+     config_name = "dmpfold_high_quality_function_predictions",
+     data_dir = "dmpfold_high_quality_function_predictions/data")
+
+ #### dmpfold_low_quality_function_predictions
+ dataset = datasets.load_dataset(
+     "parquet",
+     name = "dmpfold_low_quality_function_predictions",
+     data_dir = "./intermediate",
+     data_files = {"train": "dmpfold_low_quality_function_predictions.parquet"},
+     cache_dir = "/scratch/maom_root/maom0/maom",
+     split = "train",
+     keep_in_memory = True)
+
+ dataset.push_to_hub(
+     repo_id = "RosettaCommons/MIP",
+     config_name = "dmpfold_low_quality_function_predictions",
+     data_dir = "dmpfold_low_quality_function_predictions/data")
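
The upload script assumes all eight .parquet files from the earlier stages are in intermediate/; a minimal pre-upload sanity check, sketched in R to match the assembly scripts:

    # Report the row count of each intermediate parquet file before uploading.
    for (path in list.files("intermediate", pattern = "\\.parquet$", full.names = TRUE)) {
        n_rows <- nrow(arrow::read_parquet(path, col_select = "id"))
        cat(path, ": ", n_rows, " rows\n", sep = "")
    }
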
src/03.2_check_uploaded_data.py ADDED
@@ -0,0 +1,40 @@
+ import datasets
+ import pandas
+ import pyarrow.parquet
+
+ def test_local_hf_match(dataset_tag):
+     print(f"For dataset '{dataset_tag}', testing if the local and remote ids match ...")
+     ids_hf = datasets.load_dataset(
+         path = "RosettaCommons/MIP",
+         name = dataset_tag,
+         data_dir = dataset_tag,
+         cache_dir = "/scratch/maom_root/maom0/maom",
+         keep_in_memory = True).data['train'].select(['id']).to_pandas()
+     ids_local = pyarrow.parquet.read_table(
+         source = f"intermediate/{dataset_tag}.parquet",
+         columns = ["id"]).to_pandas()
+     assert ids_local.equals(ids_hf)
+
+
+ test_local_hf_match("rosetta_high_quality_models")
+ test_local_hf_match("rosetta_low_quality_models")
+ test_local_hf_match("dmpfold_high_quality_models")
+ test_local_hf_match("dmpfold_low_quality_models")
+
+ test_local_hf_match("rosetta_high_quality_function_predictions")
+ test_local_hf_match("rosetta_low_quality_function_predictions")
+ test_local_hf_match("dmpfold_high_quality_function_predictions")
+ test_local_hf_match("dmpfold_low_quality_function_predictions")
+
+
+ # spot check that the long -> wide pivot of one function prediction
+ # dataset behaves as expected (NA cells indicate missing scores)
+ dataset_long = pyarrow.parquet.read_table(
+     "intermediate/dmpfold_low_quality_function_predictions.parquet").to_pandas()
+
+ dataset_wide = pandas.pivot(
+     dataset_long[["id", "term_id", "Y_hat"]],
+     columns = "term_id",
+     index = "id",
+     values = "Y_hat")
src/summarize_map.R ADDED
@@ -0,0 +1,346 @@
+ #' Diagnostics for messy joins
+ #'
+ #' Given a data frame with two ways to group rows,
+ #' summarize and give examples of situations where the mapping is not 1-1
+ #'
+ #' @param data data.frame containing both sets of identifier columns
+ #' @param x_cols tidyselect specification of the set of columns defining the X objects
+ #' @param y_cols tidyselect specification of the set of columns defining the Y objects
+ #' @param n_examples maximum number of example rows to return per problem type
+ #' @param verbose logical, print explanatory notes along with the summary
+ #'
+ #' data <- data.frame(
+ #'     x = c(1, 2, NA, 3, 4, 5, 6, 6, 6, 7, 7),
+ #'     y = c("a", NA, "c", "d", "d", "d", "e", "f", "g", "h", "h"))
+ #'
+ #' data |> summarize_map(
+ #'     x_cols = x,
+ #'     y_cols = y)
+ #' X<-[x]:
+ #'   |X|: 7               # number of groups
+ #'   |is.na.X|: 1         # number of groups with NA in at least 1 col
+ #'   range(|x|:X): 1, 3   # size range of groups
+ #' Y<-[y]:
+ #'   |Y|: 7
+ #'   |is.na.Y|: 1
+ #'   range(|y|:Y): 1, 3
+ #' [X U Y]:               # grouping by the union of x_cols and y_cols
+ #'   |X U Y|: 8
+ #'   |is.na.XUY|: 2
+ #'   range(|z|:X U Y): 1, 2
+ #' [X @ Y]:
+ #'   |X ~ Y|: 5
+ #'   |X:X < Y|, |Y:Y < X|: 1, 1
+ #'   |X:X > Y|, |Y:Y > X|: 3, 3
+ #' $is.na.X
+ #'    x y
+ #' 1 NA c
+ #'
+ #' $is.na.Y
+ #'   x    y
+ #' 1 2 <NA>
+ #'
+ #' $dup.XUY
+ #'   x y
+ #' 1 7 h
+ #' 2 7 h
+ #'
+ #' $dup.X
+ #'   x y
+ #' 1 6 e
+ #' 2 6 f
+ #' 3 6 g
+ #'
+ #' $dup.Y
+ #'   y x
+ #' 1 d 3
+ #' 2 d 4
+ #' 3 d 5
+ #' @export
+ summarize_map <- function(
+     data,
+     x_cols,
+     y_cols,
+     n_examples = 4,
+     verbose = FALSE) {
+
+     # convert the column selections into named vectors of column indices into data
+     x_cols <- tidyselect::eval_select(rlang::enquo(x_cols), data)
+     y_cols <- tidyselect::eval_select(rlang::enquo(y_cols), data)
+     xUy_cols <- union(x_cols, y_cols)
+     names(xUy_cols) <- names(data[xUy_cols])
+
+     if (verbose) {
+         cat("The following is a report of the relationship between two different ways of identifying instances\n")
+     }
+
+     # example rows illustrating each kind of problem
+     problems <- list()
+
+     count_xUy <- data |>
+         dplyr::count(dplyr::across(tidyselect::all_of(xUy_cols))) |>
+         dplyr::ungroup()
+     count_x <- count_xUy |>
+         dplyr::count(dplyr::across(tidyselect::all_of(names(x_cols))), name = "size") |>
+         dplyr::ungroup()
+     count_y <- count_xUy |>
+         dplyr::count(dplyr::across(tidyselect::all_of(names(y_cols))), name = "size") |>
+         dplyr::ungroup()
+
+     if (verbose) {
+         cat("\nProperties of the X identifiers:\n")
+     }
+     cat("X<-[", paste(names(x_cols), collapse = ", "), "]:\n", sep = "")
+     cat("  |X|: ", count_x |> stats::na.omit() |> nrow(), sep = "")
+
+     na_count <- data |>
+         dplyr::select(tidyselect::all_of(x_cols)) |>
+         stats::complete.cases() |>
+         magrittr::not() |>
+         sum()
+     cat(
+         ifelse(
+             na_count == 0,
+             "",
+             paste0(" (", na_count, " NA)")),
+         "\n", sep = "")
+
+     size_dist <- count_x |>
+         stats::na.omit() |>
+         dplyr::count(size) |>
+         dplyr::ungroup()
+     if (nrow(size_dist) < 12) {
+         cat("  count*size: ",
+             paste(size_dist$n, size_dist$size, sep = "*", collapse = ", "),
+             "\n", sep = "")
+     } else {
+         top <- 1:6
+         bottom <- (nrow(size_dist) - 6 + 1):nrow(size_dist)
+         cat("  count*size: ",
+             paste(
+                 size_dist$n[top],
+                 size_dist$size[top], sep = "*", collapse = ", "),
+             ", ... ",
+             paste(
+                 size_dist$n[bottom],
+                 size_dist$size[bottom], sep = "*", collapse = ", "),
+             "\n", sep = "")
+     }
+
+     if (verbose) {
+         cat("\nProperties of the Y identifiers:\n")
+     }
+     cat("Y<-[", paste(names(y_cols), collapse = ", "), "]:\n", sep = "")
+     cat("  |Y|: ", count_y |> stats::na.omit() |> nrow(), sep = "")
+     na_count <- data |>
+         dplyr::select(tidyselect::all_of(y_cols)) |>
+         stats::complete.cases() |>
+         magrittr::not() |>
+         sum()
+     cat(ifelse(na_count == 0, "", paste0(" (", na_count, " NA)")), "\n", sep = "")
+
+     size_dist <- count_y |>
+         stats::na.omit() |>
+         dplyr::count(size) |>
+         dplyr::ungroup()
+     if (nrow(size_dist) < 12) {
+         cat("  count*size: ",
+             paste(size_dist$n, size_dist$size, sep = "*", collapse = ", "),
+             "\n", sep = "")
+     } else {
+         top <- 1:6
+         bottom <- (nrow(size_dist) - 6 + 1):nrow(size_dist)
+         cat("  count*size: ",
+             paste(
+                 size_dist$n[top],
+                 size_dist$size[top], sep = "*", collapse = ", "),
+             ", ... ",
+             paste(
+                 size_dist$n[bottom],
+                 size_dist$size[bottom], sep = "*", collapse = ", "),
+             "\n", sep = "")
+     }
+
+     if (verbose) {
+         cat("\nProperties of the union of the X and Y identifiers:\n")
+     }
+     cat("[X U Y]:\n")
+     cat("  |X U Y|: ", count_xUy |> stats::na.omit() |> nrow(), sep = "")
+     na_count <- data |>
+         dplyr::select(tidyselect::all_of(xUy_cols)) |>
+         stats::complete.cases() |>
+         magrittr::not() |>
+         sum()
+     cat(ifelse(na_count == 0, "", paste0(" (", na_count, " NA)")), "\n", sep = "")
+
+     size_dist <- count_xUy |>
+         stats::na.omit() |>
+         dplyr::rename(size = n) |>
+         dplyr::count(size) |>
+         dplyr::ungroup()
+     if (nrow(size_dist) < 12) {
+         cat("  count*size: ",
+             paste(size_dist$n, size_dist$size, sep = "*", collapse = ", "),
+             "\n", sep = "")
+     } else {
+         top <- 1:6
+         bottom <- (nrow(size_dist) - 6 + 1):nrow(size_dist)
+         cat("  count*size: ",
+             paste(
+                 size_dist$n[top],
+                 size_dist$size[top], sep = "*", collapse = ", "),
+             ", ... ",
+             paste(
+                 size_dist$n[bottom],
+                 size_dist$size[bottom], sep = "*", collapse = ", "),
+             "\n", sep = "")
+     }
+
+     count_xUy <- count_xUy |> stats::na.omit()
+
+     if (verbose) {
+         cat("Properties of the intersection of the X and Y identifiers:\n")
+     }
+     cat("[X @ Y]:\n")
+     if (verbose) {
+         cat("  Number of X and Y identifiers that are 1 to 1:\n")
+     }
+     cat("  |X ~ Y|: ",
+         count_xUy |>
+             dplyr::semi_join(
+                 count_x |> dplyr::filter(size == 1),
+                 by = names(x_cols)) |>
+             dplyr::semi_join(
+                 count_y |> dplyr::filter(size == 1),
+                 by = names(y_cols)) |>
+             nrow(),
+         "\n", sep = "")
+
+     if (verbose) {
+         cat("  Number of X and Y identifiers where an X identifier maps to multiple Y identifiers:\n")
+     }
+     cat(
+         "  |X:X < Y|, |Y:Y < X|: ",
+         count_xUy |>
+             dplyr::semi_join(
+                 count_x |> dplyr::filter(size > 1),
+                 by = names(x_cols)) |>
+             nrow(),
+         ", ",
+         count_xUy |>
+             dplyr::count(
+                 dplyr::across(tidyselect::all_of(names(x_cols))),
+                 name = "size") |>
+             dplyr::filter(size > 1) |>
+             nrow(),
+         "\n", sep = "")
+
+     if (verbose) {
+         cat(
+             "  Number of X and Y identifiers where a Y identifier maps to ",
+             "multiple X identifiers:\n", sep = "")
+     }
+     cat(
+         "  |X:X > Y|, |Y:Y > X|: ",
+         count_xUy |>
+             dplyr::semi_join(
+                 count_y |>
+                     dplyr::filter(size > 1),
+                 by = names(y_cols)) |>
+             nrow(),
+         ", ",
+         count_xUy |>
+             dplyr::count(
+                 dplyr::across(tidyselect::all_of(names(y_cols))),
+                 name = "size") |>
+             dplyr::filter(size > 1) |>
+             nrow(),
+         "\n", sep = "")
+
+     # is.na.X: rows where the X identifier is NA
+     ex_rows <- data |>
+         dplyr::select(tidyselect::all_of(x_cols)) |>
+         stats::complete.cases() |>
+         magrittr::not() |>
+         which()
+     if (length(ex_rows)) {
+         if (!is.null(n_examples) && (n_examples < length(ex_rows))) {
+             ex_rows <- ex_rows |> sample(n_examples, replace = FALSE)
+         }
+         problems$is.na.X <- data |>
+             dplyr::slice(ex_rows) |>
+             dplyr::arrange(dplyr::across(tidyselect::all_of(names(x_cols))))
+     }
+
+     # is.na.Y: rows where the Y identifier is NA
+     ex_rows <- data |>
+         dplyr::select(tidyselect::all_of(y_cols)) |>
+         stats::complete.cases() |>
+         magrittr::not() |>
+         which()
+     if (length(ex_rows)) {
+         if (!is.null(n_examples) && (n_examples < length(ex_rows))) {
+             ex_rows <- ex_rows |> sample(n_examples, replace = FALSE)
+         }
+         problems$is.na.Y <- data |>
+             dplyr::slice(ex_rows) |>
+             dplyr::arrange(dplyr::across(tidyselect::all_of(names(y_cols))))
+     }
+
+     # dup.X: X identifiers that map to multiple Y identifiers
+     dup.X <- count_xUy |>
+         dplyr::filter(n == 1) |>
+         dplyr::count(
+             dplyr::across(tidyselect::all_of(names(x_cols))),
+             name = "size") |>
+         dplyr::filter(size > 1) |>
+         dplyr::ungroup() |>
+         dplyr::select(-size)
+     if (nrow(dup.X) > 0) {
+         if (!is.null(n_examples) && (n_examples < nrow(dup.X))) {
+             dup.X <- dup.X |> dplyr::sample_n(n_examples, replace = FALSE)
+         }
+         problems$dup.X <- dup.X |>
+             dplyr::left_join(data, by = names(x_cols)) |>
+             dplyr::arrange(dplyr::across(tidyselect::all_of(names(x_cols))))
+     }
+
+     # dup.Y: Y identifiers that map to multiple X identifiers
+     dup.Y <- count_xUy |>
+         dplyr::filter(n == 1) |>
+         dplyr::count(
+             dplyr::across(tidyselect::all_of(names(y_cols))),
+             name = "size") |>
+         dplyr::filter(size > 1) |>
+         dplyr::ungroup() |>
+         dplyr::select(-size)
+     if (nrow(dup.Y) > 0) {
+         if (!is.null(n_examples) && (n_examples < nrow(dup.Y))) {
+             dup.Y <- dup.Y |> dplyr::sample_n(n_examples, replace = FALSE)
+         }
+         problems$dup.Y <- dup.Y |>
+             dplyr::left_join(data, by = names(y_cols)) |>
+             dplyr::arrange(dplyr::across(tidyselect::all_of(names(y_cols))))
+     }
+
+     # dup.XUY: rows where the X and Y identifiers together are duplicated
+     dup.XUY <- count_xUy |>
+         dplyr::filter(n > 1) |>
+         dplyr::select(-n)
+     if (nrow(dup.XUY) > 0) {
+         if (!is.null(n_examples) && (n_examples < nrow(dup.XUY))) {
+             dup.XUY <- dup.XUY |> dplyr::sample_n(n_examples, replace = FALSE)
+         }
+         problems$dup.XUY <- dup.XUY |>
+             dplyr::left_join(data, by = names(xUy_cols)) |>
+             dplyr::arrange(dplyr::across(tidyselect::all_of(names(xUy_cols))))
+     }
+     if (verbose) {
+         cat("Returned instances where:\n")
+         cat("\tis.na.X: The X identifier is NA\n")
+         cat("\tis.na.Y: The Y identifier is NA\n")
+         cat("\tdup.X: The X identifier is not unique\n")
+         cat("\tdup.Y: The Y identifier is not unique\n")
+         cat("\tdup.XUY: The X and Y identifiers together are not unique\n")
+     }
+     problems
+ }
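
A runnable usage sketch, reproducing the example from the roxygen block above:

    source("product/MIP/src/summarize_map.R")

    data <- data.frame(
        x = c(1, 2, NA, 3, 4, 5, 6, 6, 6, 7, 7),
        y = c("a", NA, "c", "d", "d", "d", "e", "f", "g", "h", "h"))

    # Prints the summary report and returns example rows for each problem type.
    problems <- data |> summarize_map(x_cols = x, y_cols = y)
    names(problems)  # is.na.X, is.na.Y, dup.X, dup.Y, dup.XUY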