# %%
# Set logging level to DEBUG
import logging
import os
import dotenv
import numpy as np
import pandas as pd
import pyalex
import rdflib
from ampligraph.datasets import (
    GraphDataLoader,
    SQLiteAdapter,
    DataSourceIdentifier,
)
from ampligraph.datasets.graph_partitioner import NaiveGraphPartitioner, BucketGraphPartitioner
from ampligraph.evaluation import train_test_split_no_unseen, hits_at_n_score, mrr_score
from ampligraph.latent_features import ScoringBasedEmbeddingModel
from pyalex import Authors, Concepts, Funders, Institutions, Publishers, Sources, Works
from sklearn.model_selection import train_test_split
import tensorflow as tf
from ampligraph.latent_features.loss_functions import get as get_loss
from ampligraph.latent_features.regularizers import get as get_regularizer
logging.basicConfig(level=logging.DEBUG)
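# Keep the root logger at DEBUG but quieten every already-registered logger to INFO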
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
    logger.setLevel(logging.INFO)
# Load the triples from the file
X_train = np.load("train.npy")
X_valid = np.load("valid.npy")
X_test = np.load("test.npy")
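# Each split is expected to be an (N, 3) array of (subject, predicate, object) string triples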
## Store as CSVs. There are commas in the names of some institutions, so we need to use a tab as the delimiter
#np.savetxt("train.csv", X_train, delimiter="\t", fmt="%s")
#np.savetxt("valid.csv", X_valid, delimiter="\t", fmt="%s")
#np.savetxt("test.csv", X_test, delimiter="\t", fmt="%s")
#
#print(f"Train size: {X_train.shape[0]}")
#print(f"Valid size: {X_valid.shape[0]}")
#print(f"Test size: {X_test.shape[0]}")
# Initialize a ComplEx neural embedding model: the embedding size is k,
# eta specifies the number of corruptions to generate for each positive,
# scoring_type determines the scoring function of the embedding model.
partitioned_model = ScoringBasedEmbeddingModel(k=150, eta=10, scoring_type="ComplEx")
# Optimizer, loss and regularizer definition
optim = tf.keras.optimizers.Adam(learning_rate=1e-3)
loss = get_loss("pairwise", {"margin": 0.5})
regularizer = get_regularizer("LP", {"p": 2, "lambda": 1e-5})
# Compilation of the model
partitioned_model.compile(
    optimizer=optim, loss=loss, entity_relation_regularizer=regularizer
)
# For evaluation, we can use a filter to remove any positive statements
# accidentally generated by the corruption procedure.
# Here we define the filter set by concatenating all the positives
filter = {"test": np.concatenate((X_train, X_valid, X_test))}
# Early Stopping callback
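# Stops training once filtered validation Hits@10 has not improved for 5 evaluation rounds, restoring the best weights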
checkpoint = tf.keras.callbacks.EarlyStopping(
    monitor="val_hits10",
    min_delta=0,
    patience=5,
    verbose=1,
    mode="max",
    restore_best_weights=True,
)
######
use_db = False
if use_db:
    AMPLIGRAPH_DATA_HOME = os.path.join(os.getcwd(), "data")  # + os.sep
    from ampligraph.datasets.data_indexer import SQLite as SQLiteIndexer, DataIndexer
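    # The SQLite-backed indexer keeps the entity/relation-to-integer mapping on disk,
    # which is intended to let graphs that do not fit in memory be indexed and iterated lazily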
    # Initialize GraphDataLoader from .csv file
    sqlite_indexer = SQLiteIndexer(
        data=None,
        db_file="main_partition.db",
        root_directory=AMPLIGRAPH_DATA_HOME,
        name="main_partition",
    )
    indexer = DataIndexer(
        X=None,
        backend_type='sqlite',
        backend=sqlite_indexer,
    )
    dataset_loader = GraphDataLoader(
        "train.csv",
        backend=SQLiteAdapter,
        in_memory=False,
        verbose=True,
        root_directory=AMPLIGRAPH_DATA_HOME,
        db_name="mydb.db",
        use_indexer=indexer,
    )
    # adapter = SQLiteAdapter(
    #     "database_25-12-2023_07-28-41_485047_PM_2a11fc49-2337-415e-8672-2bfa48a83745.db",
    #     identifier=DataSourceIdentifier,
    #     root_directory=AMPLIGRAPH_DATA_HOME,
    # )
    print("Graph data loader initialized")
    # for elem in next(dataset_loader._get_batch_generator()):
    #     print(elem)
    #     break
######
else:
    dataset_loader = GraphDataLoader(
        X_train,
        verbose=True,
        use_indexer=True,
        in_memory=True,
    )
    print(f'next: {next(dataset_loader)}')
    print(f'next: {next(dataset_loader)}')
    print(f'next: {next(dataset_loader)}')
#x = np.loadtxt(
# "train.csv",
# delimiter="\t",
# dtype=str,
#)
#print(x[0])
# Choose the partitioner - in this case we use the NaiveGraphPartitioner
partition = False
if partition:
    print("Will start partitioning now")
    graph_partitioner_train = NaiveGraphPartitioner(dataset_loader, k=6)
    print("Graph partitioner initialized")
    # indexer = (
    #     partitioned_model.data_handler.get_mapper()
    # )  # get the mapper from the trained model
    # dataset_loader_test = GraphDataLoader(
    #     data_source=X_test,
    #     backend=SQLiteAdapter,  # type of backend to use
    #     batch_size=400,  # batch size to use while iterating over this dataset
    #     dataset_type="test",  # dataset type
    #     use_indexer=indexer,  # mapper to map test concepts to the same indices used during training
    #     verbose=True,
    # )
    # graph_partitioner_test = BucketGraphPartitioner(data=partitioner, k=3)
print("Will start training now")
# Fit the model on training and validation set
partitioned_model.fit(
    # graph_partitioner_train if partition else dataset_loader,
    X_train,
    batch_size=500,
    epochs=45,  # Number of training epochs
    validation_freq=20,  # Epochs between successive validation
    validation_burn_in=100,  # Epoch to start validation
    validation_data=X_test,  # Validation data
    validation_filter=filter,  # Filter positives from validation corruptions
    callbacks=[
        checkpoint
    ],  # Early stopping callback (more from tf.keras.callbacks are supported)
    verbose=True,  # Enable stdout messages
    # partitioning_k=7,  # Number of partitions to create
)
# %%
# Store the model
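# Calling tf.keras.Model.save_weights through super() presumably sidesteps the subclass's
# own checkpointing logic and just dumps the raw weights into the model/ directory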
super(ScoringBasedEmbeddingModel, partitioned_model).save_weights("model/")
partitioned_model.save_metadata(filedir="model")
# from ampligraph.utils import save_model
# save_model(partitioned_model, model_name_path='model.pkl')
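# To reload the model later, a sketch (assumes the pickle-based save_model path above is
# used; restore_model is its counterpart in ampligraph.utils):
# from ampligraph.utils import restore_model
# restored_model = restore_model(model_name_path='model.pkl')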
# %%
# Create a dataframe of the institutions and their names
import pandas as pd
import rdflib
g = rdflib.Graph()
uri = "urn:acmcmc:unis:"
unis = rdflib.Namespace(uri)
g.bind("unis", unis)
g.parse("universities_large_1200.ttl", format="turtle")
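# Pull every institution and its name out of the RDF graph via SPARQL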
query_results = g.query(
    """
    SELECT DISTINCT ?institution ?name
    WHERE {
        ?institution a unis:Institution .
        ?institution unis:name ?name .
    }
    """
)
institutions = pd.DataFrame(query_results, columns=["institution", "name"])
institutions["institution"] = institutions["institution"].apply(lambda x: str(x))
institutions["name"] = institutions["name"].apply(lambda x: str(x))
# Store the dataframe
institutions.to_csv("institutions.csv", index=False)
# %%
# Run the evaluation procedure on the test set (with filtering)
# To disable filtering: use_filter=None
# Usually, we corrupt subject and object sides separately and compute ranks
ranks = partitioned_model.evaluate(X_test, use_filter=filter, corrupt_side="s,o")
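# `ranks` holds one rank per corrupted side for each test triple (lower is better)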
# compute and print metrics:
mrr = mrr_score(ranks)
hits_10 = hits_at_n_score(ranks, n=10)
print("MRR: %f, Hits@10: %f" % (mrr, hits_10))