matching_series/__main__.py
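"""Command-line entry point for computing the matching series score between
two sets of time series stored in numpy arrays.

Example (illustrative paths):
    python -m matching_series predictions.npy references.npy --output results.json
"""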
import json
import logging
import time
from argparse import ArgumentParser
import evaluate  # only needed for the commented-out evaluate.load(...) alternative below
import numpy as np
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
parser = ArgumentParser(
    description="Compute the matching series score between two time series stored in numpy arrays"
)
parser.add_argument("predictions", type=str, help="Path to the numpy array containing the predictions")
parser.add_argument("references", type=str, help="Path to the numpy array containing the references")
parser.add_argument("--output", type=str, help="Path to the output file")
parser.add_argument("--batch_size", type=int, help="Batch size to use for the computation")
parser.add_argument("--num_processes", type=int, help="Batch size to use for the computation", default=1)
parser.add_argument("--dtype", type=str, help="Data type to use for the computation", default="float32")
parser.add_argument("--debug", action="store_true", help="Debug mode")
args = parser.parse_args()
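# argparse already enforces the positionals, but fail fast with an explicit message anyway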
if not args.predictions or not args.references:
    raise ValueError("You must provide the path to the predictions and references numpy arrays")
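# Load both arrays and cast them to the requested dtype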
predictions = np.load(args.predictions).astype(args.dtype)
references = np.load(args.references).astype(args.dtype)
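# Debug mode: keep only the first 1000 series for a quick run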
if args.debug:
    predictions = predictions[:1000]
    references = references[:1000]
logger.info(f"predictions shape: {predictions.shape}")
logger.info(f"references shape: {references.shape}")
import matching_series
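# Time the metric computation end to end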
s = time.time()
metric = matching_series.matching_series()
# metric = evaluate.load("matching_series.py")
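# return_each_features / return_coverages request per-feature scores and
# coverage statistics in addition to the aggregate score (judging by the flag names)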
results = metric.compute(
    predictions=predictions,
    references=references,
    batch_size=args.batch_size,
    num_processes=args.num_processes,
    return_each_features=True,
    return_coverages=True,
    dtype=args.dtype,
)
logger.info(f"Time taken: {time.time() - s}")
print(json.dumps(results))
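# Optionally write the results to the given JSON file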
if args.output:
    with open(args.output, "w") as f:
        json.dump(results, f)