# tiatoolbox-master/tiatoolbox/tools/pyramid.py
"""Tile pyramid generation in standard formats.
Included methods are DeepZoom and Zoomify in addition to a generic
method.
These are generally intended for serialisation or streaming via a web
UI. The `get_tile` method returns a Pillow Image object which can be
easily serialised via the use of an io.BytesIO object or saved directly
to disk.
"""
import tarfile
import time
import zipfile
from io import BytesIO
from pathlib import Path
from typing import Iterable, Tuple, Union
import defusedxml
import numpy as np
from PIL import Image
from tiatoolbox import DuplicateFilter, logger
from tiatoolbox.annotation.storage import AnnotationStore
from tiatoolbox.utils.transforms import imresize, locsize2bounds
from tiatoolbox.utils.visualization import AnnotationRenderer, random_colors
from tiatoolbox.wsicore.wsireader import WSIMeta, WSIReader
defusedxml.defuse_stdlib()
class TilePyramidGenerator:
r"""Generic tile pyramid generator with sensible defaults.
Args:
wsi (WSIReader):
The WSI reader object. Must implement
`tiatoolbox.wsicore.wsireader.WSIReader.read_rect`.
tile_size (int):
The size of tiles to generate. Default is 256. Note that the
output tile size will be :math:`\text{tile size} + 2
\times\text{overlap}`.
downsample (int):
The downsample factor between levels. Default is 2.
overlap (int):
The number of extra pixels to add to each edge of the tile.
Default is 0.
"""
def __init__(
self,
wsi: WSIReader,
tile_size: int = 256,
downsample: int = 2,
overlap: int = 0,
) -> None:
self.wsi = wsi
self.tile_size = tile_size
self.overlap = overlap
self.downsample = downsample
@property
def output_tile_size(self) -> int:
r"""The size of the tile which will be returned.
This is equivalent to :math:`\text{tile size} + 2 \times \text{overlap}`.
"""
return self.tile_size + 2 * self.overlap
def level_downsample(self, level: int) -> float:
"""Find the downsample factor for a level."""
return 2 ** (self.level_count - level - 1)
def level_dimensions(self, level: int) -> Tuple[int, int]:
"""The total pixel dimensions of the tile pyramid at a given level.
Args:
level (int):
The level to calculate the dimensions for.
"""
baseline_dims = self.wsi.info.slide_dimensions
level_dims = np.ceil(
np.divide(baseline_dims, self.level_downsample(level))
).astype(int)
return tuple(level_dims)
def tile_grid_size(self, level: int) -> Tuple[int, int]:
"""Width and height of the minimal grid of tiles to cover the slide.
Args:
level (int):
The level to calculate the grid size for.
"""
if level < 0 or level >= self.level_count:
raise IndexError("Invalid level.")
return tuple(
np.ceil(np.divide(self.level_dimensions(level), self.tile_size)).astype(int)
)
@property
def sub_tile_level_count(self) -> int:
"""The number of sub-tile levels in the pyramid."""
return 0
@property
def level_count(self) -> int:
"""Number of levels in the tile pyramid.
The number of levels is such that the highest level (level_count - 1)
is at a 1:1 ratio with the slide baseline resolution (level 0 of the WSI).
"""
wsi_to_tile_ratio = np.divide(self.wsi.info.slide_dimensions, self.tile_size)
# Levels where a tile contains only part of the wsi
super_level_count = np.ceil(np.log2(wsi_to_tile_ratio)).max()
total_level_count = super_level_count + 1 + self.sub_tile_level_count
return int(total_level_count)
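# Worked example (illustrative, not part of the original class): for a
# hypothetical 100,000 x 80,000 pixel slide with tile_size=256:
# >>> import numpy as np
# >>> np.ceil(np.log2(np.divide((100_000, 80_000), 256))).max()
# 9.0
# so level_count = 9 + 1 + sub_tile_level_count = 10 for the base class.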
def get_thumb_tile(self) -> Image:
"""Return a thumbnail which fits the whole slide in one tile.
The thumbnail output size has the longest edge equal to the tile
size. The other edge preserves the original aspect ratio.
"""
slide_dims = np.array(self.wsi.info.slide_dimensions)
tile_dim = self.tile_size + self.overlap
out_dims = np.round(slide_dims / slide_dims.max() * tile_dim).astype(int)
bounds = (0, 0, *slide_dims)
thumb = self.wsi.read_bounds(
bounds, resolution=self.wsi.info.level_count - 1, units="level"
)
thumb = imresize(thumb, output_size=out_dims)
return Image.fromarray(thumb)
def get_tile(
self,
level: int,
x: int,
y: int,
res: int = 1,
pad_mode: str = "constant",
interpolation: str = "optimise",
) -> Image:
"""Get a tile at a given level and coordinate.
Note that levels are in the reverse order of those in WSIReader.
That is, level 0 here corresponds to the lowest resolution whereas
level 0 in WSIReader corresponds to the maximum resolution
(baseline).
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
pad_mode (str):
Method for padding when reading areas outside the
input image. Default is constant (0 padding). This is
passed to `read_func` which defaults to
:func:`safe_padded_read`. See :func:`safe_padded_read`
for supported pad modes. Setting to "none" or None will
result in no padding being applied.
interpolation (str):
Interpolation mode to use. Defaults to optimise.
Possible values are: linear, cubic, lanczos, nearest,
area, optimise. Linear most closely matches OpenSlide.
Returns:
PIL.Image:
Pillow image of the tile.
Example:
>>> from tiatoolbox.tools.pyramid import TilePyramidGenerator
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open("sample.svs")
>>> tile_generator = TilePyramidGenerator(
... wsi=wsi,
... tile_size=256,
... )
>>> tile_0_0_0 = tile_generator.get_tile(level=0, x=0, y=0)
"""
if level < 0:
raise IndexError
if level > self.level_count:
raise IndexError("Invalid level.")
scale = self.level_downsample(level)
baseline_x = (x * self.tile_size * scale) - (self.overlap * scale)
baseline_y = (y * self.tile_size * scale) - (self.overlap * scale)
output_size = [self.output_tile_size] * 2
coord = (int(baseline_x), int(baseline_y))
if level < self.sub_tile_level_count:
output_size = self.output_tile_size // 2 ** (
self.sub_tile_level_count - level
)
output_size = np.repeat(output_size, 2).astype(int)
thumb = self.get_thumb_tile()
thumb.thumbnail(output_size)
return thumb
slide_dimensions = np.array(self.wsi.info.slide_dimensions)
if all(slide_dimensions < [baseline_x, baseline_y]):
raise IndexError
# Don't print out multiple warnings about interpolation etc.
duplicate_filter = DuplicateFilter()
logger.addFilter(duplicate_filter)
tile = self.wsi.read_rect(
coord,
size=[v * res for v in output_size],
resolution=res / scale,
units="baseline",
pad_mode=pad_mode,
interpolation=interpolation,
)
logger.removeFilter(duplicate_filter)
return Image.fromarray(tile)
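# Worked example (illustrative): a tile index maps to a baseline read
# coordinate via scale = level_downsample(level), i.e.
# baseline_x = x * tile_size * scale - overlap * scale. With tile_size=256,
# overlap=0, level_count=10 and level=9 (baseline, so scale=1), tile
# (x=2, y=3) is read from baseline coordinate (512, 768).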
def tile_path(self, level: int, x: int, y: int) -> Path:
"""Generate the path for a specified tile.
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
Returns:
pathlib.Path:
A pathlib path object with two parts.
"""
raise NotImplementedError
def dump( # noqa: CCR001
self, path: Union[str, Path], container=None, compression=None
):
"""Write all tiles to disk.
Arguments:
path (str or Path):
The path to write the tiles to.
container (str):
Container to use. Defaults to None which saves to a
directory. Possible values are "zip", "tar".
compression (str):
Compression method. Defaults to None. Possible values
are None, "deflate", "gzip", "bz2", "lzma". Note that
tar does not support deflate and zip does not support
gzip.
Examples:
>>> from tiatoolbox.tools.pyramid import TilePyramidGenerator
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open("sample.svs")
>>> tile_generator = TilePyramidGenerator(
... wsi=wsi,
... tile_size=256,
... )
>>> tile_generator.dump(
... path="sample.tar.gz",
... container="tar",
... compression="gzip",
... )
"""
path = Path(path)
if container not in [None, "zip", "tar"]:
raise ValueError("Unsupported container.")
if container is None:
path.mkdir(parents=False)
if compression is not None:
raise ValueError("Unsupported compression for container None.")
def save_tile(tile_path: Path, tile: Image.Image) -> None:
"""Write the tile to the output directory."""
full_path = path / tile_path
full_path.parent.mkdir(parents=True, exist_ok=True)
tile.save(full_path)
elif container == "zip":
compression2enum = {
None: zipfile.ZIP_STORED,
"deflate": zipfile.ZIP_DEFLATED,
"bz2": zipfile.ZIP_BZIP2,
"lzma": zipfile.ZIP_LZMA,
}
if compression not in compression2enum:
raise ValueError("Unsupported compression for zip.")
archive = zipfile.ZipFile(
path, mode="w", compression=compression2enum[compression]
)
def save_tile(tile_path: Path, tile: Image.Image) -> None:
"""Write the tile to the output tar."""
bio = BytesIO()
tile.save(bio, format="jpeg")
bio.seek(0)
data = bio.read()
archive.writestr(
str(tile_path),
data,
compress_type=compression2enum[compression],
)
else: # container == "tar":
compression2mode = {
None: "w",
"gzip": "w:gz",
"bz2": "w:bz2",
"lzma": "w:xz",
}
if compression not in compression2mode:
raise ValueError("Unsupported compression for tar.")
archive = tarfile.TarFile.open(path, mode=compression2mode[compression])
def save_tile(tile_path: Path, tile: Image.Image) -> None:
"""Write the tile to the output tar archive."""
bio = BytesIO()
tile.save(bio, format="jpeg")
tar_info = tarfile.TarInfo(name=str(tile_path))
tar_info.mtime = time.time()
tar_info.size = bio.tell()  # size of the JPEG data just written
bio.seek(0)  # rewind so addfile reads from the start of the buffer
archive.addfile(tarinfo=tar_info, fileobj=bio)
for level in range(self.level_count):
for x, y in np.ndindex(self.tile_grid_size(level)):
tile = self.get_tile(level=level, x=x, y=y)
tile_path = self.tile_path(level, x, y)
save_tile(tile_path, tile)
if container is not None:
archive.close()
def __len__(self) -> int:
return sum(
np.prod(self.tile_grid_size(level)) for level in range(self.level_count)
)
def __iter__(self) -> Iterable:
for level in range(self.level_count):
for x, y in np.ndindex(self.tile_grid_size(level)):
yield self.get_tile(level=level, x=x, y=y)
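# Illustrative sketch (not part of the original class): the generator is
# sized and iterable, so every tile can be walked lazily from the lowest to
# the highest resolution level. Assumes `wsi` is an open WSIReader.
# >>> tile_generator = TilePyramidGenerator(wsi, tile_size=256)
# >>> total = len(tile_generator)
# >>> for tile in tile_generator:  # each tile is a PIL Image
# ...     pass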
class ZoomifyGenerator(TilePyramidGenerator):
r"""Pyramid tile generator with extra Zoomify specific methods.
Zoomify splits tiles into groups of 256 (due to old file system
limitations). The extra `tile_group` method here is for calculating
these tile groups when generating tile paths.
An old description of the Zoomify format can be found `here`_.
.. _here:
https://ecommons.cornell.edu/bitstream/handle/1813/5410/Introducing_Zoomify_Image.pdf
Args:
wsi (WSIReader):
The WSI reader object. Must implement
`tiatoolbox.wsicore.wsireader.WSIReader.read_rect`.
tile_size (int):
The size of tiles to generate. Default is 256. Note that the
output tile size will be :math:`\text{tile size} + 2
\times\text{overlap}`.
downsample (int):
The downsample factor between levels. Default is 2.
tile_overlap (int):
The number of extra pixels to add to each edge of the tile.
Default is 0.
"""
def tile_group(self, level: int, x: int, y: int) -> int:
"""Find the tile group for a tile index.
Tile groups are numbered from level 0 (tile 0-0-0) and increment
every 256 tiles in ZXY axis order.
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
Raises:
IndexError:
If the level, x, y tile index is out of bounds.
Returns:
int:
The tile group for the specified tile.
"""
grid_size = np.array(self.tile_grid_size(level))
if any(grid_size <= [x, y]):
raise IndexError
cumulative_sum = sum(np.prod(self.tile_grid_size(n)) for n in range(level))
index_in_level = np.ravel_multi_index((y, x), self.tile_grid_size(level)[::-1])
tile_index = cumulative_sum + index_in_level
return tile_index // 256 # the tile group
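# Worked example (illustrative): with a single-tile level 0 and a 2x2 grid
# at level 1, tile (level=1, x=1, y=0) has cumulative index
# 1 + ravel_multi_index((0, 1), (2, 2)) = 2, and 2 // 256 = 0, so it falls
# in TileGroup0. A group beyond 0 only appears once 256 tiles accumulate.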
def tile_path(self, level: int, x: int, y: int) -> Path:
"""Generate the Zoomify path for a specified tile.
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
Returns:
pathlib.Path:
A pathlib path object with two parts.
"""
g = self.tile_group(level, x, y)
z = level
return Path(f"TileGroup{g}") / f"{z}-{x}-{y}.jpg"
class AnnotationTileGenerator(ZoomifyGenerator):
r"""Tile generator using an AnnotationRenderer to render tiles
showing annotations held in an AnnotationStore
Args:
info (WSIMeta):
An WSIMeta Object storing the metadata of the slide this
generator is rendering tiles for
Store (AnnotationStore):
An AnnotationStore Object containing annotations to be
rendered for given slide
renderer (AnnotationRenderer):
An AnnotationRenderer Object which will render annotations
belonging to a tile according to specified parameters
tile_size (int):
The size of tiles to generate. Default is 256. Note that the
output tile size will be :math:`\text{tile size} + 2
\times\text{overlap}`.
downsample (int):
The downsample factor between levels. Default is 2.
overlap (int):
The number of extra pixels to add to each edge of the tile.
Default is 0.
"""
def __init__(
self,
info: WSIMeta,
store: AnnotationStore,
renderer: AnnotationRenderer = None,
tile_size: int = 256,
downsample: int = 2,
overlap: int = 0,
):
super().__init__(None, tile_size, downsample, overlap)
self.info = info
self.store = store
if renderer is None:
renderer = AnnotationRenderer()
self.renderer = renderer
# if using blur, render overlapping tiles to minimise edge effects.
# The factor of 1.5 below was chosen empirically as a good balance
# between visual quality and added rendering time.
self.overlap = int(1.5 * renderer.blur_radius)
output_size = [self.output_tile_size] * 2
self.empty_img = Image.fromarray(
np.zeros((output_size[0], output_size[1], 4), dtype=np.uint8)
)
if self.renderer.mapper == "categorical":
# get the possible categories for given score_prop from store
types = self.store.pquery(f"props[{self.renderer.score_prop!r}]")
# make a random dictionary colour map
colors = random_colors(len(types))
mapper = {key: (*color, 1) for key, color in zip(types, colors)}
self.renderer.mapper = lambda x: mapper[x]
def get_thumb_tile(self) -> Image:
"""Return a thumbnail which fits the whole slide in one tile.
The thumbnail output size has the longest edge equal to the tile
size. The other edge preserves the original aspect ratio.
"""
slide_dims = np.array(self.info.slide_dimensions)
scale = self.level_downsample(self.level_count - 1)
bounds = (0, 0, *slide_dims)
thumb = self.renderer.render_annotations(self.store, bounds, scale)
return Image.fromarray(thumb)
def level_dimensions(self, level: int) -> Tuple[int, int]:
"""The total pixel dimensions of the tile pyramid at a given level.
Args:
level (int):
The level to calculate the dimensions for.
"""
baseline_dims = self.info.slide_dimensions
level_dims = np.ceil(
np.divide(baseline_dims, self.level_downsample(level))
).astype(int)
return tuple(level_dims)
@property
def level_count(self) -> int:
"""Number of levels in the tile pyramid.
The number of levels is such that the highest level (level_count - 1)
is at a 1:1 ratio with the slide baseline resolution (level 0 of the WSI).
"""
wsi_to_tile_ratio = np.divide(self.info.slide_dimensions, self.tile_size)
# Levels where a tile contains only part of the wsi
super_level_count = np.ceil(np.log2(wsi_to_tile_ratio)).max()
total_level_count = super_level_count + 1 + self.sub_tile_level_count
return int(total_level_count)
def get_tile(
self,
level: int,
x: int,
y: int,
res: int = 1,
pad_mode: str = None,
interpolation: str = None,
) -> Image:
"""Render a tile at a given level and coordinate.
Note that levels are in the reverse order of those in WSIReader.
That is, level 0 here corresponds to the lowest resolution whereas
level 0 in WSIReader corresponds to the maximum resolution
(baseline).
Args:
level (int):
The pyramid level of the tile starting from 0 (the whole
slide in one tile, 0-0-0).
x (int):
The tile index in the x direction.
y (int):
The tile index in the y direction.
Returns:
PIL.Image:
Pillow image of the tile.
Example:
>>> from tiatoolbox.tools.pyramid import AnnotationTileGenerator
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from tiatoolbox.annotation.storage import SQLiteStore
>>> wsi = WSIReader.open("sample.svs")
>>> SQ = SQLiteStore.from_geojson(geo_path)
>>> tile_generator = AnnotationTileGenerator(
... info=wsi.info,
... store=SQ,
... )
>>> tile_0_0_0 = tile_generator.get_tile(level=0, x=0, y=0)
"""
if pad_mode is not None or interpolation is not None:
logger.warning(
"interpolation, pad_mode are unused by AnnotationTileGenerator",
stacklevel=2,
)
if level < 0:
raise IndexError
if level > self.level_count:
raise IndexError("Invalid level.")
scale = self.level_downsample(level)
baseline_x = (x * self.tile_size * scale) - (self.overlap * scale)
baseline_y = (y * self.tile_size * scale) - (self.overlap * scale)
coord = [baseline_x, baseline_y]
if level < self.sub_tile_level_count:
output_size = self.output_tile_size // 2 ** (
self.sub_tile_level_count - level
)
output_size = np.repeat(output_size, 2).astype(int)
thumb = self.get_thumb_tile()
thumb.thumbnail(output_size)
return thumb
slide_dimensions = np.array(self.info.slide_dimensions)
if all(slide_dimensions < [baseline_x, baseline_y]):
raise IndexError
bounds = locsize2bounds(coord, [self.output_tile_size * scale] * 2)
tile = self.renderer.render_annotations(
self.store, bounds, scale, res, self.overlap
)
return Image.fromarray(tile)
# tiatoolbox-master/tiatoolbox/tools/stainextract.py
"""Stain matrix extraction for stain normalization."""
import numpy as np
from sklearn.decomposition import DictionaryLearning
from tiatoolbox.utils.misc import get_luminosity_tissue_mask
from tiatoolbox.utils.transforms import rgb2od
def vectors_in_correct_direction(e_vectors):
"""Points the eigen vectors in the right direction.
Args:
e_vectors (:class:`numpy.ndarray`):
Eigen vectors.
Returns:
:class:`numpy.ndarray`:
Pointing in the correct direction.
"""
if e_vectors[0, 0] < 0:
e_vectors[:, 0] *= -1
if e_vectors[0, 1] < 0:
e_vectors[:, 1] *= -1
return e_vectors
def h_and_e_in_right_order(v1, v2):
"""Rearrange input vectors for H&E in correct order with H as first output.
Args:
v1 (:class:`numpy.ndarray`):
Input vector for stain extraction.
v2 (:class:`numpy.ndarray`):
Input vector for stain extraction.
Returns:
:class:`numpy.ndarray`:
Input vectors in the correct order.
"""
if v1[0] > v2[0]:
return np.array([v1, v2])
return np.array([v2, v1])
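# Illustrative doctest (not part of the original module): the vector with
# the larger first component is treated as haematoxylin and returned first.
# >>> import numpy as np
# >>> h_and_e_in_right_order(np.array([0.2, 0.7, 0.5]), np.array([0.6, 0.1, 0.3]))
# array([[0.6, 0.1, 0.3],
#        [0.2, 0.7, 0.5]])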
def dl_output_for_h_and_e(dictionary):
"""Return correct value for H and E from dictionary learning output.
Args:
dictionary (:class:`numpy.ndarray`):
:class:`sklearn.decomposition.DictionaryLearning` output
Returns:
:class:`numpy.ndarray`:
With correct values for H and E.
"""
if dictionary[0, 0] < dictionary[1, 0]:
return dictionary[[1, 0], :]
return dictionary
class CustomExtractor:
"""Get the user-defined stain matrix.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Examples:
>>> from tiatoolbox.tools.stainextract import CustomExtractor
>>> from tiatoolbox.utils.misc import imread
>>> extractor = CustomExtractor(stain_matrix)
>>> img = imread('path/to/image')
>>> stain_matrix = extractor.get_stain_matrix(img)
"""
def __init__(self, stain_matrix):
self.stain_matrix = stain_matrix
if self.stain_matrix.shape not in [(2, 3), (3, 3)]:
raise ValueError("Stain matrix must have shape (2, 3) or (3, 3).")
def get_stain_matrix(self, _):
"""Get the user defined stain matrix.
Returns:
:class:`numpy.ndarray`:
User defined stain matrix.
"""
return self.stain_matrix
class RuifrokExtractor:
"""Reuifrok stain extractor.
Get the stain matrix as defined in:
Ruifrok, Arnout C., and Dennis A. Johnston. "Quantification of
histochemical staining by color deconvolution." Analytical and
quantitative cytology and histology 23.4 (2001): 291-299.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Examples:
>>> from tiatoolbox.tools.stainextract import RuifrokExtractor
>>> from tiatoolbox.utils.misc import imread
>>> extractor = RuifrokExtractor()
>>> img = imread('path/to/image')
>>> stain_matrix = extractor.get_stain_matrix(img)
"""
def __init__(self):
self.__stain_matrix = np.array([[0.65, 0.70, 0.29], [0.07, 0.99, 0.11]])
def get_stain_matrix(self, _):
"""Get the pre-defined stain matrix.
Returns:
:class:`numpy.ndarray`:
Pre-defined stain matrix.
"""
return self.__stain_matrix.copy()
class MacenkoExtractor:
"""Macenko stain extractor.
Get the stain matrix as defined in:
Macenko, Marc, et al. "A method for normalizing histology
slides for quantitative analysis." 2009 IEEE International
Symposium on Biomedical Imaging: From Nano to Macro. IEEE, 2009.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Args:
luminosity_threshold (float):
Threshold used for tissue area selection
angular_percentile (int):
Percentile of angular coordinates to be selected
with respect to the principal, orthogonal eigenvectors.
Examples:
>>> from tiatoolbox.tools.stainextract import MacenkoExtractor
>>> from tiatoolbox.utils.misc import imread
>>> extractor = MacenkoExtractor()
>>> img = imread('path/to/image')
>>> stain_matrix = extractor.get_stain_matrix(img)
"""
def __init__(self, luminosity_threshold=0.8, angular_percentile=99):
self.__luminosity_threshold = luminosity_threshold
self.__angular_percentile = angular_percentile
def get_stain_matrix(self, img):
"""Stain matrix estimation.
Args:
img (:class:`numpy.ndarray`):
Input image used for stain matrix estimation.
Returns:
:class:`numpy.ndarray`:
Estimated stain matrix.
"""
img = img.astype("uint8") # ensure input image is uint8
luminosity_threshold = self.__luminosity_threshold
angular_percentile = self.__angular_percentile
# convert to OD and ignore background
tissue_mask = get_luminosity_tissue_mask(
img, threshold=luminosity_threshold
).reshape((-1,))
img_od = rgb2od(img).reshape((-1, 3))
img_od = img_od[tissue_mask]
# eigenvectors of covariance in OD space (orthogonal as covariance symmetric)
_, eigen_vectors = np.linalg.eigh(np.cov(img_od, rowvar=False))
# the two principal eigenvectors
eigen_vectors = eigen_vectors[:, [2, 1]]
# make sure vectors are pointing the right way
eigen_vectors = vectors_in_correct_direction(e_vectors=eigen_vectors)
# project on this basis.
proj = np.dot(img_od, eigen_vectors)
# angular coordinates with respect to the principal, orthogonal eigenvectors
phi = np.arctan2(proj[:, 1], proj[:, 0])
# min and max angles
min_phi = np.percentile(phi, 100 - angular_percentile)
max_phi = np.percentile(phi, angular_percentile)
# the two principal colors
v1 = np.dot(eigen_vectors, np.array([np.cos(min_phi), np.sin(min_phi)]))
v2 = np.dot(eigen_vectors, np.array([np.cos(max_phi), np.sin(max_phi)]))
# order of H&E - H first row
he = h_and_e_in_right_order(v1, v2)
return he / np.linalg.norm(he, axis=1)[:, None]
class VahadaneExtractor:
"""Vahadane stain extractor.
Get the stain matrix as defined in:
Vahadane, Abhishek, et al. "Structure-preserving color normalization
and sparse stain separation for histological images."
IEEE transactions on medical imaging 35.8 (2016): 1962-1971.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Args:
luminosity_threshold (float):
Threshold used for tissue area selection.
regularizer (float):
Regularizer used in dictionary learning.
Examples:
>>> from tiatoolbox.tools.stainextract import VahadaneExtractor
>>> from tiatoolbox.utils.misc import imread
>>> extractor = VahadaneExtractor()
>>> img = imread('path/to/image')
>>> stain_matrix = extractor.get_stain_matrix(img)
"""
def __init__(self, luminosity_threshold=0.8, regularizer=0.1):
self.__luminosity_threshold = luminosity_threshold
self.__regularizer = regularizer
def get_stain_matrix(self, img):
"""Stain matrix estimation.
Args:
img (:class:`numpy.ndarray`):
Input image used for stain matrix estimation
Returns:
:class:`numpy.ndarray`:
Estimated stain matrix.
"""
img = img.astype("uint8") # ensure input image is uint8
luminosity_threshold = self.__luminosity_threshold
regularizer = self.__regularizer
# convert to OD and ignore background
tissue_mask = get_luminosity_tissue_mask(
img, threshold=luminosity_threshold
).reshape((-1,))
img_od = rgb2od(img).reshape((-1, 3))
img_od = img_od[tissue_mask]
# do the dictionary learning
dl = DictionaryLearning(
n_components=2,
alpha=regularizer,
transform_alpha=regularizer,
fit_algorithm="lars",
transform_algorithm="lasso_lars",
positive_dict=True,
verbose=False,
max_iter=3,
transform_max_iter=1000,
)
dictionary = dl.fit_transform(X=img_od.T).T
# order H and E.
# H on first row.
dictionary = dl_output_for_h_and_e(dictionary)
return dictionary / np.linalg.norm(dictionary, axis=1)[:, None]
# tiatoolbox-master/tiatoolbox/tools/__init__.py
"""This package contains various tools for working with WSIs."""
from tiatoolbox.tools import (
graph,
patchextraction,
pyramid,
registration,
stainaugment,
stainextract,
stainnorm,
tissuemask,
)
# tiatoolbox-master/tiatoolbox/tools/stainaugment.py
"""Stain augmentation."""
import copy
import random
import numpy as np
from albumentations.core.transforms_interface import ImageOnlyTransform
from tiatoolbox.tools.stainnorm import get_normalizer
from tiatoolbox.utils.misc import get_luminosity_tissue_mask
class StainAugmentor(ImageOnlyTransform):
"""Stain augmentation using predefined stain matrix or stain extraction methods.
This stain augmentation class can be used in 'albumentations'
augmentation pipelines as well as standalone. There is an option to
use predefined `stain_matrix` in the input which enables the
`StainAugmentor` to generate augmented images faster or do stain
normalization to a specific target `stain_matrix`. Having the stain
matrix beforehand removes the need for dictionary learning during
stain matrix extraction, which speeds up the stain
augmentation/normalization process and makes it more appropriate
for on-the-fly stain augmentation/normalization.
Args:
method (str):
The method to use for stain matrix and stain concentration
extraction. Can be either "vahadane" (default) or "macenko".
stain_matrix (:class:`numpy.ndarray`):
Pre-extracted stain matrix of a target image. This can be
used for both on-the-fly stain normalization and faster
stain augmentation. User can use tools in
`tiatoolbox.tools.stainextract` to extract this information.
If None (default), the stain matrix will be automatically
extracted using the method specified by user.
sigma1 (float):
Controls the extent of the stain concentrations scale
parameter (`alpha` belonging to [1-sigma1, 1+sigma1] range).
Default is 0.4.
sigma2 (float):
Controls the extent of the stain concentrations shift
parameter (`beta` belonging to [-sigma2, sigma2] range).
Default is 0.2.
augment_background (bool):
Specifies whether to apply stain augmentation on the
background or not. Default is False, which indicates that
only tissue region will be stain augmented.
always_apply (bool):
For use with the 'albumentations' pipeline. Defaults to
False. Please refer to the albumentations documentation
for more information.
p (float):
For use with the 'albumentations' pipeline; specifies the
probability of applying the augmentation. Please refer to
the albumentations documentation for more information.
Attributes:
stain_normalizer:
Fitted stain normalization class.
stain_matrix (:class:`numpy.ndarray`):
extracted stain matrix from the image
source_concentrations (:class:`numpy.ndarray`):
Extracted stain concentrations from the input image.
n_stains (int):
Number of stain channels in the stain concentrations.
Expected to be 2 for H&E stained images.
tissue_mask (:class:`numpy.ndarray`):
Tissue region mask in the image.
Examples:
>>> '''Using the stain augmentor in the 'albumentations' pipeline'''
>>> from tiatoolbox.tools.stainaugment import StainAugmentor
>>> import albumentations as A
>>> # Defining an exemplar stain matrix as reference
>>> stain_matrix = np.array([[0.91633014, -0.20408072, -0.34451435],
... [0.17669817, 0.92528011, 0.33561059]])
>>> # Define albumentations pipeline
>>> aug_pipeline = A.Compose([
... A.RandomRotate90(),
... A.Flip(),
... StainAugmentor(stain_matrix=stain_matrix)
... ])
>>> # apply the albumentations pipeline on an image (RGB numpy uint8 type)
>>> img_aug = aug_pipeline(image=img)['image']
>>> '''Using the stain augmentor standalone'''
>>> from tiatoolbox.tools.stainaugment import StainAugmentor
>>> # Defining an exemplar stain matrix as reference
>>> stain_matrix = np.array([[0.91633014, -0.20408072, -0.34451435],
... [0.17669817, 0.92528011, 0.33561059]])
>>> # Instantiate the stain augmentor and fit it on an image
>>> stain_augmentor = StainAugmentor(stain_matrix=stain_matrix)
>>> stain_augmentor.fit(img)
>>> # Now using the fitted `stain_augmentor` in a loop to generate
>>> # several augmented instances from the same image.
>>> for i in range(10):
... img_aug = stain_augmentor.augment()
"""
def __init__(
self,
method: str = "vahadane",
stain_matrix: np.ndarray = None,
sigma1: float = 0.4,
sigma2: float = 0.2,
augment_background: bool = False,
always_apply=False,
p=0.5,
) -> None:
super().__init__(always_apply=always_apply, p=p)
self.augment_background = augment_background
self.sigma1 = sigma1
self.sigma2 = sigma2
self.method = method
self.stain_matrix = stain_matrix
if self.method.lower() not in {"macenko", "vahadane"}:
raise ValueError(
f"Unsupported stain extractor method {self.method!r} for "
"StainAugmentor. Choose either 'vahadane' or 'macenko'."
)
self.stain_normalizer = get_normalizer(self.method.lower())
self.alpha = None
self.beta = None
self.img_shape = None
self.tissue_mask = None
self.n_stains = None
self.source_concentrations = None
def fit(self, img, threshold=0.85):
"""Fit function to extract information needed for stain augmentation.
The `fit` function uses either 'Macenko' or 'Vahadane' stain
extraction methods to extract stain matrix and stain
concentrations of the input image to be used in the `augment`
function.
Args:
img (:class:`numpy.ndarray`):
RGB image in the form of uint8 numpy array.
threshold (float):
The threshold value used to find tissue mask from the
luminosity component of the image. The found
`tissue_mask` will be used to filter out background area
in stain augmentation process upon user setting
`augment_background=False`.
"""
if self.stain_matrix is None:
self.stain_normalizer.fit(img)
self.stain_matrix = self.stain_normalizer.stain_matrix_target
self.source_concentrations = self.stain_normalizer.target_concentrations
else:
self.source_concentrations = self.stain_normalizer.get_concentrations(
img, self.stain_matrix
)
self.n_stains = self.source_concentrations.shape[1]
if not self.augment_background:
self.tissue_mask = get_luminosity_tissue_mask(
img, threshold=threshold
).ravel()
self.img_shape = img.shape
def augment(self):
"""Return an augmented instance based on source stain concentrations.
Stain concentrations of the source image are altered (scaled and
shifted) based on the random alpha and beta parameters, and then
an augmented image is reconstructed from the altered
concentrations. All parameters needed for this part are
calculated when calling `fit()` function.
Returns:
:class:`numpy.ndarray`:
Stain augmented image.
"""
augmented_concentrations = copy.deepcopy(self.source_concentrations)
for i in range(self.n_stains):
self.get_params()
if self.augment_background:
augmented_concentrations[:, i] *= self.alpha
augmented_concentrations[:, i] += self.beta
else:
augmented_concentrations[self.tissue_mask, i] *= self.alpha
augmented_concentrations[self.tissue_mask, i] += self.beta
img_augmented = 255 * np.exp(
-1 * np.dot(augmented_concentrations, self.stain_matrix)
)
img_augmented = img_augmented.reshape(self.img_shape)
img_augmented = np.clip(img_augmented, 0, 255)
return np.uint8(img_augmented)
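# Note (illustrative): the reconstruction above follows the Beer-Lambert
# relation used elsewhere in the toolbox, RGB = 255 * exp(-C @ M), where C
# holds the perturbed per-pixel stain concentrations and M is the stain
# matrix. A concentration of 0 maps to 255 (white background); larger
# concentrations darken the pixel.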
def apply(self, img, **params): # alpha=None, beta=None, # skipcq: PYL-W0613
"""Call the `fit` and `augment` functions to generate a stain augmented image.
Args:
img (:class:`numpy.ndarray`):
Input RGB image in the form of uint8 numpy array.
Returns:
:class:`numpy.ndarray`:
Stain augmented image with the same size and format as
the input img.
"""
self.fit(img, threshold=0.85)
return self.augment()
def get_params(self):
"""Returns randomly generated parameters based on input arguments."""
self.alpha = random.uniform(1 - self.sigma1, 1 + self.sigma1)
self.beta = random.uniform(-self.sigma2, self.sigma2)
return {}
def get_params_dependent_on_targets(self, params): # skipcq: PYL-W0613, PYL-R0201
"""Does nothing, added to resolve flake 8 error"""
return {}
@staticmethod
def get_transform_init_args_names(**kwargs):
"""Return the argument names for albumentations use."""
return "method", "stain_matrix", "sigma1", "sigma2", "augment_background"
# tiatoolbox-master/tiatoolbox/tools/tissuemask.py
"""Methods of masking tissue and background."""
from abc import ABC, abstractmethod
import cv2
import numpy as np
from skimage.filters import threshold_otsu
from tiatoolbox.utils.misc import objective_power2mpp
class TissueMasker(ABC):
"""Base class for tissue maskers.
Takes an image as input and outputs a mask.
"""
def __init__(self) -> None:
super().__init__()
self.fitted = False
@abstractmethod
def fit(self, images: np.ndarray, masks=None) -> None:
"""Fit the masker to the images and parameters.
Args:
images (:class:`numpy.ndarray`):
List of images, usually WSI thumbnails. Expected shape is
NHWC (number images, height, width, channels).
masks (:class:`numpy.ndarray`):
Target/ground-truth masks. Expected shape is NHW (n
images, height, width).
"""
@abstractmethod
def transform(self, images: np.ndarray) -> np.ndarray:
"""Create and return a tissue mask.
Args:
images (:class:`numpy.ndarray`):
RGB image, usually a WSI thumbnail.
Returns:
:class:`numpy.ndarray`:
Map of semantic classes spatially over the WSI
e.g. regions of tissue vs background.
"""
if not self.fitted:
raise SyntaxError("Fit must be called before transform.")
def fit_transform(self, images: np.ndarray, **kwargs) -> np.ndarray:
"""Perform :func:`fit` then :func:`transform`.
Sometimes it can be more optimal to perform both at the same
time for a single sample. In this case the base implementation
of :func:`fit` followed by :func:`transform` can be overridden.
Args:
images (:class:`numpy.ndarray`):
Image to create mask from.
**kwargs (dict):
Other key word arguments passed to fit.
"""
self.fit(images, **kwargs)
return self.transform(images)
class OtsuTissueMasker(TissueMasker):
"""Tissue masker which uses Otsu's method to determine background.
Otsu's method.
Examples:
>>> from tiatoolbox.tools.tissuemask import OtsuTissueMasker
>>> masker = OtsuTissueMasker()
>>> masker.fit([thumbnail])
>>> masks = masker.transform([thumbnail])
>>> from tiatoolbox.tools.tissuemask import OtsuTissueMasker
>>> masker = OtsuTissueMasker()
>>> masks = masker.fit_transform([thumbnail])
"""
def __init__(self) -> None:
super().__init__()
self.threshold = None
def fit(self, images: np.ndarray, masks=None) -> None:
"""Find a binary threshold using Otsu's method.
Args:
images (:class:`numpy.ndarray`):
List of images with a length 4 shape (N, height, width,
channels).
masks (:class:`numpy.ndarray`):
Unused here, for API consistency.
"""
images_shape = np.shape(images)
if len(images_shape) != 4:
raise ValueError(
"Expected 4 dimensional input shape (N, height, width, 3)"
f" but received shape of {images_shape}."
)
# Convert RGB images to greyscale
grey_images = [x[..., 0] for x in images]
if images_shape[-1] == 3:
grey_images = np.zeros(images_shape[:-1], dtype=np.uint8)
for n, image in enumerate(images):
grey_images[n] = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
pixels = np.concatenate([np.array(grey).flatten() for grey in grey_images])
# Find Otsu's threshold for all pixels
self.threshold = threshold_otsu(pixels)
self.fitted = True
def transform(self, images: np.ndarray) -> np.ndarray:
"""Create masks using the threshold found during :func:`fit`.
Args:
images (:class:`numpy.ndarray`):
List of images with a length 4 shape (N, height, width,
channels).
Returns:
:class:`numpy.ndarray`:
List of binary masks, one per input image, each with
shape (height, width).
"""
super().transform(images)
masks = []
for image in images:
grey = image[..., 0]
if len(image.shape) == 3 and image.shape[-1] == 3:
grey = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
mask = (grey < self.threshold).astype(bool)
masks.append(mask)
return masks
class MorphologicalMasker(OtsuTissueMasker):
"""Tissue masker which uses a threshold and simple morphological operations.
This method applies Otsu's threshold before a simple small region
removal, followed by a morphological dilation. The kernel for the
dilation is an ellipse of size 32/mpp unless a value is given for
kernel_size. MPP is estimated from objective power via
func:`tiatoolbox.utils.misc.objective_power2mpp` if a power argument
is given instead of mpp to the initialiser.
For small region removal, the minimum area size defaults to the area
of the kernel. If no mpp, objective power, or kernel_size arguments
are given then the kernel defaults to a size of 1x1.
The scale of the morphological operations can also be manually
specified with the `kernel_size` argument, for example if the
automatic scale from mpp or objective power is too large or small.
Examples:
>>> from tiatoolbox.tools.tissuemask import MorphologicalMasker
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open("slide.svs")
>>> thumbnail = wsi.slide_thumbnail(32, "mpp")
>>> masker = MorphologicalMasker(mpp=32)
>>> masks = masker.fit_transform([thumbnail])
An example reading a thumbnail from a file where the objective power
is known:
>>> from tiatoolbox.tools.tissuemask import MorphologicalMasker
>>> from tiatoolbox.utils.misc import imread
>>> thumbnail = imread("thumbnail.png")
>>> masker = MorphologicalMasker(power=1.25)
>>> masks = masker.fit_transform([thumbnail])
"""
def __init__(
self, *, mpp=None, power=None, kernel_size=None, min_region_size=None
) -> None:
"""Initialise a morphological masker.
Args:
mpp (float or tuple(float)):
The microns per-pixel of the image to be masked. Used to
calculate kernel_size as 32/mpp, optional.
power (float or tuple(float)):
The objective power of the image to be masked. Used to
calculate kernel_size as 32/objective_power2mpp(power),
optional.
kernel_size (int or tuple(int)):
Size of elliptical kernel in x and y, optional.
min_region_size (int):
Minimum region size in pixels to consider as foreground.
Defaults to area of the kernel.
"""
super().__init__()
self.min_region_size = min_region_size
self.threshold = None
# Check for conflicting arguments
if sum(arg is not None for arg in [mpp, power, kernel_size]) > 1:
raise ValueError("Only one of mpp, power, kernel_size can be given.")
# Default to kernel_size of (1, 1) if no arguments given
if all(arg is None for arg in [mpp, power, kernel_size]):
kernel_size = np.array([1, 1])
# Convert (objective) power approximately to MPP to unify units
if power is not None:
mpp = objective_power2mpp(power)
# Convert MPP to an integer kernel_size
if mpp is not None:
mpp = np.array(mpp)
if mpp.size != 2:
mpp = mpp.repeat(2)
kernel_size = np.max([32 / mpp, [1, 1]], axis=0)
# Ensure kernel_size is a length 2 numpy array
kernel_size = np.array(kernel_size)
if kernel_size.size != 2:
kernel_size = kernel_size.repeat(2)
# Convert to an integer pair
self.kernel_size = tuple(np.round(kernel_size).astype(int))
# Create structuring element for morphological operations
self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, self.kernel_size)
# Set min region size to kernel area if None
if self.min_region_size is None:
self.min_region_size = np.sum(self.kernel)
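# Illustrative sketch (not part of the original class): the kernel scale
# follows directly from mpp, e.g. mpp=8 gives a 4x4 elliptical kernel
# since 32 / 8 = 4.
# >>> masker = MorphologicalMasker(mpp=8)
# >>> masker.kernel_size
# (4, 4)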
def transform(self, images: np.ndarray):
"""Create masks using the found threshold followed by morphological operations.
Args:
images (:class:`numpy.ndarray`):
List of images with a length 4 shape (N, height, width,
channels).
Returns:
:class:`numpy.ndarray`:
List of binary masks, one per input image, each with
shape (height, width).
"""
super().transform(images)
results = []
for image in images:
if len(image.shape) == 3 and image.shape[-1] == 3:
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
gray = image
mask = (gray < self.threshold).astype(np.uint8)
_, output, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)
sizes = stats[1:, -1]
for i, size in enumerate(sizes):
if size < self.min_region_size:
mask[output == i + 1] = 0
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, self.kernel)
results.append(mask.astype(bool))
return results
# tiatoolbox-master/tiatoolbox/tools/stainnorm.py
"""Stain normalization classes."""
import cv2
import numpy as np
from tiatoolbox.tools.stainextract import (
CustomExtractor,
MacenkoExtractor,
RuifrokExtractor,
VahadaneExtractor,
)
from tiatoolbox.utils.exceptions import MethodNotSupported
from tiatoolbox.utils.misc import load_stain_matrix
from tiatoolbox.utils.transforms import od2rgb, rgb2od
class StainNormalizer:
"""Stain normalization base class.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Attributes:
extractor (CustomExtractor, RuifrokExtractor):
Method specific stain extractor.
stain_matrix_target (:class:`numpy.ndarray`):
Stain matrix of target.
target_concentrations (:class:`numpy.ndarray`):
Stain concentration matrix of target.
maxC_target (:class:`numpy.ndarray`):
99th percentile of each stain.
stain_matrix_target_RGB (:class:`numpy.ndarray`):
Target stain matrix in RGB.
"""
def __init__(self):
self.extractor = None
self.stain_matrix_target = None
self.target_concentrations = None
self.maxC_target = None
self.stain_matrix_target_RGB = None
@staticmethod
def get_concentrations(img, stain_matrix):
"""Estimate concentration matrix given an image and stain matrix.
Args:
img (:class:`numpy.ndarray`):
Input image.
stain_matrix (:class:`numpy.ndarray`):
Stain matrix for haematoxylin and eosin stains.
Returns:
numpy.ndarray:
Stain concentrations of input image.
"""
od = rgb2od(img).reshape((-1, 3))
x, _, _, _ = np.linalg.lstsq(stain_matrix.T, od.T, rcond=-1)
return x.T
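# Illustrative note: this solves the Beer-Lambert model OD = C @ M for the
# concentrations C by least squares, where M is the 2x3 stain matrix and OD
# is the per-pixel optical density. A minimal sketch, assuming `img` is an
# RGB uint8 array and `stain_matrix` a (2, 3) array:
# >>> c = StainNormalizer.get_concentrations(img, stain_matrix)
# >>> c.shape  # (n_pixels, 2): one concentration per stain per pixel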
def fit(self, target):
"""Fit to a target image.
Args:
target (:class:`numpy.ndarray` of type :class:`numpy.uint8`):
Target/reference image.
"""
self.stain_matrix_target = self.extractor.get_stain_matrix(target)
self.target_concentrations = self.get_concentrations(
target, self.stain_matrix_target
)
self.maxC_target = np.percentile(
self.target_concentrations, 99, axis=0
).reshape((1, 2))
# useful to visualize.
self.stain_matrix_target_RGB = od2rgb(self.stain_matrix_target)
def transform(self, img):
"""Transform an image.
Args:
img (:class:`numpy.ndarray` of type :class:`numpy.uint8`):
RGB input source image.
Returns:
:class:`numpy.ndarray`:
RGB stain normalized image.
"""
stain_matrix_source = self.extractor.get_stain_matrix(img)
source_concentrations = self.get_concentrations(img, stain_matrix_source)
max_c_source = np.percentile(source_concentrations, 99, axis=0).reshape((1, 2))
source_concentrations *= self.maxC_target / max_c_source
trans = 255 * np.exp(
-1 * np.dot(source_concentrations, self.stain_matrix_target)
)
# ensure between 0 and 255
trans[trans > 255] = 255
trans[trans < 0] = 0
return trans.reshape(img.shape).astype(np.uint8)
class CustomNormalizer(StainNormalizer):
"""Stain Normalization using a user-defined stain matrix.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Args:
stain_matrix (:class:`numpy.ndarray`):
User-defined stain matrix. Must be either 2x3 or 3x3.
Examples:
>>> from tiatoolbox.tools.stainnorm import CustomNormalizer
>>> norm = CustomNormalizer(stain_matrix)
>>> norm.fit(target_img)
>>> norm_img = norm.transform(source_img)
"""
def __init__(self, stain_matrix):
super().__init__()
self.extractor = CustomExtractor(stain_matrix)
class RuifrokNormalizer(StainNormalizer):
"""Ruifrok & Johnston stain normalizer.
Normalize a patch to the stain appearance of the target image using
the method of:
Ruifrok, Arnout C., and Dennis A. Johnston. "Quantification of
histochemical staining by color deconvolution." Analytical and
quantitative cytology and histology 23.4 (2001): 291-299.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Examples:
>>> from tiatoolbox.tools.stainnorm import RuifrokNormalizer
>>> norm = RuifrokNormalizer()
>>> norm.fit(target_img)
>>> norm_img = norm.transform(source_img)
"""
def __init__(self):
super().__init__()
self.extractor = RuifrokExtractor()
class MacenkoNormalizer(StainNormalizer):
"""Macenko stain normalizer.
Normalize a patch to the stain appearance of the target image using
the method of:
Macenko, Marc, et al. "A method for normalizing histology slides for
quantitative analysis." 2009 IEEE International Symposium on
Biomedical Imaging: From Nano to Macro. IEEE, 2009.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Examples:
>>> from tiatoolbox.tools.stainnorm import MacenkoNormalizer
>>> norm = MacenkoNormalizer()
>>> norm.fit(target_img)
>>> norm_img = norm.transform(source_img)
"""
def __init__(self):
super().__init__()
self.extractor = MacenkoExtractor()
class VahadaneNormalizer(StainNormalizer):
"""Vahadane stain normalizer.
Normalize a patch to the stain appearance of the target image using
the method of:
Vahadane, Abhishek, et al. "Structure-preserving color normalization
and sparse stain separation for histological images." IEEE
transactions on medical imaging 35.8 (2016): 1962-1971.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Examples:
>>> from tiatoolbox.tools.stainnorm import VahadaneNormalizer
>>> norm = VahadaneNormalizer()
>>> norm.fit(target_img)
>>> norm_img = norm.transform(source_img)
"""
def __init__(self):
super().__init__()
self.extractor = VahadaneExtractor()
class ReinhardNormalizer:
"""Reinhard colour normalizer.
Normalize a patch colour to the target image using the method of:
Reinhard, Erik, et al. "Color transfer between images." IEEE
Computer graphics and applications 21.5 (2001): 34-41.
This class contains code inspired by StainTools
[https://github.com/Peter554/StainTools] written by Peter Byfield.
Attributes:
target_means (float):
Mean of each LAB channel.
target_stds (float):
Standard deviation of each LAB channel.
Examples:
>>> from tiatoolbox.tools.stainnorm import ReinhardNormalizer
>>> norm = ReinhardNormalizer()
>>> norm.fit(target_img)
>>> norm_img = norm.transform(src_img)
"""
def __init__(self):
self.target_means = None
self.target_stds = None
def fit(self, target):
"""Fit to a target image.
Args:
target (:class:`numpy.ndarray` of type :class:`numpy.uint8`):
Target image.
"""
means, stds = self.get_mean_std(target)
self.target_means = means
self.target_stds = stds
def transform(self, img):
"""Transform an image.
Args:
img (:class:`numpy.ndarray` of type :class:`numpy.uint8`):
Input image.
Returns:
:class:`numpy.ndarray` of type :class:`numpy.float`:
Colour normalized RGB image.
"""
chan1, chan2, chan3 = self.lab_split(img)
means, stds = self.get_mean_std(img)
norm1 = (
(chan1 - means[0]) * (self.target_stds[0] / stds[0])
) + self.target_means[0]
norm2 = (
(chan2 - means[1]) * (self.target_stds[1] / stds[1])
) + self.target_means[1]
norm3 = (
(chan3 - means[2]) * (self.target_stds[2] / stds[2])
) + self.target_means[2]
return self.merge_back(norm1, norm2, norm3)
@staticmethod
def lab_split(img):
"""Convert from RGB uint8 to LAB and split into channels.
Args:
img (:class:`numpy.ndarray` of type :class:`numpy.uint8`):
Input image.
Returns:
tuple:
- :py:obj:`float`:
L channel in LAB colour space.
- :py:obj:`float`:
A channel in LAB colour space.
- :py:obj:`float`:
B channel in LAB colour space.
"""
img = img.astype("uint8") # ensure input image is uint8
img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
img_float = img.astype(np.float32)
chan1, chan2, chan3 = cv2.split(img_float)
chan1 /= 2.55 # should now be in range [0,100]
chan2 -= 128.0 # should now be in range [-127,127]
chan3 -= 128.0 # should now be in range [-127,127]
return chan1, chan2, chan3
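# Illustrative sketch: `lab_split` and `merge_back` are approximate
# inverses, so a round trip recovers the image up to colour-space
# quantisation. Assumes `img` is an RGB uint8 array.
# >>> c1, c2, c3 = ReinhardNormalizer.lab_split(img)
# >>> restored = ReinhardNormalizer.merge_back(c1, c2, c3)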
@staticmethod
def merge_back(chan1, chan2, chan3):
"""Take separate LAB channels and merge back to give RGB uint8.
Args:
chan1 (float):
L channel.
chan2 (float):
A channel.
chan3 (float):
B channel.
Returns:
:class:`numpy.ndarray`:
Merged image.
"""
chan1 *= 2.55 # should now be in range [0,255]
chan2 += 128.0 # should now be in range [0,255]
chan3 += 128.0 # should now be in range [0,255]
img = np.clip(cv2.merge((chan1, chan2, chan3)), 0, 255).astype(np.uint8)
return cv2.cvtColor(img, cv2.COLOR_LAB2RGB)
def get_mean_std(self, img):
"""Get mean and standard deviation of each channel.
Args:
img (:class:`numpy.ndarray` of type :class:`numpy.uint8`):
Input image.
Returns:
tuple:
- :py:obj:`float` - Means:
Mean values for each RGB channel.
- :py:obj:`float` - Standard deviations:
Standard deviation for each RGB channel.
"""
img = img.astype("uint8") # ensure input image is uint8
chan1, chan2, chan3 = self.lab_split(img)
m1, sd1 = cv2.meanStdDev(chan1)
m2, sd2 = cv2.meanStdDev(chan2)
m3, sd3 = cv2.meanStdDev(chan3)
means = m1, m2, m3
stds = sd1, sd2, sd3
return means, stds
def get_normalizer(method_name, stain_matrix=None):
"""Return a :class:`.StainNormalizer` with corresponding name.
Args:
method_name (str):
Name of stain norm method, must be one of "reinhard",
"custom", "ruifrok", "macenko" or "vahadane".
stain_matrix (:class:`numpy.ndarray` or str or pathlib.Path):
User-defined stain matrix. This must either be a numpy array
or a path to either a .csv or .npy file. This is only
utilised if using "custom" method name.
Returns:
StainNormalizer:
An object with :class:`.StainNormalizer` as its base class.
Examples:
>>> from tiatoolbox.tools.stainnorm import get_normalizer
>>> norm = get_normalizer('Reinhard')
>>> norm.fit(target_img)
>>> norm_img = norm.transform(source_img)
"""
if method_name.lower() not in [
"reinhard",
"ruifrok",
"macenko",
"vahadane",
"custom",
]:
raise MethodNotSupported
if stain_matrix is not None and method_name.lower() != "custom":
raise ValueError(
'`stain_matrix` is only defined when using `method_name`="custom".'
)
if method_name.lower() == "reinhard":
return ReinhardNormalizer()
if method_name.lower() == "ruifrok":
return RuifrokNormalizer()
if method_name.lower() == "macenko":
return MacenkoNormalizer()
if method_name.lower() == "vahadane":
return VahadaneNormalizer()
return CustomNormalizer(load_stain_matrix(stain_matrix))
# tiatoolbox-master/tiatoolbox/tools/registration/wsi_registration.py
import itertools
from numbers import Number
from typing import Dict, Tuple, Union
import cv2
import numpy as np
import SimpleITK as sitk # noqa: N813
import torch
import torchvision
from numpy.linalg import inv
from skimage import exposure, filters
from skimage.registration import phase_cross_correlation
from skimage.util import img_as_float
from tiatoolbox import logger
from tiatoolbox.tools.patchextraction import PatchExtractor
from tiatoolbox.utils.metrics import dice
from tiatoolbox.utils.transforms import imresize
from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIReader
Resolution = Union[Number, Tuple[Number, Number], np.ndarray]
IntBounds = Tuple[int, int, int, int]
def _check_dims(
fixed_img: np.ndarray,
moving_img: np.ndarray,
fixed_mask: np.ndarray,
moving_mask: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Check the dimensionality of images and mask.
This function verifies the dimensionality of images and their corresponding masks.
If the input images are RGB images, it converts them to grayscale images.
Args:
fixed_img (:class:`numpy.ndarray`):
A fixed image.
moving_img (:class:`numpy.ndarray`):
A moving image.
fixed_mask (:class:`numpy.ndarray`):
A binary tissue mask for the fixed image.
moving_mask (:class:`numpy.ndarray`):
A binary tissue mask for the moving image.
Returns:
tuple:
- :class:`numpy.ndarray` - A grayscale fixed image.
- :class:`numpy.ndarray` - A grayscale moving image.
"""
if len(np.unique(fixed_mask)) == 1 or len(np.unique(moving_mask)) == 1:
raise ValueError("The foreground is missing in the mask.")
if (
fixed_img.shape[:2] != fixed_mask.shape
or moving_img.shape[:2] != moving_mask.shape
):
raise ValueError("Mismatch of shape between image and its corresponding mask.")
if len(fixed_img.shape) == 3:
fixed_img = cv2.cvtColor(fixed_img, cv2.COLOR_BGR2GRAY)
if len(moving_img.shape) == 3:
moving_img = cv2.cvtColor(moving_img, cv2.COLOR_BGR2GRAY)
return fixed_img, moving_img
def compute_center_of_mass(mask: np.ndarray) -> tuple:
"""Compute center of mass.
Args:
mask: (:class:`numpy.ndarray`):
A binary mask.
Returns:
:py:obj:`tuple` - x- and y- coordinates representing the center of mass.
- :py:obj:`float` - X coordinate.
- :py:obj:`float` - Y coordinate.
"""
moments = cv2.moments(mask)
x_coord_center = moments["m10"] / moments["m00"]
y_coord_center = moments["m01"] / moments["m00"]
return (x_coord_center, y_coord_center)
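# Illustrative doctest (not part of the original module): the centre of mass
# of a uniform square mask is its geometric centre.
# >>> import numpy as np
# >>> compute_center_of_mass(np.ones((4, 4), dtype=np.uint8))
# (1.5, 1.5)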
def apply_affine_transformation(fixed_img, moving_img, transform_initializer):
"""Apply affine transformation using OpenCV.
Args:
fixed_img (:class:`numpy.ndarray`):
A fixed image.
moving_img (:class:`numpy.ndarray`):
A moving image.
transform_initializer (:class:`numpy.ndarray`):
A rigid transformation matrix.
Returns:
:class:`numpy.ndarray`:
A transformed image.
Examples:
>>> moving_image = apply_affine_transformation(
... fixed_image, moving_image, transform_initializer
... )
"""
return cv2.warpAffine(
moving_img, transform_initializer[0:-1][:], fixed_img.shape[:2][::-1]
)
def prealignment(
fixed_img: np.ndarray,
moving_img: np.ndarray,
fixed_mask: np.ndarray,
moving_mask: np.ndarray,
dice_overlap: float = 0.5,
rotation_step: int = 10,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
"""Coarse registration of an image pair.
This function performs initial alignment of a moving image with respect to a
fixed image. This can be used as a prealignment step before final refinement.
Args:
fixed_img (:class:`numpy.ndarray`):
A fixed image.
moving_img (:class:`numpy.ndarray`):
A moving image.
fixed_mask (:class:`numpy.ndarray`):
A binary tissue mask for the fixed image.
moving_mask (:class:`numpy.ndarray`):
A binary tissue mask for the moving image.
dice_overlap (float):
Dice ratio used for the selection of the best
transformation matrix.
rotation_step (int):
rotation_step defines the increment, in degrees, between the
tested rotation angles.
Returns:
tuple:
- :class:`numpy.ndarray` - A rigid transform matrix.
- :class:`numpy.ndarray` - Transformed moving image.
- :class:`numpy.ndarray` - Transformed moving mask.
- :py:obj:`float` - Dice overlap.
Examples:
>>> from tiatoolbox.tools.registration.wsi_registration import prealignment
>>> transform, transformed_image, transformed_mask, dice_overlap = prealignment(
... fixed_thumbnail, moving_thumbnail, fixed_mask, moving_mask
... )
"""
orig_fixed_img, orig_moving_img = fixed_img, moving_img
if len(fixed_mask.shape) != 2:
fixed_mask = fixed_mask[:, :, 0]
if len(moving_mask.shape) != 2:
moving_mask = moving_mask[:, :, 0]
fixed_mask = np.uint8(fixed_mask > 0)
moving_mask = np.uint8(moving_mask > 0)
fixed_img = np.squeeze(fixed_img)
moving_img = np.squeeze(moving_img)
fixed_img, moving_img = _check_dims(fixed_img, moving_img, fixed_mask, moving_mask)
if rotation_step < 10 or rotation_step > 20:
raise ValueError("Please select the rotation step in between 10 and 20.")
if dice_overlap < 0 or dice_overlap > 1:
raise ValueError("The dice_overlap should be in between 0 and 1.0.")
fixed_img = exposure.rescale_intensity(img_as_float(fixed_img), in_range=(0, 1))
moving_img = exposure.rescale_intensity(img_as_float(moving_img), in_range=(0, 1))
# Pad the fixed and moving masks to a common size so that dice can be computed
height = np.max((fixed_mask.shape[0], moving_mask.shape[0]))
width = np.max((fixed_mask.shape[1], moving_mask.shape[1]))
padded_fixed_mask = np.pad(
fixed_mask,
pad_width=[(0, height - fixed_mask.shape[0]), (0, width - fixed_mask.shape[1])],
mode="constant",
)
padded_moving_mask = np.pad(
moving_mask,
pad_width=[
(0, height - moving_mask.shape[0]),
(0, width - moving_mask.shape[1]),
],
mode="constant",
)
dice_before = dice(padded_fixed_mask, padded_moving_mask)
fixed_com = compute_center_of_mass((1 - fixed_img) * fixed_mask)
moving_com = compute_center_of_mass((1 - moving_img) * moving_mask)
com_transform = np.array(
[
[1, 0, fixed_com[0] - moving_com[0]],
[0, 1, fixed_com[1] - moving_com[1]],
[0, 0, 1],
]
)
origin_transform_com_ = [[1, 0, -fixed_com[0]], [0, 1, -fixed_com[1]], [0, 0, 1]]
origin_transform_com = [[1, 0, fixed_com[0]], [0, 1, fixed_com[1]], [0, 0, 1]]
all_dice = []
all_transform = []
for angle in np.arange(0, 360, rotation_step).tolist():
theta = np.radians(angle)
c, s = np.cos(theta), np.sin(theta)
rotation_matrix = np.array(((c, -s, 0), (s, c, 0), (0, 0, 1)))
transform = np.matmul(
np.matmul(
np.matmul(origin_transform_com, rotation_matrix), origin_transform_com_
),
com_transform,
)
# Apply transformation
warped_moving_mask = cv2.warpAffine(
            moving_mask, transform[:-1, :], fixed_img.shape[:2][::-1]
)
dice_com = dice(fixed_mask, warped_moving_mask)
all_dice.append(dice_com)
all_transform.append(transform)
if max(all_dice) >= dice_overlap:
dice_after = max(all_dice)
pre_transform = all_transform[all_dice.index(dice_after)]
# Apply transformation to both image and mask
moving_img = apply_affine_transformation(
orig_fixed_img, orig_moving_img, pre_transform
)
moving_mask = apply_affine_transformation(fixed_img, moving_mask, pre_transform)
return pre_transform, moving_img, moving_mask, dice_after
logger.warning(
"Not able to find the best transformation for pre-alignment. "
"Try changing the values for 'dice_overlap' and 'rotation_step'.",
)
return np.eye(3), moving_img, moving_mask, dice_before
def match_histograms(
image_a: np.ndarray, image_b: np.ndarray, kernel_size: int = 7
) -> Tuple[np.ndarray, np.ndarray]:
"""Image normalization function.
This function performs histogram equalization to unify the
appearance of an image pair.
Args:
image_a (:class:`numpy.ndarray`):
A grayscale image.
image_b (:class:`numpy.ndarray`):
A grayscale image.
kernel_size (int):
The size of the ellipse-shaped footprint.
Returns:
tuple:
A normalized pair of images for performing registration.
- :class:`numpy.ndarray` - A normalized grayscale image.
- :class:`numpy.ndarray` - A normalized grayscale image.
Examples:
>>> from tiatoolbox.tools.registration.wsi_registration import match_histograms
>>> norm_image_a, norm_image_b = match_histograms(gray_image_a, gray_image_b)
"""
image_a, image_b = np.squeeze(image_a), np.squeeze(image_b)
if len(image_a.shape) == 3 or len(image_b.shape) == 3:
raise ValueError("The input images should be grayscale images.")
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
entropy_a, entropy_b = filters.rank.entropy(image_a, kernel), filters.rank.entropy(
image_b, kernel
)
if np.mean(entropy_a) > np.mean(entropy_b):
image_b = exposure.match_histograms(image_b, image_a).astype(np.uint8)
else:
image_a = exposure.match_histograms(image_a, image_b).astype(np.uint8)
return image_a, image_b
class DFBRFeatureExtractor(torch.nn.Module):
"""Feature extractor for Deep Feature based Registration (DFBR).
This class extracts features from three different layers of VGG16.
These features are processed in DFBRegister class for registration
of a pair of images.
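    Examples:
        A minimal sketch; instantiating the extractor downloads ImageNet
        weights for VGG16, and the random batch below is purely
        illustrative:
        >>> import torch
        >>> extractor = DFBRFeatureExtractor()
        >>> batch = torch.rand(2, 3, 224, 224)  # two RGB 224x224 images
        >>> features = extractor(batch)
        >>> sorted(features.keys())
        ['block3_pool', 'block4_pool', 'block5_pool']
        >>> features["block3_pool"].shape
        torch.Size([2, 256, 28, 28])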
"""
def __init__(self):
super().__init__()
output_layers_id: list[str] = ["16", "23", "30"]
output_layers_key: list[str] = ["block3_pool", "block4_pool", "block5_pool"]
self.features: dict = dict.fromkeys(output_layers_key, None)
self.pretrained: torch.nn.Sequential = torchvision.models.vgg16(
pretrained=True
).features
self.f_hooks = []
        for i, layer_id in enumerate(output_layers_id):
            self.f_hooks.append(
                getattr(self.pretrained, layer_id).register_forward_hook(
                    self.forward_hook(output_layers_key[i])
                )
            )
    def forward_hook(self, layer_name: str):
        """Create a forward hook for feature extraction.
        Args:
            layer_name (str):
                User-defined name for a layer.
        Returns:
            Callable:
                A hook which stores the output of the hooked module in
                ``self.features[layer_name]``.
        """
def hook(
_module: torch.nn.MaxPool2d,
_module_input: Tuple[torch.Tensor],
module_output: torch.Tensor,
) -> None:
"""Forward hook for feature extraction.
Args:
_module:
Unused argument for the module.
_module_input:
Unused argument for the modules' input.
module_output (torch.Tensor):
Output (features) of the module.
Returns:
None
"""
self.features[layer_name] = module_output
return hook
def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
"""Forward pass for feature extraction.
Args:
x (torch.Tensor):
Batch of input images.
Returns:
dict:
A dictionary containing the multiscale features.
The expected format is {layer_name: features}.
"""
_ = self.pretrained(x)
return self.features
class DFBRegister:
r"""Deep Feature based Registration (DFBR).
This class implements a CNN feature based method for
registering a pair of histology images, as presented
in the paper [1]. This work is adapted from [2].
References:
[1] Awan, R., Raza, S.E.A., Lotz, J. and Rajpoot, N.M., 2022.
`Deep Feature based Cross-slide Registration
<https://arxiv.org/pdf/2202.09971.pdf>`_. arXiv preprint
arXiv:2202.09971.
[2] Yang, Z., Dan, T. and Yang, Y., 2018. Multi-temporal remote
sensing image registration using deep convolutional features.
        IEEE Access, 6, pp.38544-38555.
Examples:
>>> from tiatoolbox.tools.registration.wsi_registration import DFBRegister
>>> import cv2
>>> df = DFBRegister()
>>> fixed_image = np.repeat(np.expand_dims(fixed_gray, axis=2), 3, axis=2)
>>> moving_image = np.repeat(np.expand_dims(moving_gray, axis=2), 3, axis=2)
>>> transform = df.register(fixed_image, moving_image, fixed_mask, moving_mask)
>>> registered = cv2.warpAffine(
... moving_gray, transform[0:-1], fixed_gray.shape[:2][::-1]
... )
"""
def __init__(self, patch_size: Tuple[int, int] = (224, 224)):
self.patch_size = patch_size
self.x_scale, self.y_scale = [], []
self.feature_extractor = DFBRFeatureExtractor()
# Make this function private when full pipeline is implemented.
def extract_features(
self, fixed_img: np.ndarray, moving_img: np.ndarray
) -> Dict[str, torch.Tensor]:
"""CNN based feature extraction for registration.
This function extracts multiscale features from a pre-trained
VGG-16 model for an image pair.
Args:
fixed_img (:class:`numpy.ndarray`):
A fixed image.
moving_img (:class:`numpy.ndarray`):
A moving image.
Returns:
Dict:
A dictionary containing the multiscale features.
The expected format is {layer_name: features}.
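        Examples:
            A minimal sketch; the random uint8 images below are purely
            illustrative and stand in for real thumbnails:
            >>> import numpy as np
            >>> df = DFBRegister()
            >>> fixed_img = np.random.randint(
            ...     0, 255, (512, 512, 3), dtype=np.uint8
            ... )
            >>> moving_img = np.random.randint(
            ...     0, 255, (512, 512, 3), dtype=np.uint8
            ... )
            >>> features = df.extract_features(fixed_img, moving_img)
            >>> features["block3_pool"].shape
            torch.Size([2, 256, 28, 28])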
"""
self.x_scale = 1.0 * np.array(fixed_img.shape[:2]) / self.patch_size
self.y_scale = 1.0 * np.array(moving_img.shape[:2]) / self.patch_size
fixed_cnn = imresize(
fixed_img, output_size=self.patch_size, interpolation="linear"
)
moving_cnn = imresize(
moving_img, output_size=self.patch_size, interpolation="linear"
)
fixed_cnn = fixed_cnn / 255.0
moving_cnn = moving_cnn / 255.0
fixed_cnn = np.moveaxis(fixed_cnn, -1, 0)
moving_cnn = np.moveaxis(moving_cnn, -1, 0)
fixed_cnn = np.expand_dims(fixed_cnn, axis=0)
moving_cnn = np.expand_dims(moving_cnn, axis=0)
cnn_input = np.concatenate((fixed_cnn, moving_cnn), axis=0)
x = torch.from_numpy(cnn_input).type(torch.float32)
return self.feature_extractor(x)
@staticmethod
def finding_match(feature_dist: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Computes matching points.
This function computes all the possible matching points
between fixed and moving images.
Args:
feature_dist (:class:`numpy.ndarray`):
A feature distance array.
Returns:
tuple:
- :class:`numpy.ndarray` - An array of matching points.
- :class:`numpy.ndarray` - An array of floating numbers representing
quality of each matching point.
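        Examples:
            A small worked example on a hand-made 2x2 distance array;
            each row keeps its best match and the quality is the ratio
            of the second-smallest to the smallest distance:
            >>> import numpy as np
            >>> feature_dist = np.array([[0.5, 1.0], [2.0, 0.4]])
            >>> matches, quality = DFBRegister.finding_match(feature_dist)
            >>> matches
            array([[0, 0],
                   [1, 1]])
            >>> quality
            array([2., 5.])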
"""
seq = np.arange(feature_dist.shape[0])
ind_first_min = np.argmin(feature_dist, axis=1)
first_min = feature_dist[seq, ind_first_min]
mask = np.zeros_like(feature_dist)
mask[seq, ind_first_min] = 1
masked = np.ma.masked_array(feature_dist, mask)
second_min = np.amin(masked, axis=1)
return np.array([seq, ind_first_min]).transpose(), np.array(
second_min / first_min
)
@staticmethod
def compute_feature_distances(
features_x: np.ndarray, features_y: np.ndarray, factor: int
) -> np.ndarray:
"""Compute feature distance.
This function computes Euclidean distance between features of
fixed and moving images.
Args:
features_x (:class:`numpy.ndarray`):
Features computed for a fixed image.
features_y (:class:`numpy.ndarray`):
Features computed for a moving image.
factor (int):
A number multiplied by the feature size
for getting the referenced feature size.
Returns:
:class:`numpy.ndarray`:
A feature distance array.
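        Examples:
            A shape-level sketch using random 8-d features on a 4x4
            grid; `factor=1` keeps the original grid resolution:
            >>> import numpy as np
            >>> feats_fixed = np.random.rand(16, 8)
            >>> feats_moving = np.random.rand(16, 8)
            >>> dist = DFBRegister.compute_feature_distances(
            ...     feats_fixed, feats_moving, 1
            ... )
            >>> dist.shape
            (16, 16)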
"""
feature_distance = np.linalg.norm(
np.repeat(np.expand_dims(features_x, axis=0), features_y.shape[0], axis=0)
- np.repeat(
np.expand_dims(features_y, axis=1), features_x.shape[0], axis=1
),
axis=len(features_x.shape),
)
feature_size_2d = np.int_(np.sqrt(feature_distance.shape[0]))
ref_feature_size_2d = factor * feature_size_2d
feature_size, ref_feature_size = feature_size_2d**2, ref_feature_size_2d**2
feature_grid = np.kron(
np.arange(feature_size).reshape([feature_size_2d, feature_size_2d]),
np.ones([factor, factor], dtype="int32"),
)
row_ind = np.repeat(
feature_grid.reshape([ref_feature_size, 1]), ref_feature_size, axis=1
)
col_ind = np.repeat(
feature_grid.reshape([1, ref_feature_size]), ref_feature_size, axis=0
)
return feature_distance[row_ind, col_ind]
def feature_mapping(
self, features: Dict[str, torch.Tensor], num_matching_points: int = 128
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Find mapping between CNN features.
This function maps features of a fixed image to that of
a moving image on the basis of Euclidean distance between
them.
Args:
features (Dict):
Multiscale CNN features.
num_matching_points (int):
Number of required matching points.
Returns:
tuple:
Parameters for estimating transformation parameters.
- :class:`numpy.ndarray` - A matching 2D point set in the fixed image.
- :class:`numpy.ndarray` - A matching 2D point set in the moving image.
- :class:`numpy.ndarray` - A 1D array, where each element represents
quality of each matching point.
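        Examples:
            An illustrative sketch; `fixed_img` and `moving_img` are
            assumed to be the RGB images of the pair being registered:
            >>> df = DFBRegister()
            >>> features = df.extract_features(fixed_img, moving_img)
            >>> fixed_points, moving_points, dist = df.feature_mapping(
            ...     features
            ... )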
"""
if len(features) != 3:
raise ValueError("The feature mapping step expects 3 blocks of features.")
pool3_feat = features["block3_pool"].detach().numpy()
pool4_feat = features["block4_pool"].detach().numpy()
pool5_feat = features["block5_pool"].detach().numpy()
ref_feature_size = pool3_feat.shape[2]
fixed_feat1 = np.reshape(pool3_feat[0, :, :, :], [-1, 256])
moving_feat1 = np.reshape(pool3_feat[1, :, :, :], [-1, 256])
fixed_feat2 = np.reshape(pool4_feat[0, :, :, :], [-1, 512])
moving_feat2 = np.reshape(pool4_feat[1, :, :, :], [-1, 512])
fixed_feat3 = np.reshape(pool5_feat[0, :, :, :], [-1, 512])
moving_feat3 = np.reshape(pool5_feat[1, :, :, :], [-1, 512])
fixed_feat1 = fixed_feat1 / np.std(fixed_feat1)
moving_feat1 = moving_feat1 / np.std(moving_feat1)
fixed_feat2 = fixed_feat2 / np.std(fixed_feat2)
moving_feat2 = moving_feat2 / np.std(moving_feat2)
fixed_feat3 = fixed_feat3 / np.std(fixed_feat3)
moving_feat3 = moving_feat3 / np.std(moving_feat3)
feature_dist1 = self.compute_feature_distances(fixed_feat1, moving_feat1, 1)
feature_dist2 = self.compute_feature_distances(fixed_feat2, moving_feat2, 2)
feature_dist3 = self.compute_feature_distances(fixed_feat3, moving_feat3, 4)
feature_dist = np.sqrt(2) * feature_dist1 + feature_dist2 + feature_dist3
seq = np.array(
[[i, j] for i in range(ref_feature_size) for j in range(ref_feature_size)],
dtype="int32",
)
fixed_points = np.array(seq, dtype="float32") * 8.0 + 4.0
moving_points = np.array(seq, dtype="float32") * 8.0 + 4.0
fixed_points = (fixed_points - 112.0) / 224.0
moving_points = (moving_points - 112.0) / 224.0
matching_points, quality = self.finding_match(feature_dist)
max_quality = np.max(quality)
while np.where(quality >= max_quality)[0].shape[0] <= num_matching_points:
max_quality -= 0.01
matching_points = matching_points[np.where(quality >= max_quality)]
count_matching_points = matching_points.shape[0]
fixed_points, moving_points = (
fixed_points[matching_points[:, 1]],
moving_points[matching_points[:, 0]],
)
feature_dist = feature_dist[
np.repeat(
np.reshape(matching_points[:, 1], [count_matching_points, 1]),
count_matching_points,
axis=1,
),
np.repeat(
np.reshape(matching_points[:, 0], [1, count_matching_points]),
count_matching_points,
axis=0,
),
]
fixed_points = ((fixed_points * 224.0) + 112.0) * self.x_scale
moving_points = ((moving_points * 224.0) + 112.0) * self.y_scale
fixed_points, moving_points = fixed_points[:, [1, 0]], moving_points[:, [1, 0]]
return fixed_points, moving_points, np.amin(feature_dist, axis=1)
@staticmethod
def estimate_affine_transform(
points_0: np.ndarray, points_1: np.ndarray
) -> np.ndarray:
"""Compute affine transformation matrix.
This function estimates transformation parameters
using linear least squares for a given set of matched
points.
Args:
points_0 (:class:`numpy.ndarray`):
An Nx2 array of points in a fixed image.
points_1 (:class:`numpy.ndarray`):
An Nx2 array of points in a moving image.
Returns:
:class:`numpy.ndarray`:
A 3x3 transformation matrix.
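        Examples:
            A worked example; a pure translation of (5, 3) is recovered
            (up to floating point error):
            >>> import numpy as np
            >>> fixed_points = np.array(
            ...     [[0.0, 0.0], [0.0, 10.0], [10.0, 0.0], [10.0, 10.0]]
            ... )
            >>> moving_points = fixed_points + [5.0, 3.0]
            >>> transform = DFBRegister.estimate_affine_transform(
            ...     fixed_points, moving_points
            ... )
            >>> np.round(transform).astype(int)
            array([[1, 0, 5],
                   [0, 1, 3],
                   [0, 0, 1]])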
"""
num_points = min(len(points_0), len(points_1))
x = np.hstack([points_0[:num_points], np.ones((num_points, 1))])
y = np.hstack([points_1[:num_points], np.ones((num_points, 1))])
matrix = np.linalg.lstsq(x, y, rcond=-1)[0].T
matrix[-1, :] = [0, 0, 1]
return matrix
@staticmethod
def get_tissue_regions(
fixed_image: np.ndarray,
fixed_mask: np.ndarray,
moving_image: np.ndarray,
moving_mask: np.ndarray,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, IntBounds]:
"""Extract tissue region.
This function uses binary mask for extracting tissue
region from the image.
Args:
fixed_image (:class:`numpy.ndarray`):
A fixed image.
fixed_mask (:class:`numpy.ndarray`):
A binary tissue mask for the fixed image.
moving_image (:class:`numpy.ndarray`):
A moving image.
moving_mask (:class:`numpy.ndarray`):
A binary tissue mask for the moving image.
Returns:
tuple:
- :class:`numpy.ndarray` - A cropped image containing tissue region
from fixed image.
- :class:`numpy.ndarray` - A cropped image containing tissue mask
from fixed image.
- :class:`numpy.ndarray` - A cropped image containing tissue region
from moving image.
- :class:`numpy.ndarray` - A cropped image containing tissue mask
from moving image.
- :py:obj:`tuple` - Bounds of the tissue region.
- :py:obj:`int` - Top (start y value)
- :py:obj:`int` - Left (start x value)
- :py:obj:`int` - Bottom (end y value)
- :py:obj:`int` - Right (end x value)
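        Examples:
            An illustrative sketch; each mask is assumed to be aligned
            with, and the same size as, its image:
            >>> (
            ...     fixed_tissue,
            ...     fixed_tissue_mask,
            ...     moving_tissue,
            ...     moving_tissue_mask,
            ...     bounds,
            ... ) = DFBRegister.get_tissue_regions(
            ...     fixed_img, fixed_mask, moving_img, moving_mask
            ... )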
"""
        fixed_min_c, fixed_min_r, width, height = cv2.boundingRect(fixed_mask)
        fixed_max_c, fixed_max_r = fixed_min_c + width, fixed_min_r + height
        moving_min_c, moving_min_r, width, height = cv2.boundingRect(moving_mask)
        moving_max_c, moving_max_r = moving_min_c + width, moving_min_r + height
        min_c, max_c, min_r, max_r = (
            np.min([fixed_min_c, moving_min_c]),
            np.max([fixed_max_c, moving_max_c]),
            np.min([fixed_min_r, moving_min_r]),
            np.max([fixed_max_r, moving_max_r]),
        )
        fixed_tissue_image = fixed_image[min_r:max_r, min_c:max_c]
        fixed_tissue_mask = fixed_mask[min_r:max_r, min_c:max_c]
        moving_tissue_image = moving_image[min_r:max_r, min_c:max_c]
        moving_tissue_mask = moving_mask[min_r:max_r, min_c:max_c]
moving_tissue_image[np.all(moving_tissue_image == (0, 0, 0), axis=-1)] = (
243,
243,
243,
)
return (
fixed_tissue_image,
fixed_tissue_mask,
moving_tissue_image,
moving_tissue_mask,
            (min_r, min_c, max_r, max_c),
)
@staticmethod
def find_points_inside_boundary(mask: np.ndarray, points: np.ndarray) -> np.ndarray:
"""Find indices of points lying inside the boundary.
This function returns indices of points which are
enclosed by an area indicated by a binary mask.
Args:
mask (:class:`numpy.ndarray`):
A binary tissue mask
points (:class:`numpy.ndarray`):
(N, 2) array of point coordinates.
Returns:
:class:`numpy.ndarray`:
Indices of points enclosed by a boundary.
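        Examples:
            An illustrative sketch; note that the mask is dilated
            internally before the containment check, so points slightly
            outside the mask may still be kept:
            >>> import numpy as np
            >>> mask = np.zeros((100, 100), dtype=np.uint8)
            >>> mask[40:60, 40:60] = 1
            >>> points = np.array([[50, 50], [5, 5]])  # (x, y) pairs
            >>> inside = DFBRegister.find_points_inside_boundary(mask, points)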
"""
kernel = np.ones((25, 25), np.uint8)
mask = cv2.dilate(mask, kernel, iterations=1)
mask_reader = VirtualWSIReader(mask)
# convert coordinates of shape [N, 2] to [N, 4]
end_x_y = points[:, 0:2] + 1
bbox_coord = np.c_[points, end_x_y].astype(int)
return PatchExtractor.filter_coordinates(
mask_reader, bbox_coord, mask.shape[::-1]
)
def filtering_matching_points(
self,
fixed_mask: np.ndarray,
moving_mask: np.ndarray,
fixed_matched_points: np.ndarray,
moving_matched_points: np.ndarray,
quality: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Filter the matching points.
        This function removes duplicated points and points which lie
        outside the tissue region.
Args:
fixed_mask (:class:`numpy.ndarray`):
A binary tissue mask for the fixed image.
moving_mask (:class:`numpy.ndarray`):
A binary tissue mask for the moving image.
fixed_matched_points (:class:`numpy.ndarray`):
(N, 2) array of coordinates.
moving_matched_points (:class:`numpy.ndarray`):
(N, 2) array of coordinates.
quality (:class:`numpy.ndarray`):
An array representing quality of each matching point.
Returns:
tuple:
- np.ndarray - Filtered matching points for a fixed image.
- np.ndarray - Filtered matching points for a moving image.
- np.ndarray - Quality of matching points.
"""
included_index = self.find_points_inside_boundary(
fixed_mask, fixed_matched_points
)
fixed_matched_points, moving_matched_points, quality = (
fixed_matched_points[included_index, :],
moving_matched_points[included_index, :],
quality[included_index],
)
included_index = self.find_points_inside_boundary(
moving_mask, moving_matched_points
)
fixed_matched_points, moving_matched_points, quality = (
fixed_matched_points[included_index, :],
moving_matched_points[included_index, :],
quality[included_index],
)
# remove duplicate matching points
duplicate_ind = []
unq, count = np.unique(fixed_matched_points, axis=0, return_counts=True)
repeated_points = unq[count > 1]
for repeated_point in repeated_points:
repeated_idx = np.argwhere(
np.all(fixed_matched_points == repeated_point, axis=1)
)
duplicate_ind = np.hstack([duplicate_ind, repeated_idx.ravel()])
unq, count = np.unique(moving_matched_points, axis=0, return_counts=True)
repeated_points = unq[count > 1]
for repeated_point in repeated_points:
repeated_idx = np.argwhere(
np.all(moving_matched_points == repeated_point, axis=1)
)
duplicate_ind = np.hstack([duplicate_ind, repeated_idx.ravel()])
if len(duplicate_ind) > 0:
duplicate_ind = duplicate_ind.astype(int)
fixed_matched_points = np.delete(
fixed_matched_points, duplicate_ind, axis=0
)
moving_matched_points = np.delete(
moving_matched_points, duplicate_ind, axis=0
)
quality = np.delete(quality, duplicate_ind)
return fixed_matched_points, moving_matched_points, quality
def perform_dfbregister(
self,
fixed_img: np.ndarray,
moving_img: np.ndarray,
fixed_mask: np.ndarray,
moving_mask: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Perform DFBR to align a pair of image.
This function aligns a pair of images using Deep
Feature based Registration (DFBR) method.
Args:
fixed_img (:class:`numpy.ndarray`):
A fixed image.
moving_img (:class:`numpy.ndarray`):
A moving image.
fixed_mask (:class:`numpy.ndarray`):
A binary tissue mask for the fixed image.
moving_mask (:class:`numpy.ndarray`):
A binary tissue mask for the moving image.
Returns:
tuple:
- :class:`numpy.ndarray` - An affine transformation matrix.
- :class:`numpy.ndarray` - A transformed moving image.
- :class:`numpy.ndarray` - A transformed moving mask.
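        Examples:
            An illustrative sketch; the inputs are typically the outputs
            of :func:`prealignment`:
            >>> df = DFBRegister()
            >>> transform, moving_img, moving_mask = df.perform_dfbregister(
            ...     fixed_img, moving_img, fixed_mask, moving_mask
            ... )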
"""
features = self.extract_features(fixed_img, moving_img)
fixed_matched_points, moving_matched_points, quality = self.feature_mapping(
features
)
(
fixed_matched_points,
moving_matched_points,
quality,
) = self.filtering_matching_points(
fixed_mask,
moving_mask,
fixed_matched_points,
moving_matched_points,
quality,
)
tissue_transform = DFBRegister.estimate_affine_transform(
fixed_matched_points, moving_matched_points
)
# Apply transformation
moving_img = apply_affine_transformation(
fixed_img, moving_img, tissue_transform
)
moving_mask = apply_affine_transformation(
fixed_img, moving_mask, tissue_transform
)
return tissue_transform, moving_img, moving_mask
def perform_dfbregister_block_wise(
self,
fixed_img: np.ndarray,
moving_img: np.ndarray,
fixed_mask: np.ndarray,
moving_mask: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Perform DFBR to align a pair of images in a block wise manner.
        This function divides the images into four equal parts and then
        performs feature matching between the corresponding parts of the
        fixed and moving images. The matched features from all four
        parts are then concatenated to estimate the affine transform.
Args:
fixed_img (:class:`numpy.ndarray`):
A fixed image.
moving_img (:class:`numpy.ndarray`):
A moving image.
fixed_mask (:class:`numpy.ndarray`):
A binary tissue mask for the fixed image.
moving_mask (:class:`numpy.ndarray`):
A binary tissue mask for the moving image.
Returns:
tuple:
- :class:`numpy.ndarray` - An affine transformation matrix.
- :class:`numpy.ndarray` - A transformed moving image.
- :class:`numpy.ndarray` - A transformed moving mask.
"""
left_upper_bounding_bbox = [
0,
int(np.floor(fixed_img.shape[0] / 2)),
0,
int(np.floor(fixed_img.shape[1] / 2)),
]
right_upper_bounding_bbox = [
0,
int(np.floor(fixed_img.shape[0] / 2)),
int(np.ceil(fixed_img.shape[1] / 2)),
fixed_img.shape[1],
]
left_lower_bounding_bbox = [
int(np.ceil(fixed_img.shape[0] / 2)),
fixed_img.shape[0],
0,
int(np.floor(fixed_img.shape[1] / 2)),
]
right_lower_bounding_bbox = [
int(np.ceil(fixed_img.shape[0] / 2)),
fixed_img.shape[0],
int(np.ceil(fixed_img.shape[1] / 2)),
fixed_img.shape[1],
]
blocks_bounding_box = [
left_upper_bounding_bbox,
right_upper_bounding_bbox,
left_lower_bounding_bbox,
right_lower_bounding_bbox,
]
fixed_matched_points, moving_matched_points, quality = [], [], []
for _index, bounding_box in enumerate(blocks_bounding_box):
fixed_block = fixed_img[
bounding_box[0] : bounding_box[1], bounding_box[2] : bounding_box[3], :
]
moving_block = moving_img[
bounding_box[0] : bounding_box[1], bounding_box[2] : bounding_box[3], :
]
features = self.extract_features(fixed_block, moving_block)
(
fixed_block_matched_points,
moving_block_matched_points,
block_quality,
) = self.feature_mapping(features)
fixed_matched_points.append(
fixed_block_matched_points + [bounding_box[2], bounding_box[0]]
)
moving_matched_points.append(
moving_block_matched_points + [bounding_box[2], bounding_box[0]]
)
quality.append(block_quality)
fixed_matched_points, moving_matched_points, quality = (
np.concatenate(fixed_matched_points),
np.concatenate(moving_matched_points),
np.concatenate(quality),
)
(
fixed_matched_points,
moving_matched_points,
_,
) = self.filtering_matching_points(
fixed_mask,
moving_mask,
fixed_matched_points,
moving_matched_points,
quality,
)
block_transform = DFBRegister.estimate_affine_transform(
fixed_matched_points, moving_matched_points
)
# Apply transformation
moving_img = apply_affine_transformation(fixed_img, moving_img, block_transform)
moving_mask = apply_affine_transformation(
fixed_img, moving_mask, block_transform
)
return block_transform, moving_img, moving_mask
def register(
self,
fixed_img: np.ndarray,
moving_img: np.ndarray,
fixed_mask: np.ndarray,
moving_mask: np.ndarray,
transform_initializer: np.ndarray = None,
) -> np.ndarray:
"""Perform whole slide image registration.
This function aligns a pair of images using Deep
Feature based Registration (DFBR) method.
Args:
fixed_img (:class:`numpy.ndarray`):
A fixed image.
moving_img (:class:`numpy.ndarray`):
A moving image.
fixed_mask (:class:`numpy.ndarray`):
A binary tissue mask for the fixed image.
moving_mask (:class:`numpy.ndarray`):
A binary tissue mask for the moving image.
transform_initializer (:class:`numpy.ndarray`):
A rigid transformation matrix.
Returns:
:class:`numpy.ndarray`:
An affine transformation matrix.
"""
if len(fixed_img.shape) != 3 or len(moving_img.shape) != 3:
raise ValueError(
"The required shape for fixed and moving images is n x m x 3."
)
if fixed_img.shape[2] != 3 or moving_img.shape[2] != 3:
raise ValueError("The input images are expected to have 3 channels.")
if len(fixed_mask.shape) > 2:
fixed_mask = fixed_mask[:, :, 0]
if len(moving_mask.shape) > 2:
moving_mask = moving_mask[:, :, 0]
fixed_mask = np.uint8(fixed_mask > 0)
moving_mask = np.uint8(moving_mask > 0)
# Perform Pre-alignment
if transform_initializer is None:
transform_initializer, moving_img, moving_mask, before_dice = prealignment(
fixed_img, moving_img, fixed_mask, moving_mask
)
else:
# Apply transformation to both image and mask
moving_img = apply_affine_transformation(
fixed_img, moving_img, transform_initializer
)
moving_mask = apply_affine_transformation(
fixed_img, moving_mask, transform_initializer
)
before_dice = dice(fixed_mask, moving_mask)
# Estimate transform using tissue regions
(
fixed_tissue_img,
fixed_tissue_mask,
moving_tissue_img,
moving_tissue_mask,
tissue_top_left_coord,
) = self.get_tissue_regions(fixed_img, fixed_mask, moving_img, moving_mask)
(
tissue_transform,
transform_tissue_img,
transform_tissue_mask,
) = self.perform_dfbregister(
fixed_tissue_img, moving_tissue_img, fixed_tissue_mask, moving_tissue_mask
)
# Use the estimated transform only if it improves DICE overlap
after_dice = dice(fixed_tissue_mask, transform_tissue_mask)
if after_dice > before_dice:
moving_tissue_img, moving_tissue_mask = (
transform_tissue_img,
transform_tissue_mask,
)
before_dice = after_dice
else:
tissue_transform = np.eye(3, 3)
        # Estimate transform using tissue regions in a block-wise manner
(
block_transform,
transform_tissue_img,
transform_tissue_mask,
) = self.perform_dfbregister_block_wise(
fixed_tissue_img, moving_tissue_img, fixed_tissue_mask, moving_tissue_mask
)
# Use the estimated tissue transform only if it improves DICE overlap
after_dice = dice(fixed_tissue_mask, transform_tissue_mask)
if after_dice > before_dice:
moving_tissue_img, moving_tissue_mask = (
transform_tissue_img,
transform_tissue_mask,
)
before_dice = after_dice
else:
block_transform = np.eye(3, 3)
# Fix translation offset
shift, _error, _diff_phase = phase_cross_correlation(
fixed_tissue_img, moving_tissue_img
)
translation_offset = np.array([[1, 0, shift[1]], [0, 1, shift[0]], [0, 0, 1]])
# Combining tissue and block transform
tissue_transform = translation_offset @ block_transform @ tissue_transform
# tissue_transform is computed for cropped images (tissue region only).
# It is converted using the tissue crop coordinates, so that it can be
# applied to the full image.
forward_translation = np.array(
[
[1, 0, -tissue_top_left_coord[1]],
[0, 1, -tissue_top_left_coord[0]],
[0, 0, 1],
]
)
inverse_translation = np.array(
[
[1, 0, tissue_top_left_coord[1]],
[0, 1, tissue_top_left_coord[0]],
[0, 0, 1],
]
)
image_transform = inverse_translation @ tissue_transform @ forward_translation
return image_transform @ transform_initializer
def estimate_bspline_transform(
fixed_image: np.ndarray,
moving_image: np.ndarray,
fixed_mask: np.ndarray,
moving_mask: np.ndarray,
**kwargs,
) -> sitk.BSplineTransform:
"""Estimate B-spline transformation.
This function performs registration using the `SimpleITK toolkit
<https://simpleitk.readthedocs.io/_/downloads/en/v1.2.4/pdf/>`_. We employed
a deformable registration using a multi-resolution B-spline approach. B-spline
registration uses B-spline curves to compute the deformation field mapping pixels
in a moving image to corresponding pixels in a fixed image.
Args:
fixed_image (:class:`numpy.ndarray`):
A fixed image.
moving_image (:class:`numpy.ndarray`):
A moving image.
fixed_mask (:class:`numpy.ndarray`):
A binary tissue mask for the fixed image.
moving_mask (:class:`numpy.ndarray`):
A binary tissue mask for the moving image.
**kwargs (dict):
Key-word arguments for B-spline parameters.
                grid_space (float):
                    Grid spacing (mm) used to determine the control points.
scale_factors (list):
Scaling factor of each B-spline per level in a multi-level setting.
shrink_factor (list):
Shrink factor per level to change the size and
complexity of the image.
smooth_sigmas (list):
Standard deviation for gaussian smoothing per level.
num_iterations (int):
Maximal number of iterations.
sampling_percent (float):
Fraction of image used for metric evaluation.
learning_rate (float):
Step size along traversal direction in parameter space.
convergence_min_value (float):
Value for checking convergence together with energy
profile of the similarity metric.
convergence_window_size (int):
Number of similarity metric values for estimating the
energy profile.
Returns:
2D deformation transformation represented by a grid of control points.
Examples:
>>> from tiatoolbox.tools.registration.wsi_registration import (
... estimate_bspline_transform, apply_bspline_transform
... )
>>> bspline_transform = estimate_bspline_transform(
... fixed_gray_thumbnail, moving_gray_thumbnail, fixed_mask, moving_mask,
... grid_space=50.0, sampling_percent=0.1,
... )
>>> bspline_registered_image = apply_bspline_transform(
... fixed_thumbnail, moving_thumbnail, bspline_transform
... )
"""
bspline_params = {
"grid_space": 50.0,
"scale_factors": [1, 2, 5],
"shrink_factor": [4, 2, 1],
"smooth_sigmas": [4, 2, 1],
"num_iterations": 100,
"sampling_percent": 0.2,
"learning_rate": 0.5,
"convergence_min_value": 1e-4,
"convergence_window_size": 5,
}
bspline_params.update(kwargs)
fixed_image, moving_image = np.squeeze(fixed_image), np.squeeze(moving_image)
if len(fixed_image.shape) > 3 or len(moving_image.shape) > 3:
raise ValueError("The input images can only be grayscale or RGB images.")
if (len(fixed_image.shape) == 3 and fixed_image.shape[2] != 3) or (
len(moving_image.shape) == 3 and moving_image.shape[2] != 3
):
raise ValueError("The input images can only have 3 channels.")
# Inverting intensity values
fixed_image_inv = np.invert(fixed_image)
moving_image_inv = np.invert(moving_image)
if len(fixed_mask.shape) > 2:
fixed_mask = fixed_mask[:, :, 0]
if len(moving_mask.shape) > 2:
moving_mask = moving_mask[:, :, 0]
fixed_mask = np.array(fixed_mask != 0, dtype=np.uint8)
moving_mask = np.array(moving_mask != 0, dtype=np.uint8)
# Background Removal
fixed_image_inv = cv2.bitwise_and(fixed_image_inv, fixed_image_inv, mask=fixed_mask)
moving_image_inv = cv2.bitwise_and(
moving_image_inv, moving_image_inv, mask=moving_mask
)
# Getting SimpleITK Images from numpy arrays
fixed_image_inv_sitk = sitk.GetImageFromArray(fixed_image_inv, isVector=True)
moving_image_inv_sitk = sitk.GetImageFromArray(moving_image_inv, isVector=True)
cast_filter = sitk.VectorIndexSelectionCastImageFilter()
cast_filter.SetOutputPixelType(sitk.sitkFloat32)
fixed_image_inv_sitk = cast_filter.Execute(fixed_image_inv_sitk)
moving_image_inv_sitk = cast_filter.Execute(moving_image_inv_sitk)
# Determine the number of B-spline control points using physical spacing
grid_physical_spacing = 2 * [
bspline_params["grid_space"]
] # A control point every grid_space (mm)
image_physical_size = [
size * spacing
for size, spacing in zip(
fixed_image_inv_sitk.GetSize(), fixed_image_inv_sitk.GetSpacing()
)
]
mesh_size = [
int(image_size / grid_spacing + 0.5)
for image_size, grid_spacing in zip(image_physical_size, grid_physical_spacing)
]
mesh_size = [int(sz / 4 + 0.5) for sz in mesh_size]
tx = sitk.BSplineTransformInitializer(
image1=fixed_image_inv_sitk, transformDomainMeshSize=mesh_size
)
print("Initial Number of B-spline Parameters:", tx.GetNumberOfParameters)
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetInitialTransformAsBSpline(
tx, inPlace=True, scaleFactors=bspline_params["scale_factors"]
)
registration_method.SetMetricAsMattesMutualInformation(50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(
bspline_params["sampling_percent"], sitk.sitkWallClock
)
registration_method.SetShrinkFactorsPerLevel(bspline_params["shrink_factor"])
registration_method.SetSmoothingSigmasPerLevel(bspline_params["smooth_sigmas"])
registration_method.SetOptimizerAsGradientDescentLineSearch(
learningRate=bspline_params["learning_rate"],
numberOfIterations=bspline_params["num_iterations"],
convergenceMinimumValue=bspline_params["convergence_min_value"],
convergenceWindowSize=bspline_params["convergence_window_size"],
)
registration_method.SetInterpolator(sitk.sitkLinear)
return registration_method.Execute(fixed_image_inv_sitk, moving_image_inv_sitk)
def apply_bspline_transform(
fixed_image: np.ndarray, moving_image: np.ndarray, transform: sitk.BSplineTransform
) -> np.ndarray:
"""Apply the given B-spline transform to a moving image.
Args:
fixed_image (:class:`numpy.ndarray`):
A fixed image.
moving_image (:class:`numpy.ndarray`):
A moving image.
transform (sitk.BSplineTransform):
A B-spline transform.
Returns:
:class:`numpy.ndarray`:
A transformed moving image.
"""
fixed_image_sitk = sitk.GetImageFromArray(fixed_image, isVector=True)
moving_image_sitk = sitk.GetImageFromArray(moving_image, isVector=True)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed_image_sitk)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(1)
resampler.SetTransform(transform)
sitk_registered_image_sitk = resampler.Execute(moving_image_sitk)
return sitk.GetArrayFromImage(sitk_registered_image_sitk)
class AffineWSITransformer:
"""Resampling regions from a whole slide image.
This class is used to resample tiles/patches from a whole slide image
using transformation.
Example:
>>> from tiatoolbox.tools.registration.wsi_registration import (
... AffineWSITransformer
... )
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi_reader = WSIReader.open(input_img=sample_ome_tiff)
>>> transform_level0 = np.eye(3)
>>> tfm = AffineWSITransformer(wsi_reader, transform_level0)
>>> output = tfm.read_rect(location, size, resolution=resolution, units="level")
"""
def __init__(self, reader: WSIReader, transform: np.ndarray) -> None:
"""Initialize object.
Args:
reader (WSIReader):
                An object with :class:`WSIReader` as its base class.
transform (:class:`numpy.ndarray`):
A 3x3 transformation matrix. The inverse transformation will be applied.
"""
self.wsi_reader = reader
self.transform_level0 = transform
@staticmethod
def transform_points(points: np.ndarray, transform: np.ndarray) -> np.ndarray:
"""Transform points using the given transformation matrix.
Args:
points (:class:`numpy.ndarray`):
A set of points of shape (N, 2).
transform (:class:`numpy.ndarray`):
Transformation matrix of shape (3, 3).
Returns:
:class:`numpy.ndarray`:
Warped points of shape (N, 2).
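        Examples:
            A worked example; translating two points by (2, 3):
            >>> import numpy as np
            >>> points = np.array([[0, 0], [1, 1]])
            >>> transform = np.array([[1, 0, 2], [0, 1, 3], [0, 0, 1]])
            >>> AffineWSITransformer.transform_points(points, transform)
            array([[2., 3.],
                   [3., 4.]])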
"""
points = np.array(points)
# Pad the data with ones, so that our transformation can do translations
points_pad = np.hstack([points, np.ones((points.shape[0], 1))])
points_warp = np.dot(points_pad, transform.T)
return points_warp[:, :-1]
def get_patch_dimensions(
self, size: Tuple[int, int], transform: np.ndarray
) -> Tuple[int, int]:
"""Compute patch size needed for transformation.
Args:
size (tuple(int)):
(width, height) tuple giving the desired output image size.
transform (:class:`numpy.ndarray`):
Transformation matrix of shape (3, 3).
Returns:
:py:obj:`tuple` - Maximum size of the patch needed for transformation.
- :py:obj:`int` - Width
- :py:obj:`int` - Height
"""
width, height = size[0], size[1]
x = [
np.linspace(1, width, width, endpoint=True),
np.ones(height) * width,
np.linspace(1, width, width, endpoint=True),
np.ones(height),
]
x = np.array(list(itertools.chain.from_iterable(x)))
y = [
np.ones(width),
np.linspace(1, height, height, endpoint=True),
np.ones(width) * height,
np.linspace(1, height, height, endpoint=True),
]
y = np.array(list(itertools.chain.from_iterable(y)))
points = np.array([x, y]).transpose()
transform_points = self.transform_points(points, transform)
width = np.max(transform_points[:, 0]) - np.min(transform_points[:, 0]) + 1
height = np.max(transform_points[:, 1]) - np.min(transform_points[:, 1]) + 1
width, height = np.ceil(width).astype(int), np.ceil(height).astype(int)
return (width, height)
def get_transformed_location(
self, location: Tuple[int, int], size: Tuple[int, int], level: int
) -> Tuple[int, int]:
"""Get corresponding location on unregistered image and the required patch size.
This function applies inverse transformation to the centre point of the region.
The transformed centre point is used to obtain the transformed top left pixel
of the region.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline (level 0)
reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image size.
level (int):
Pyramid level/resolution layer.
Returns:
tuple:
- :py:obj:`tuple` - Transformed location (top left pixel).
- :py:obj:`int` - X coordinate
- :py:obj:`int` - Y coordinate
- :py:obj:`tuple` - Maximum size suitable for transformation.
- :py:obj:`int` - Width
- :py:obj:`int` - Height
"""
inv_transform = inv(self.transform_level0)
        # Assumes a downsample factor of 2 between consecutive pyramid levels
        size_level0 = [x * (2**level) for x in size]
center_level0 = [x + size_level0[i] / 2 for i, x in enumerate(location)]
center_level0 = np.expand_dims(np.array(center_level0), axis=0)
center_level0 = self.transform_points(center_level0, inv_transform)[0]
transformed_size = self.get_patch_dimensions(size, inv_transform)
transformed_location = [
center_level0[0] - (transformed_size[0] * (2**level)) / 2,
center_level0[1] - (transformed_size[1] * (2**level)) / 2,
]
transformed_location = tuple(
np.round(x).astype(int) for x in transformed_location
)
return transformed_location, transformed_size
def transform_patch(self, patch: np.ndarray, size: Tuple[int, int]) -> np.ndarray:
"""Apply transformation to the given patch.
This function applies the transformation matrix after removing the translation.
Args:
patch (:class:`numpy.ndarray`):
A region of whole slide image.
size (tuple(int)):
(width, height) tuple giving the desired output image size.
Returns:
:class:`numpy.ndarray`:
A transformed region/patch.
"""
        # Zero out the translation component with an element-wise mask
        transform = self.transform_level0 * [[1, 1, 0], [1, 1, 0], [1, 1, 1]]
translation = (-size[0] / 2 + 0.5, -size[1] / 2 + 0.5)
forward_translation = np.array(
[[1, 0, translation[0]], [0, 1, translation[1]], [0, 0, 1]]
)
inverse_translation = np.linalg.inv(forward_translation)
transform = inverse_translation @ transform @ forward_translation
        return cv2.warpAffine(patch, transform[:-1, :], patch.shape[:2][::-1])
def read_rect(
self,
location: Tuple[int, int],
size: Tuple[int, int],
resolution: Resolution,
units: str,
) -> np.ndarray:
"""Read a transformed region of the transformed whole slide image.
Location is in terms of the baseline image (level 0 / maximum resolution),
and size is the output image size.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline (level 0)
reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image size.
            resolution (float or tuple(float)):
                Resolution at which to read the region.
units (str):
Units of the scale.
Returns:
:class:`numpy.ndarray`:
A transformed region/patch.
"""
(
read_level,
_,
_,
_post_read_scale,
_baseline_read_size,
) = self.wsi_reader.find_read_rect_params(
location=location,
size=size,
resolution=resolution,
units=units,
)
transformed_location, max_size = self.get_transformed_location(
location, size, read_level
)
patch = self.wsi_reader.read_rect(
transformed_location, max_size, resolution=resolution, units=units
)
transformed_patch = self.transform_patch(patch, max_size)
start_row = int(max_size[1] / 2) - int(size[1] / 2)
end_row = int(max_size[1] / 2) + int(size[1] / 2)
start_col = int(max_size[0] / 2) - int(size[0] / 2)
end_col = int(max_size[0] / 2) + int(size[0] / 2)
return transformed_patch[start_row:end_row, start_col:end_col, :]
| 57,079 | 36.016861 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/tools/registration/__init__.py | """Registration package for the registration methods implemented in tiatoolbox."""
from tiatoolbox.tools.registration import wsi_registration
| 122 | 40 | 62 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/wsicore/wsireader.py | """This module defines classes which can read image data from WSI formats."""
from __future__ import annotations
import copy
import json
import logging
import math
import os
import pathlib
import re
from datetime import datetime
from numbers import Number
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import openslide
import pandas as pd
import tifffile
import zarr
from defusedxml import ElementTree
from packaging.version import Version
from PIL import Image
from tiatoolbox import logger, utils
from tiatoolbox.annotation.storage import AnnotationStore, SQLiteStore
from tiatoolbox.utils.env_detection import pixman_warning
from tiatoolbox.utils.exceptions import FileNotSupported
from tiatoolbox.utils.visualization import AnnotationRenderer
from tiatoolbox.wsicore.metadata.ngff import Multiscales
from tiatoolbox.wsicore.wsimeta import WSIMeta
pixman_warning()
NumPair = Tuple[Number, Number]
IntPair = Tuple[int, int]
Bounds = Tuple[Number, Number, Number, Number]
IntBounds = Tuple[int, int, int, int]
Resolution = Union[Number, Tuple[Number, Number], np.ndarray]
MIN_NGFF_VERSION = Version("0.4")
MAX_NGFF_VERSION = Version("0.4")
def is_dicom(path: pathlib.Path) -> bool:
"""Check if the input is a DICOM file.
Args:
path (pathlib.Path): Path to the file to check.
Returns:
bool: True if the file is a DICOM file.
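    Examples:
        The check is based on the ``.dcm`` suffix (or, for a directory,
        the presence of ``.dcm`` files), so no pixel data is read:
        >>> import pathlib
        >>> is_dicom(pathlib.Path("slide.dcm"))
        True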
"""
path = pathlib.Path(path)
is_dcm = path.suffix.lower() == ".dcm"
is_dcm_dir = path.is_dir() and any(
p.suffix.lower() == ".dcm" for p in path.iterdir()
)
return is_dcm or is_dcm_dir
def is_tiled_tiff(path: pathlib.Path) -> bool:
"""Check if the input is a tiled TIFF file.
Args:
path (pathlib.Path):
Path to the file to check.
Returns:
bool:
True if the file is a tiled TIFF file.
"""
path = pathlib.Path(path)
try:
tif = tifffile.TiffFile(path)
except tifffile.TiffFileError:
return False
return tif.pages[0].is_tiled
def is_zarr(path: pathlib.Path) -> bool:
"""Check if the input is a Zarr file.
Args:
path (pathlib.Path):
Path to the file to check.
Returns:
bool:
True if the file is a Zarr file.
"""
path = pathlib.Path(path)
try:
_ = zarr.open(path, mode="r")
return True
except Exception: # noqa: PIE786 # skipcq: PYL-W0703
return False
def is_ngff(
path: pathlib.Path,
min_version: Version = MIN_NGFF_VERSION,
max_version: Version = MAX_NGFF_VERSION,
) -> bool:
"""Check if the input is a NGFF file.
Args:
path (pathlib.Path):
Path to the file to check.
min_version (Tuple[int, ...]):
Minimum version of the NGFF file to be considered valid.
Returns:
bool:
True if the file is a NGFF file.
"""
path = pathlib.Path(path)
zattrs_path = path / ".zattrs"
if not zattrs_path.is_file():
return False
with open(zattrs_path, "rb") as fh:
group_attrs = json.load(fh)
try:
multiscales: Multiscales = group_attrs["multiscales"]
omero = group_attrs["omero"]
_ARRAY_DIMENSIONS = group_attrs["_ARRAY_DIMENSIONS"] # noqa N806
if not all(
[
isinstance(multiscales, list),
isinstance(_ARRAY_DIMENSIONS, list),
isinstance(omero, dict),
all(isinstance(m, dict) for m in multiscales),
]
):
logger.warning(
"The NGFF file is not valid. "
"The multiscales, _ARRAY_DIMENSIONS and omero attributes "
"must be present and of the correct type."
)
return False
except KeyError:
return False
multiscales_versions = {
Version(scale["version"]) for scale in multiscales if "version" in scale
}
omero_version: Optional[str] = omero.get("version")
if omero_version:
omero_version: Version = Version(omero_version)
if omero_version < min_version:
            logger.warning(
                "The minimum supported version of the NGFF file is %s. "
                "But the omero version in the file is %s.",
                min_version,
                omero_version,
            )
return False
if omero_version > max_version:
            logger.warning(
                "The maximum supported version of the NGFF file is %s. "
                "But the omero version in the file is %s.",
                max_version,
                omero_version,
            )
return True
if len(multiscales_versions) > 1:
logger.warning(
"Found multiple versions for NGFF multiscales: %s",
multiscales_versions,
)
if any(version < min_version for version in multiscales_versions):
logger.warning(
"The minimum supported version of the NGFF file is %s. "
"But the versions of the multiscales in the file are %s.",
min_version,
multiscales_versions,
)
return False
if any(version > max_version for version in multiscales_versions):
logger.warning(
"The maximum supported version of the NGFF file is %s. "
"But the versions of the multiscales in the file are %s.",
max_version,
multiscales_versions,
)
return True
return is_zarr(path)
class WSIReader:
"""Base whole slide image (WSI) reader class.
This class defines functions for reading pixel data and metadata
from whole slide image (WSI) files.
Attributes:
input_path (pathlib.Path):
Input path to WSI file.
Args:
input_img (str, :obj:`pathlib.Path`, :obj:`ndarray` or :obj:`.WSIReader`):
Input path to WSI.
mpp (:obj:`tuple` or :obj:`list` or :obj:`None`, optional):
The MPP of the WSI. If not provided, the MPP is approximated
from the objective power.
power (:obj:`float` or :obj:`None`, optional):
The objective power of the WSI. If not provided, the power
is approximated from the MPP.
"""
@staticmethod # noqa: A003
def open( # noqa: A003
input_img: Union[str, pathlib.Path, np.ndarray, WSIReader],
mpp: Optional[Tuple[Number, Number]] = None,
power: Optional[Number] = None,
**kwargs,
) -> "WSIReader":
"""Returns an appropriate :class:`.WSIReader` object.
Args:
input_img (str, pathlib.Path, :obj:`numpy.ndarray` or :obj:`.WSIReader`):
Input to create a WSI object from. Supported types of
input are: `str` and :obj:`pathlib.Path` which point to the
location on the disk where image is stored,
:class:`numpy.ndarray` in which the input image in the
form of numpy array (HxWxC) is stored, or :obj:`.WSIReader`
which is an already created tiatoolbox WSI handler. In
the latter case, the function directly passes the
                input_img to the output.
mpp (tuple):
(x, y) tuple of the MPP in the units of the input image.
power (float):
Objective power of the input image.
Returns:
WSIReader:
An object with base :class:`.WSIReader` as base class.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open(input_img="./sample.svs")
"""
# Validate inputs
if not isinstance(input_img, (WSIReader, np.ndarray, str, pathlib.Path)):
raise TypeError(
"Invalid input: Must be a WSIRead, numpy array, string or pathlib.Path"
)
if isinstance(input_img, np.ndarray):
return VirtualWSIReader(input_img, mpp=mpp, power=power)
if isinstance(input_img, WSIReader):
return input_img
# Input is a string or pathlib.Path, normalise to pathlib.Path
input_path = pathlib.Path(input_img)
WSIReader.verify_supported_wsi(input_path)
# Handle special cases first (DICOM, Zarr/NGFF, OME-TIFF)
if is_dicom(input_path):
return DICOMWSIReader(input_path, mpp=mpp, power=power)
_, _, suffixes = utils.misc.split_path_name_ext(input_path)
last_suffix = suffixes[-1]
if last_suffix == ".db":
return AnnotationStoreReader(input_path, **kwargs)
if last_suffix in (".zarr",):
if not is_ngff(input_path):
raise FileNotSupported(
f"File {input_path} does not appear to be a v0.4 NGFF zarr."
)
return NGFFWSIReader(input_path, mpp=mpp, power=power)
if suffixes[-2:] in ([".ome", ".tiff"],):
return TIFFWSIReader(input_path, mpp=mpp, power=power)
if last_suffix in (".tif", ".tiff") and is_tiled_tiff(input_path):
try:
return OpenSlideWSIReader(input_path, mpp=mpp, power=power)
except openslide.OpenSlideError:
return TIFFWSIReader(input_path, mpp=mpp, power=power)
# Handle homogeneous cases (based on final suffix)
def np_virtual_wsi(
input_path: np.ndarray, *args, **kwargs
) -> "VirtualWSIReader":
"""Create a virtual WSI from a numpy array."""
return VirtualWSIReader(input_path, *args, **kwargs)
suffix_to_reader = {
".npy": np_virtual_wsi,
".jp2": OmnyxJP2WSIReader,
".jpeg": VirtualWSIReader,
".jpg": VirtualWSIReader,
".png": VirtualWSIReader,
".tif": VirtualWSIReader,
".tiff": VirtualWSIReader,
}
if last_suffix in suffix_to_reader:
return suffix_to_reader[last_suffix](input_path, mpp=mpp, power=power)
# Try openslide last
return OpenSlideWSIReader(input_path, mpp=mpp, power=power)
@staticmethod
def verify_supported_wsi(input_path: pathlib.Path) -> None:
"""Verify that an input image is supported.
Args:
input_path (:class:`pathlib.Path`):
Input path to WSI.
Raises:
FileNotSupported:
If the input image is not supported.
"""
if is_ngff(input_path) or is_dicom(input_path):
return
_, _, suffixes = utils.misc.split_path_name_ext(input_path)
if suffixes and suffixes[-1] not in [
".svs",
".npy",
".ndpi",
".mrxs",
".tif",
".tiff",
".jp2",
".png",
".jpg",
".jpeg",
".zarr",
".db",
]:
raise FileNotSupported(f"File {input_path} is not a supported file format.")
def __init__(
self,
input_img: Union[str, pathlib.Path, np.ndarray, AnnotationStore],
mpp: Optional[Tuple[Number, Number]] = None,
power: Optional[Number] = None,
) -> None:
if isinstance(input_img, (np.ndarray, AnnotationStore)):
self.input_path = None
else:
self.input_path = pathlib.Path(input_img)
if not self.input_path.exists():
raise FileNotFoundError(f"Input path does not exist: {self.input_path}")
self._m_info = None
# Set a manual mpp value
if mpp and isinstance(mpp, Number):
mpp = (mpp, mpp)
if mpp and (not hasattr(mpp, "__len__") or len(mpp) != 2):
raise TypeError("`mpp` must be a number or iterable of length 2.")
self._manual_mpp = tuple(mpp) if mpp else None
# Set a manual power value
if power and not isinstance(power, Number):
raise TypeError("`power` must be a number.")
self._manual_power = power
@property
def info(self) -> WSIMeta:
"""WSI metadata property.
This property is cached and only generated on the first call.
Returns:
WSIMeta:
An object containing normalized slide metadata
"""
# In Python>=3.8 this could be replaced with functools.cached_property
if self._m_info is not None:
return copy.deepcopy(self._m_info)
self._m_info = self._info()
if self._manual_mpp:
self._m_info.mpp = np.array(self._manual_mpp)
if self._manual_power:
self._m_info.objective_power = self._manual_power
return self._m_info
@info.setter
def info(self, meta: WSIMeta) -> None:
"""WSI metadata setter.
Args:
meta (WSIMeta): Metadata object.
"""
self._m_info = meta
def _info(self) -> WSIMeta:
"""WSI metadata internal getter used to update info property.
Missing values for MPP and objective power are approximated and
a warning raised. Objective power is calculated as the mean of
the :func:utils.transforms.mpp2common_objective_power in x and
y. MPP (x and y) is approximated using objective power via
:func:utils.transforms.objective_power2mpp.
Returns:
WSIMeta:
An object containing normalized slide metadata.
"""
raise NotImplementedError
def _find_optimal_level_and_downsample(
self, resolution: Resolution, units: str, precision: int = 3
) -> Tuple[int, np.ndarray]:
"""Find the optimal level to read at for a desired resolution and units.
The optimal level is the most downscaled level of the image
pyramid (or multi-resolution layer) which is larger than the
desired target resolution. The returned scale is the downsample
factor required, post read, to achieve the desired resolution.
Args:
resolution (float or tuple(float)):
Resolution to find optimal read parameters for
units (str):
Units of the scale. Allowed values are the same as for
`WSIReader._relative_level_scales`
            precision (int, optional):
                Decimal places to use when finding optimal scale. This
                can be adjusted to avoid errors when an unnecessarily
                high precision is used, e.g. a scale of 1 + 1e-10 is
                insignificantly greater than 1 in most cases. Defaults
                to 3.
Returns:
tuple:
Optimal read level and scale factor between the optimal
level and the target scale (usually <= 1):
- :py:obj:`int` - Optimal read level.
- :class:`numpy.ndarray` - Scale factor in X and Y.
"""
level_scales = self.info.relative_level_scales(resolution, units)
level_resolution_sufficient = [
all(np.round(x, decimals=precision) <= 1) for x in level_scales
]
# Check if level 0 is lower resolution than required (scale > 1)
if not any(level_resolution_sufficient):
level = 0
else:
            # Find the last level with relative scale <= 1.
# Note: np.argmax finds the index of the first True element.
# Here it is used on a reversed list to find the first
# element <=1, which is the same element as the last <=1
# element when counting forward in the regular list.
reverse_index = np.argmax(level_resolution_sufficient[::-1])
# Convert the index from the reversed list to the regular index (level)
level = (len(level_scales) - 1) - reverse_index
scale = level_scales[level]
# Check for requested resolution > than baseline resolution
if any(np.array(scale) > 1):
logger.warning(
"Read: Scale > 1."
"This means that the desired resolution is higher"
" than the WSI baseline (maximum encoded resolution)."
" Interpolation of read regions may occur.",
)
return level, scale
def find_read_rect_params(
self,
location: IntPair,
size: IntPair,
resolution: Resolution,
units: str,
precision: int = 3,
) -> Tuple[int, IntPair, IntPair, NumPair, IntPair]:
"""Find optimal parameters for reading a rect at a given resolution.
Reading the image at full baseline resolution and re-sampling to
the desired resolution would require a large amount of memory
and be very slow. This function checks the other resolutions
        stored in the WSI's pyramid of resolutions to find the lowest
        resolution (i.e. the most downsampled level) which is still at
        a higher resolution than the requested output resolution.
In addition to finding this 'optimal level', the scale factor to
apply after reading in order to obtain the desired resolution is
found along with conversions of the location and size into level
and baseline coordinates.
Args:
location (tuple(int)):
Location in terms of the baseline image (level 0)
resolution.
size (tuple(int)):
Desired output size in pixels (width, height) tuple.
resolution (float):
Desired output resolution.
units (str):
Units of scale, default = "level". Supported units are:
- microns per pixel ('mpp')
- objective power ('power')
- pyramid / resolution level ('level')
- pixels per baseline pixel ("baseline")
precision (int, optional):
Decimal places to use when finding optimal scale. See
:func:`find_optimal_level_and_downsample` for more.
Returns:
tuple:
Parameters for reading the requested region.
- :py:obj:`int` - Optimal read level.
- :py:obj:`tuple` - Read location in level coordinates.
- :py:obj:`int` - X location.
- :py:obj:`int` - Y location.
- :py:obj:`tuple` - Region size in level coordinates.
- :py:obj:`int` - Width.
- :py:obj:`int` - Height.
- :py:obj:`tuple` - Scaling to apply after level read.
- :py:obj:`float` - X scale factor.
- :py:obj:`float` - Y scale factor.
- :py:obj:`tuple` - Region size in baseline coordinates.
- :py:obj:`int` - Width.
- :py:obj:`int` - Height.
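        Examples:
            An illustrative sketch; ``wsi`` is assumed to be an open
            :class:`.WSIReader` instance:
            >>> (
            ...     read_level,
            ...     level_location,
            ...     level_size,
            ...     post_read_scale,
            ...     baseline_size,
            ... ) = wsi.find_read_rect_params(
            ...     location=(0, 0),
            ...     size=(256, 256),
            ...     resolution=0.5,
            ...     units="mpp",
            ... )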
"""
read_level, post_read_scale_factor = self._find_optimal_level_and_downsample(
resolution, units, precision
)
info = self.info
level_downsample = info.level_downsamples[read_level]
baseline_read_size = np.round(
np.array(size) * level_downsample / post_read_scale_factor
).astype(int)
level_read_size = np.round(np.array(size) / post_read_scale_factor).astype(int)
level_location = np.round(np.array(location) / level_downsample).astype(int)
return (
read_level,
level_location,
level_read_size,
post_read_scale_factor,
baseline_read_size,
)
def _find_read_params_at_resolution(
self, location: IntPair, size: IntPair, resolution: Resolution, units: str
) -> Tuple[int, NumPair, IntPair, IntPair, IntPair, IntPair]:
"""Works similarly to `_find_read_rect_params`.
Return the information necessary for scaling. While
`_find_read_rect_params` assumes location to be at baseline.
This function assumes location to be at requested resolution.
Args:
location (tuple(int)):
Location in the requested resolution system.
size (tuple(int)):
Desired output size in pixels (width, height) tuple and
in the requested resolution system.
resolution (float):
Desired output resolution.
units (str):
Units of scale, default = "level". Supported units are:
- microns per pixel ('mpp') - objective power ('power')
- pyramid / resolution level ('level') - pixels per
baseline pixel ("baseline")
Returns:
tuple:
Parameters for reading the requested region:
- :py:obj:`int` - Optimal read level.
- :py:obj:`tuple` - Scaling to apply after level read to
achieve desired output resolution.
- :py:obj:`float` - X scale factor.
- :py:obj:`float` - Y scale factor.
- :py:obj:`tuple` - Region size in read level
coordinates.
- :py:obj:`int` - Width.
- :py:obj:`int` - Height.
- :py:obj:`tuple` - Region location in read level
coordinates.
- :py:obj:`int` - X location.
- :py:obj:`int` - Y location.
- :py:obj:`tuple` - Region size in level 0 coordinates.
- :py:obj:`int` - Width.
- :py:obj:`int` - Height.
- :py:obj:`tuple` - Region location level 0 coordinates.
- :py:obj:`int` - X location.
- :py:obj:`int` - Y location.
"""
(
read_level,
# read_level to requested resolution (full)
read_level_to_resolution_scale_factor,
) = self._find_optimal_level_and_downsample(
resolution,
units,
)
info = self.info
        # Do we need a sanity check for the input form?
requested_location = np.array(location)
requested_size = np.array(size)
baseline_to_read_level_scale_factor = 1 / info.level_downsamples[read_level]
baseline_to_resolution_scale_factor = (
baseline_to_read_level_scale_factor * read_level_to_resolution_scale_factor
)
size_at_baseline = requested_size / baseline_to_resolution_scale_factor
location_at_baseline = (
requested_location.astype(np.float32) / baseline_to_resolution_scale_factor
)
size_at_read_level = requested_size / read_level_to_resolution_scale_factor
location_at_read_level = (
requested_location.astype(np.float32)
/ read_level_to_resolution_scale_factor
)
output = (
size_at_read_level,
location_at_read_level,
size_at_baseline,
location_at_baseline,
)
output = tuple(np.ceil(v).astype(np.int64) for v in output)
return (
read_level,
read_level_to_resolution_scale_factor,
) + output
def _bounds_at_resolution_to_baseline(
self, bounds: Bounds, resolution: Resolution, units: str
) -> Bounds:
"""Find corresponding bounds in baseline.
        Find the corresponding bounds in the baseline reference frame,
        given that the input bounds are at the requested resolution.
"""
bounds_at_resolution = np.array(bounds)
tl_at_resolution = bounds_at_resolution[:2] # is in XY
br_at_resolution = bounds_at_resolution[2:]
size_at_resolution = br_at_resolution - tl_at_resolution
# Find parameters for optimal read
(
_, # read_level,
_, # read_level_to_requested_scale_factor,
_, # size_at_read_level,
_, # location_at_read_level,
size_at_baseline,
location_at_baseline,
) = self._find_read_params_at_resolution(
tl_at_resolution, size_at_resolution, resolution, units
)
tl_at_baseline = location_at_baseline
br_at_baseline = tl_at_baseline + size_at_baseline
return np.concatenate([tl_at_baseline, br_at_baseline]) # bounds at baseline
def slide_dimensions(
self, resolution: Resolution, units: str, precisions: int = 3
) -> IntPair:
"""Return the size of WSI at requested resolution.
Args:
            resolution (int or float or tuple(float)):
                Resolution at which to compute the slide dimensions.
            units (str):
                Units of resolution.
            precisions (int, optional):
                Decimal places to use when finding the optimal scale.
Returns:
:py:obj:`tuple`:
Size of the WSI in (width, height).
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> slide_shape = wsi.slide_dimensions(0.55, 'mpp')
"""
wsi_shape_at_baseline = self.info.slide_dimensions
# Find parameters for optimal read
(
_,
_,
wsi_shape_at_resolution,
_,
) = self._find_read_bounds_params(
[0, 0] + list(wsi_shape_at_baseline), resolution, units, precisions
)
return wsi_shape_at_resolution
def _find_read_bounds_params(
self, bounds: Bounds, resolution: Resolution, units: str, precision: int = 3
) -> Tuple[int, IntBounds, IntPair, IntPair, np.ndarray]:
"""Find optimal parameters for reading bounds at a given resolution.
Args:
bounds (tuple(int)):
Tuple of (start_x, start_y, end_x, end_y) i.e. (left,
top, right, bottom) of the region in baseline reference
frame.
resolution (float):
                Desired output resolution.
            units (str):
                Units of scale, default = "level". Supported units are:
microns per pixel (mpp), objective power (power),
pyramid / resolution level (level), pixels per baseline
pixel (baseline).
precision (int, optional):
Decimal places to use when finding optimal scale. See
:func:`find_optimal_level_and_downsample` for more.
Returns:
tuple:
Parameters for reading the requested bounds area:
- :py:obj:`int` - Optimal read level
- :py:obj:`tuple` - Bounds of the region in level coordinates
- :py:obj:`int` - Left (start x value)
- :py:obj:`int` - Top (start y value)
- :py:obj:`int` - Right (end x value)
- :py:obj:`int` - Bottom (end y value)
- :py:obj:`tuple` - Expected size of the output image
- :py:obj:`int` - Width
- :py:obj:`int` - Height
            - :class:`numpy.ndarray` - Scale factor of resampling to
              apply after reading.
"""
start_x, start_y, end_x, end_y = bounds
read_level, post_read_scale_factor = self._find_optimal_level_and_downsample(
resolution, units, precision
)
info = self.info
level_downsample = info.level_downsamples[read_level]
location = np.array([start_x, start_y])
size = np.array([end_x - start_x, end_y - start_y])
level_size = np.round(np.array(size) / level_downsample).astype(int)
level_location = np.round(location / level_downsample).astype(int)
level_bounds = (*level_location, *(level_location + level_size))
output_size = np.round(level_size * post_read_scale_factor).astype(int)
return (read_level, level_bounds, output_size, post_read_scale_factor)
@staticmethod
def _check_unit_conversion_integrity(
input_unit, output_unit, baseline_mpp, baseline_power
):
"""Checks integrity of units before unit conversion.
Args:
            input_unit (str):
                Input units.
            output_unit (str):
                Output units.
            baseline_mpp:
                Baseline microns per pixel (mpp).
            baseline_power:
                Baseline objective power (magnification).
        Raises:
            ValueError:
                If any of the unit conversion checks fail.
"""
if input_unit not in {"mpp", "power", "level", "baseline"}:
raise ValueError(
"Invalid input_unit: argument accepts only one of the following "
" options: `'mpp'`, `'power'`, `'level'`, `'baseline'`."
)
if output_unit not in {"mpp", "power", "baseline", None}:
raise ValueError(
"Invalid output_unit: argument accepts only one of the following"
" options: `'mpp'`, `'power'`, `'baseline'`, or None (to return"
" all units)."
)
if baseline_mpp is None and input_unit == "mpp":
raise ValueError(
"Missing 'mpp': `input_unit` has been set to 'mpp' while there "
"is no information about 'mpp' in WSI meta data."
)
if baseline_power is None and input_unit == "power":
raise ValueError(
"Missing 'objective_power': `input_unit` has been set to 'power' while "
"there is no information about 'objective_power' in WSI meta data."
)
def _prepare_output_dict(
self, input_unit, input_res, baseline_mpp, baseline_power
) -> dict:
"""Calculate output_res as dictionary based on input_unit and resolution."""
output_dict = {
"mpp": None,
"power": None,
"baseline": None,
}
if input_unit == "mpp":
if isinstance(input_res, (list, tuple, np.ndarray)):
output_dict["mpp"] = np.array(input_res)
else:
output_dict["mpp"] = np.array([input_res, input_res])
output_dict["baseline"] = baseline_mpp[0] / output_dict["mpp"][0]
if baseline_power is not None:
output_dict["power"] = output_dict["baseline"] * baseline_power
return output_dict
if input_unit == "power":
output_dict["baseline"] = input_res / baseline_power
output_dict["power"] = input_res
elif input_unit == "level":
level_scales = self.info.relative_level_scales(input_res, input_unit)
output_dict["baseline"] = level_scales[0]
if baseline_power is not None:
output_dict["power"] = output_dict["baseline"] * baseline_power
else: # input_unit == 'baseline'
output_dict["baseline"] = input_res
if baseline_power is not None:
output_dict["power"] = baseline_power * output_dict["baseline"]
if baseline_mpp is not None:
output_dict["mpp"] = baseline_mpp / output_dict["baseline"]
return output_dict
def convert_resolution_units(self, input_res, input_unit, output_unit=None):
"""Converts resolution value between different units.
This function accepts a resolution and its units in the input
and converts it to all other units ('mpp', 'power', 'baseline').
To achieve resolution in 'mpp' and 'power' units in the output,
WSI metadata should contain `mpp` and `objective_power`
information, respectively.
Args:
input_res (float):
                The resolution to be converted to the other units.
            input_unit (str):
                The unit of the input resolution (`input_res`).
                Acceptable input units are 'mpp', 'power', 'baseline',
                and 'level'.
            output_unit (str):
                The desired unit to which `input_res` should be
                converted. Acceptable values are 'mpp', 'power', and
                'baseline'. If `output_unit` is not provided, the
                conversions to all acceptable units will be returned
                in a dictionary.
Returns:
output_res (float or dictionary):
Either a float which is the converted `input_res` to the
desired `output_unit` or a dictionary containing the
converted `input_res` to all acceptable units (`'mpp'`,
`'power'`, `'baseline'`). If there is not enough metadata
                to calculate a unit (such as `mpp` or `power`), its
                value will be set to None in the dictionary.
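        Examples:
            >>> from tiatoolbox.wsicore.wsireader import WSIReader
            >>> # A minimal usage sketch; the file path is illustrative
            >>> # and full output assumes the slide metadata contains
            >>> # both 'mpp' and 'objective_power'.
            >>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
            >>> # Convert 0.5 mpp to all other units
            >>> all_units = wsi.convert_resolution_units(0.5, "mpp")
            >>> # Convert 10x objective power to baseline units only
            >>> baseline = wsi.convert_resolution_units(10, "power", "baseline")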
"""
baseline_mpp = self.info.mpp
baseline_power = self.info.objective_power
self._check_unit_conversion_integrity(
input_unit, output_unit, baseline_mpp, baseline_power
)
output_dict = self._prepare_output_dict(
input_unit, input_res, baseline_mpp, baseline_power
)
out_res = output_dict[output_unit] if output_unit is not None else output_dict
if out_res is None:
logger.warning(
"Although unit conversion from input_unit has been done, the requested "
"output_unit is returned as None. Probably due to missing 'mpp' or "
"'objective_power' in slide's meta data.",
)
return out_res
def _find_tile_params(
self, tile_objective_value: Number
) -> Tuple[int, IntPair, int, Number]:
"""Find the params for save tiles."""
rescale = self.info.objective_power / tile_objective_value
if not rescale.is_integer():
raise ValueError(
"Tile objective value must be an integer multiple of the "
"objective power of the slide."
)
try:
level = np.log2(rescale)
if not level.is_integer():
raise ValueError
level = np.int_(level)
slide_dimension = self.info.level_dimensions[level]
rescale = 1
# Raise index error if desired pyramid level not embedded
# in level_dimensions
except IndexError:
level = 0
slide_dimension = self.info.level_dimensions[level]
rescale = np.int_(rescale)
logger.warning(
"Reading WSI at level 0. Desired tile_objective_value %s "
"not available.",
str(tile_objective_value),
)
except ValueError:
level = 0
slide_dimension = self.info.level_dimensions[level]
rescale = 1
logger.warning(
"Reading WSI at level 0. Reading at tile_objective_value %s "
"not allowed.",
str(tile_objective_value),
)
tile_objective_value = self.info.objective_power
return level, slide_dimension, rescale, tile_objective_value
def _read_rect_at_resolution(
self,
location: NumPair,
size: NumPair,
resolution: Resolution = 0,
units: str = "level",
interpolation: str = "optimise",
pad_mode: str = "constant",
pad_constant_values: Union[Number, Iterable[NumPair]] = 0,
**kwargs,
) -> np.ndarray:
"""Internal helper to perform `read_rect` at resolution.
        In actuality, `read_rect` at a resolution is synonymous with
        calling `read_bounds` at that resolution because `size` is
        already specified within the resolution coordinate system.
"""
tl = np.array(location)
br = location + np.array(size)
bounds = np.concatenate([tl, br])
return self.read_bounds(
bounds,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
coord_space="resolution",
**kwargs,
)
def read_rect(
self,
location: IntPair,
size: IntPair,
resolution: Resolution = 0,
units: str = "level",
interpolation: str = "optimise",
pad_mode: str = "constant",
pad_constant_values: Union[Number, Iterable[NumPair]] = 0,
coord_space: str = "baseline",
**kwargs,
) -> np.ndarray:
"""Read a region of the whole slide image at a location and size.
Location is in terms of the baseline image (level 0 / maximum
resolution), and size is the output image size.
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
        resolution 1.0.
The field of view varies with resolution. For a fixed field of
view see :func:`read_bounds`.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline
(level 0) reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image
size.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
                pyramid level / resolution layer 3.
units (str):
The units of resolution, default = "level". Supported
units are: microns per pixel (mpp), objective power
(power), pyramid / resolution level (level), pixels per
baseline pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by VirtualWSIReader. See class
docstrings for more information.
Returns:
:class:`numpy.ndarray`:
                Array of size MxNx3, where M=size[1] and N=size[0].
Example:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # Read a region at level 0 (baseline / full resolution)
>>> img = wsi.read_rect(location, size)
>>> # Read a region at 0.5 microns per pixel (mpp)
>>> img = wsi.read_rect(location, size, 0.5, "mpp")
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.5),
... units="mpp",
... )
Note: The field of view varies with resolution when using
:func:`read_rect`.
.. figure:: ../images/read_rect_tissue.png
:width: 512
:alt: Diagram illustrating read_rect
As the location is in the baseline reference frame but the size
(width and height) is the output image size, the field of view
therefore changes as resolution changes.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
.. figure:: ../images/read_rect-interpolated-reads.png
:width: 512
            :alt: Diagram illustrating read_rect interpolating between levels
When reading between the levels stored in the WSI, the
coordinates of the requested region are projected to the next
highest resolution. This resolution is then decoded and
downsampled to produce the desired output. This is a major
        source of variability in the time taken to perform a read
operation. Reads which require reading a large region before
downsampling will be significantly slower than reading at a
fixed level.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # The resolution can be different in x and y, e.g.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.75),
... units="mpp",
... )
>>> # Several units can be used including: objective power,
>>> # microns per pixel, pyramid/resolution level, and
>>> # fraction of baseline.
>>> # E.g. Read a region at an objective power of 10x
>>> img = wsi.read_rect(
... location,
... size,
... resolution=10,
... units="power",
... )
>>> # Read a region at pyramid / resolution level 1
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1,
... units="level",
... )
>>> # Read at a fractional level, this will linearly
>>> # interpolate the downsampling factor between levels.
>>> # E.g. if levels 0 and 1 have a downsampling of 1x and
>>> # 2x of baseline, then level 0.5 will correspond to a
>>> # downsampling factor 1.5x of baseline.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="level",
... )
>>> # Read a region at half of the full / baseline
>>> # resolution.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="baseline",
... )
>>> # Read at a higher resolution than the baseline
>>> # (interpolation applied to output)
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1.25,
... units="baseline",
... )
>>> # Assuming the image has a native mpp of 0.5,
>>> # interpolation will be applied here.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.25,
... units="mpp",
... )
"""
raise NotImplementedError
def read_bounds(
self,
bounds: Bounds,
resolution: Resolution = 0,
units: str = "level",
interpolation: str = "optimise",
pad_mode: str = "constant",
pad_constant_values: Union[Number, Iterable[NumPair]] = 0,
coord_space: str = "baseline",
**kwargs,
) -> np.ndarray:
"""Read a region of the whole slide image within given bounds.
Bounds are in terms of the baseline image (level 0 / maximum
resolution).
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
        resolution 1.0.
The output image size may be different to the width and height
of the bounds as the resolution will affect this. To read a
region with a fixed output image size see :func:`read_rect`.
Args:
bounds (tuple(int)):
By default, this is a tuple of (start_x, start_y, end_x,
end_y) i.e. (left, top, right, bottom) of the region in
baseline reference frame. However, with
`coord_space="resolution"`, the bound is expected to be
at the requested resolution system.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
                pyramid level / resolution layer 3.
units (str):
Units of resolution, default="level". Supported units
are: microns per pixel (mpp), objective power (power),
pyramid / resolution level (level), pixels per baseline
pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by :obj:`VirtualWSIReader`. See
class docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3 M=end_h-start_h, N=end_w-start_w
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from matplotlib import pyplot as plt
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> # Read a region at level 0 (baseline / full resolution)
>>> bounds = [1000, 2000, 2000, 3000]
>>> img = wsi.read_bounds(bounds)
>>> plt.imshow(img)
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_bounds(
... bounds,
... resolution=0,
... units="level",
... )
>>> plt.imshow(img)
Note: The field of view remains the same as resolution is varied
when using :func:`read_bounds`.
.. figure:: ../images/read_bounds_tissue.png
:width: 512
:alt: Diagram illustrating read_bounds
This is because the bounds are in the baseline (level 0)
reference frame. Therefore, varying the resolution does not
change what is visible within the output image.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
"""
raise NotImplementedError
def read_region(self, location: NumPair, level: int, size: IntPair) -> np.ndarray:
"""Read a region of the whole slide image (OpenSlide format args).
This function is to help with writing code which is backwards
compatible with OpenSlide. As such, it has the same arguments.
This internally calls :func:`read_rect` which should be
implemented by any :class:`WSIReader` subclass. Therefore, some
WSI formats which are not supported by OpenSlide, such as Omnyx
JP2 files, may also be readable with the same syntax.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the level 0
reference frame.
level (int):
The level number.
size (tuple(int)):
(width, height) tuple giving the region size.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3.
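        Example:
            >>> from tiatoolbox.wsicore.wsireader import WSIReader
            >>> # A minimal usage sketch; the file path is illustrative.
            >>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
            >>> # Location is in level 0 coordinates while size is at the
            >>> # requested level, mirroring openslide.OpenSlide.read_region.
            >>> region = wsi.read_region(location=(0, 0), level=1, size=(256, 256))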
"""
return self.read_rect(
location=location, size=size, resolution=level, units="level"
)
def slide_thumbnail(self, resolution: Resolution = 1.25, units: str = "power"):
"""Read the whole slide image thumbnail (1.25x by default).
For more information on resolution and units see
:func:`read_rect`
Args:
resolution (int or float or tuple(float)):
Resolution to read thumbnail at, default = 1.25
(objective power)
units (str):
Resolution units, default="power".
Returns:
:class:`numpy.ndarray`:
Thumbnail image.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> slide_thumbnail = wsi.slide_thumbnail()
"""
slide_dimensions = self.info.slide_dimensions
bounds = (0, 0, *slide_dimensions)
return self.read_bounds(bounds, resolution=resolution, units=units)
def tissue_mask(
self,
method: str = "otsu",
resolution: Resolution = 1.25,
units: str = "power",
**masker_kwargs,
) -> "VirtualWSIReader":
"""Create a tissue mask and wrap it in a VirtualWSIReader.
For the morphological method, mpp is used for calculating the
scale of the morphological operations. If no mpp is available,
objective power is used instead to estimate a good scale. This
can be overridden with a custom size, via passing a
`kernel_size` key-word argument in `masker_kwargs`, see
:class:`tissuemask.MorphologicalMasker` for more.
Args:
method (str):
Method to use for creating the mask. Defaults
to 'otsu'. Methods are: otsu, morphological.
resolution (float):
Resolution to produce the mask at.
Defaults to 1.25.
units (str):
Units of resolution. Defaults to "power".
**masker_kwargs:
Extra kwargs passed to the masker class.
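        Examples:
            >>> from tiatoolbox.wsicore.wsireader import WSIReader
            >>> # A minimal usage sketch; the file path is illustrative.
            >>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
            >>> # Otsu mask at the default 1.25x objective power
            >>> mask_reader = wsi.tissue_mask()
            >>> # The mask is wrapped in a reader, so it can be read
            >>> # like a WSI, e.g. as a thumbnail:
            >>> mask_thumb = mask_reader.slide_thumbnail(1.25, "power")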
"""
from tiatoolbox.tools import tissuemask
thumbnail = self.slide_thumbnail(resolution, units)
if method not in ["otsu", "morphological"]:
raise ValueError(f"Invalid tissue masking method: {method}.")
if method == "morphological":
mpp = None
power = None
if units == "mpp":
mpp = resolution
elif units == "power":
power = resolution
masker = tissuemask.MorphologicalMasker(
mpp=mpp, power=power, **masker_kwargs
)
elif method == "otsu":
masker = tissuemask.OtsuTissueMasker(**masker_kwargs)
mask_img = masker.fit_transform([thumbnail])[0]
return VirtualWSIReader(mask_img.astype(np.uint8), info=self.info, mode="bool")
def save_tiles(
self,
output_dir: Union[str, pathlib.Path] = "tiles",
tile_objective_value: int = 20,
tile_read_size: Tuple[int, int] = (5000, 5000),
tile_format: str = ".jpg",
verbose: bool = False,
) -> None:
"""Generate image tiles from whole slide images.
Args:
            output_dir (str or :obj:`pathlib.Path`):
                Output directory to save the tiles.
            tile_objective_value (int):
                Objective value at which tile is generated, default = 20.
            tile_read_size (tuple(int)):
                Tile (width, height), default = (5000, 5000).
            tile_format (str):
                File format to save image tiles, default = ".jpg".
            verbose (bool):
                Print output, default = False.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> wsi.save_tiles(output_dir='./dev_test',
... tile_objective_value=10,
... tile_read_size=(2000, 2000))
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> slide_param = wsi.info
"""
if verbose:
logger.setLevel(logging.DEBUG)
logger.debug("Processing %s.", self.input_path.name)
output_dir = pathlib.Path(output_dir, self.input_path.name)
level, slide_dimension, rescale, tile_objective_value = self._find_tile_params(
tile_objective_value
)
tile_read_size = np.multiply(tile_read_size, rescale)
slide_h = slide_dimension[1]
slide_w = slide_dimension[0]
tile_h = tile_read_size[1]
tile_w = tile_read_size[0]
output_dir = pathlib.Path(output_dir)
output_dir.mkdir(parents=True)
data = []
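        # Number of tiles required to cover the slide in each direction,
        # equivalent to ceil(slide extent / tile extent).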
vertical_tiles = int(math.ceil((slide_h - tile_h) / tile_h + 1))
horizontal_tiles = int(math.ceil((slide_w - tile_w) / tile_w + 1))
for iter_tot, (h, w) in enumerate(np.ndindex(vertical_tiles, horizontal_tiles)):
start_h = h * tile_h
end_h = (h * tile_h) + tile_h
start_w = w * tile_w
end_w = (w * tile_w) + tile_w
end_h = min(end_h, slide_h)
end_w = min(end_w, slide_w)
# convert to baseline reference frame
bounds = start_w, start_h, end_w, end_h
baseline_bounds = tuple(bound * (2**level) for bound in bounds)
# Read image region
im = self.read_bounds(baseline_bounds, level)
logger.debug(
"Tile %d: start_w: %d, end_w: %d, start_h: %d, end_h: %d, "
"width: %d, height: %d",
iter_tot,
start_w,
end_w,
start_h,
end_h,
end_w - start_w,
end_h - start_h,
)
# Rescale to the correct objective value
if rescale != 1:
im = utils.transforms.imresize(img=im, scale_factor=rescale)
img_save_name = (
"_".join(
[
"Tile",
str(tile_objective_value),
str(int(start_w / rescale)),
str(int(start_h / rescale)),
]
)
+ tile_format
)
utils.misc.imwrite(image_path=output_dir / img_save_name, img=im)
data.append(
[
iter_tot,
img_save_name,
start_w,
end_w,
start_h,
end_h,
im.shape[0],
im.shape[1],
]
)
# Save information on each slide to relate to the whole slide image
df = pd.DataFrame(
data,
columns=[
"iter",
"Tile_Name",
"start_w",
"end_w",
"start_h",
"end_h",
"size_w",
"size_h",
],
)
df.to_csv(output_dir / "Output.csv", index=False)
# Save slide thumbnail
slide_thumb = self.slide_thumbnail()
utils.misc.imwrite(
output_dir / f"slide_thumbnail{tile_format}", img=slide_thumb
)
if verbose:
logger.setLevel(logging.INFO)
class OpenSlideWSIReader(WSIReader):
"""Reader for OpenSlide supported whole-slide images.
Supported WSI formats:
- Aperio (.svs, .tif)
- Hamamatsu (.vms, .vmu, .ndpi)
- Leica (.scn)
- MIRAX (.mrxs)
- Philips (.tiff)
- Sakura (.svslide)
- Trestle (.tif)
- Ventana (.bif, .tif)
- Generic tiled TIFF (.tif)
Attributes:
openslide_wsi (:obj:`openslide.OpenSlide`)
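    Examples:
        >>> from tiatoolbox.wsicore.wsireader import OpenSlideWSIReader
        >>> # A minimal usage sketch; the file path is illustrative.
        >>> wsi = OpenSlideWSIReader(input_img="./CMU-1.ndpi")
        >>> slide_meta = wsi.info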
"""
def __init__(
self,
input_img: Union[str, pathlib.Path, np.ndarray],
mpp: Optional[Tuple[Number, Number]] = None,
power: Optional[Number] = None,
) -> None:
super().__init__(input_img=input_img, mpp=mpp, power=power)
self.openslide_wsi = openslide.OpenSlide(filename=str(self.input_path))
def read_rect(
self,
location,
size,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image at a location and size.
Location is in terms of the baseline image (level 0 / maximum
resolution), and size is the output image size.
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
        resolution 1.0.
The field of view varies with resolution. For a fixed field of
view see :func:`read_bounds`.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline
(level 0) reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image
size.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
                pyramid level / resolution layer 3.
units (str):
The units of resolution, default = "level". Supported
units are: microns per pixel (mpp), objective power
(power), pyramid / resolution level (level), pixels per
baseline pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by VirtualWSIReader. See class
docstrings for more information.
Returns:
:class:`numpy.ndarray`:
                Array of size MxNx3, where M=size[1] and N=size[0].
Example:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # Read a region at level 0 (baseline / full resolution)
>>> img = wsi.read_rect(location, size)
>>> # Read a region at 0.5 microns per pixel (mpp)
>>> img = wsi.read_rect(location, size, 0.5, "mpp")
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.5),
... units="mpp",
... )
Note: The field of view varies with resolution when using
:func:`read_rect`.
.. figure:: ../images/read_rect_tissue.png
:width: 512
:alt: Diagram illustrating read_rect
As the location is in the baseline reference frame but the size
(width and height) is the output image size, the field of view
therefore changes as resolution changes.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
.. figure:: ../images/read_rect-interpolated-reads.png
:width: 512
            :alt: Diagram illustrating read_rect interpolating between levels
When reading between the levels stored in the WSI, the
coordinates of the requested region are projected to the next
highest resolution. This resolution is then decoded and
downsampled to produce the desired output. This is a major
        source of variability in the time taken to perform a read
operation. Reads which require reading a large region before
downsampling will be significantly slower than reading at a
fixed level.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # The resolution can be different in x and y, e.g.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.75),
... units="mpp",
... )
>>> # Several units can be used including: objective power,
>>> # microns per pixel, pyramid/resolution level, and
>>> # fraction of baseline.
>>> # E.g. Read a region at an objective power of 10x
>>> img = wsi.read_rect(
... location,
... size,
... resolution=10,
... units="power",
... )
>>> # Read a region at pyramid / resolution level 1
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1,
... units="level",
... )
>>> # Read at a fractional level, this will linearly
>>> # interpolate the downsampling factor between levels.
>>> # E.g. if levels 0 and 1 have a downsampling of 1x and
>>> # 2x of baseline, then level 0.5 will correspond to a
>>> # downsampling factor 1.5x of baseline.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="level",
... )
>>> # Read a region at half of the full / baseline
>>> # resolution.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="baseline",
... )
>>> # Read at a higher resolution than the baseline
>>> # (interpolation applied to output)
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1.25,
... units="baseline",
... )
>>> # Assuming the image has a native mpp of 0.5,
>>> # interpolation will be applied here.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.25,
... units="mpp",
... )
"""
if coord_space == "resolution":
return self._read_rect_at_resolution(
location,
size,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Find parameters for optimal read
(
read_level,
level_location,
level_size,
post_read_scale,
_,
) = self.find_read_rect_params(
location=location,
size=size,
resolution=resolution,
units=units,
)
wsi = self.openslide_wsi
# Read at optimal level and corrected read size
im_region = wsi.read_region(location, read_level, level_size)
im_region = np.array(im_region)
# Apply padding outside the slide area
im_region = utils.image.crop_and_pad_edges(
bounds=utils.transforms.locsize2bounds(level_location, level_size),
max_dimensions=self.info.level_dimensions[read_level],
region=im_region,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Resize to correct scale if required
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size,
interpolation=interpolation,
)
return utils.transforms.background_composite(image=im_region)
def read_bounds(
self,
bounds,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image within given bounds.
Bounds are in terms of the baseline image (level 0 / maximum
resolution).
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
        resolution 1.0.
The output image size may be different to the width and height
of the bounds as the resolution will affect this. To read a
region with a fixed output image size see :func:`read_rect`.
Args:
bounds (tuple(int)):
By default, this is a tuple of (start_x, start_y, end_x,
end_y) i.e. (left, top, right, bottom) of the region in
baseline reference frame. However, with
`coord_space="resolution"`, the bound is expected to be
at the requested resolution system.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
                pyramid level / resolution layer 3.
units (str):
Units of resolution, default="level". Supported units
are: microns per pixel (mpp), objective power (power),
pyramid / resolution level (level), pixels per baseline
pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by :obj:`VirtualWSIReader`. See
class docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3 M=end_h-start_h, N=end_w-start_w
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from matplotlib import pyplot as plt
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> # Read a region at level 0 (baseline / full resolution)
>>> bounds = [1000, 2000, 2000, 3000]
>>> img = wsi.read_bounds(bounds)
>>> plt.imshow(img)
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_bounds(
... bounds,
... resolution=0,
... units="level",
... )
>>> plt.imshow(img)
Note: The field of view remains the same as resolution is varied
when using :func:`read_bounds`.
.. figure:: ../images/read_bounds_tissue.png
:width: 512
:alt: Diagram illustrating read_bounds
This is because the bounds are in the baseline (level 0)
reference frame. Therefore, varying the resolution does not
change what is visible within the output image.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
"""
# convert from requested to `baseline`
bounds_at_baseline = bounds
if coord_space == "resolution":
bounds_at_baseline = self._bounds_at_resolution_to_baseline(
bounds, resolution, units
)
_, size_at_requested = utils.transforms.bounds2locsize(bounds)
# don't use the `output_size` (`size_at_requested`) here
# because the rounding error at `bounds_at_baseline` leads to
# different `size_at_requested` (keeping same read resolution
# but base image is of different scale)
(
read_level,
bounds_at_read_level,
_,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
        else:  # duplicated portion with VirtualWSIReader; consider factoring out
# Find parameters for optimal read
(
read_level,
bounds_at_read_level,
size_at_requested,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
wsi = self.openslide_wsi
# Read at optimal level and corrected read size
location_at_baseline = bounds_at_baseline[:2]
_, size_at_read_level = utils.transforms.bounds2locsize(bounds_at_read_level)
im_region = wsi.read_region(
location=location_at_baseline, level=read_level, size=size_at_read_level
)
im_region = np.array(im_region)
# Apply padding outside the slide area
im_region = utils.image.crop_and_pad_edges(
bounds=bounds_at_read_level,
max_dimensions=self.info.level_dimensions[read_level],
region=im_region,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Resize to correct scale if required
if coord_space == "resolution":
im_region = utils.transforms.imresize(
img=im_region,
output_size=size_at_requested,
interpolation=interpolation,
)
else:
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size_at_requested,
interpolation=interpolation,
)
return utils.transforms.background_composite(image=im_region)
@staticmethod
def _estimate_mpp(props):
"""Find microns per pixel (mpp)
Args:
props (:class:`OpenSlide.properties`):
OpenSlide properties.
Returns:
tuple:
Estimated microns per pixel (mpp).
"""
# Check OpenSlide for mpp metadata first
try:
mpp_x = float(props[openslide.PROPERTY_NAME_MPP_X])
mpp_y = float(props[openslide.PROPERTY_NAME_MPP_Y])
return mpp_x, mpp_y
# Fallback to TIFF resolution units and convert to mpp
except KeyError:
tiff_res_units = props.get("tiff.ResolutionUnit")
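            # e.g. an X resolution of 20,000 pixels per centimetre
            # corresponds to 10,000 / 20,000 = 0.5 microns per pixel
            # (values are illustrative).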
try:
x_res = float(props["tiff.XResolution"])
y_res = float(props["tiff.YResolution"])
mpp_x = utils.misc.ppu2mpp(x_res, tiff_res_units)
mpp_y = utils.misc.ppu2mpp(y_res, tiff_res_units)
logger.warning(
"Metadata: Falling back to TIFF resolution tag"
" for microns-per-pixel (MPP)."
)
return mpp_x, mpp_y
except KeyError:
logger.warning("Metadata: Unable to determine microns-per-pixel (MPP).")
# Return None value if metadata cannot be determined.
return None
def _info(self):
"""Openslide WSI meta data reader.
Returns:
WSIMeta:
Metadata information.
"""
props = self.openslide_wsi.properties
if openslide.PROPERTY_NAME_OBJECTIVE_POWER in props:
objective_power = float(props[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
else:
objective_power = None
slide_dimensions = self.openslide_wsi.level_dimensions[0]
level_count = self.openslide_wsi.level_count
level_dimensions = self.openslide_wsi.level_dimensions
level_downsamples = self.openslide_wsi.level_downsamples
vendor = props.get(openslide.PROPERTY_NAME_VENDOR)
mpp = self._estimate_mpp(props)
# Fallback to calculating objective power from mpp
if objective_power is None:
if mpp is not None:
objective_power = utils.misc.mpp2common_objective_power(
float(np.mean(mpp))
)
logger.warning(
"Metadata: Objective power inferred from microns-per-pixel (MPP).",
)
else:
logger.warning("Metadata: Unable to determine objective power.")
return WSIMeta(
file_path=self.input_path,
axes="YXS",
objective_power=objective_power,
slide_dimensions=slide_dimensions,
level_count=level_count,
level_dimensions=level_dimensions,
level_downsamples=level_downsamples,
vendor=vendor,
mpp=mpp,
raw=dict(**props),
)
class OmnyxJP2WSIReader(WSIReader):
"""Class for reading Omnyx JP2 images.
Supported WSI formats:
- Omnyx JPEG-2000 (.jp2)
Attributes:
glymur_wsi (:obj:`glymur.Jp2k`)
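    Examples:
        >>> from tiatoolbox.wsicore.wsireader import OmnyxJP2WSIReader
        >>> # A minimal usage sketch; the file path is illustrative and
        >>> # must point to an Omnyx JPEG-2000 slide.
        >>> wsi = OmnyxJP2WSIReader(input_img="./sample.jp2")
        >>> slide_meta = wsi.info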
"""
def __init__(
self,
input_img: Union[str, pathlib.Path, np.ndarray],
mpp: Optional[Tuple[Number, Number]] = None,
power: Optional[Number] = None,
) -> None:
super().__init__(input_img=input_img, mpp=mpp, power=power)
import glymur
glymur.set_option("lib.num_threads", os.cpu_count() or 1)
self.glymur_wsi = glymur.Jp2k(filename=str(self.input_path))
def read_rect(
self,
location,
size,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image at a location and size.
Location is in terms of the baseline image (level 0 / maximum
resolution), and size is the output image size.
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
        resolution 1.0.
The field of view varies with resolution. For a fixed field of
view see :func:`read_bounds`.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline
(level 0) reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image
size.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
                pyramid level / resolution layer 3.
units (str):
The units of resolution, default = "level". Supported
units are: microns per pixel (mpp), objective power
(power), pyramid / resolution level (level), pixels per
baseline pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by VirtualWSIReader. See class
docstrings for more information.
Returns:
:class:`numpy.ndarray`:
                Array of size MxNx3, where M=size[1] and N=size[0].
Example:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # Read a region at level 0 (baseline / full resolution)
>>> img = wsi.read_rect(location, size)
>>> # Read a region at 0.5 microns per pixel (mpp)
>>> img = wsi.read_rect(location, size, 0.5, "mpp")
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.5),
... units="mpp",
... )
Note: The field of view varies with resolution when using
:func:`read_rect`.
.. figure:: ../images/read_rect_tissue.png
:width: 512
:alt: Diagram illustrating read_rect
As the location is in the baseline reference frame but the size
(width and height) is the output image size, the field of view
therefore changes as resolution changes.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
.. figure:: ../images/read_rect-interpolated-reads.png
:width: 512
            :alt: Diagram illustrating read_rect interpolating between levels
When reading between the levels stored in the WSI, the
coordinates of the requested region are projected to the next
highest resolution. This resolution is then decoded and
downsampled to produce the desired output. This is a major
        source of variability in the time taken to perform a read
operation. Reads which require reading a large region before
downsampling will be significantly slower than reading at a
fixed level.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # The resolution can be different in x and y, e.g.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.75),
... units="mpp",
... )
>>> # Several units can be used including: objective power,
>>> # microns per pixel, pyramid/resolution level, and
>>> # fraction of baseline.
>>> # E.g. Read a region at an objective power of 10x
>>> img = wsi.read_rect(
... location,
... size,
... resolution=10,
... units="power",
... )
>>> # Read a region at pyramid / resolution level 1
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1,
... units="level",
... )
>>> # Read at a fractional level, this will linearly
>>> # interpolate the downsampling factor between levels.
>>> # E.g. if levels 0 and 1 have a downsampling of 1x and
>>> # 2x of baseline, then level 0.5 will correspond to a
>>> # downsampling factor 1.5x of baseline.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="level",
... )
>>> # Read a region at half of the full / baseline
>>> # resolution.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="baseline",
... )
>>> # Read at a higher resolution than the baseline
>>> # (interpolation applied to output)
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1.25,
... units="baseline",
... )
>>> # Assuming the image has a native mpp of 0.5,
>>> # interpolation will be applied here.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.25,
... units="mpp",
... )
"""
if coord_space == "resolution":
return self._read_rect_at_resolution(
location,
size,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Find parameters for optimal read
(
read_level,
_,
_,
post_read_scale,
baseline_read_size,
) = self.find_read_rect_params(
location=location,
size=size,
resolution=resolution,
units=units,
)
stride = 2**read_level
glymur_wsi = self.glymur_wsi
bounds = utils.transforms.locsize2bounds(
location=location, size=baseline_read_size
)
im_region = utils.image.safe_padded_read(
image=glymur_wsi,
bounds=bounds,
stride=stride,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size,
interpolation=interpolation,
)
return utils.transforms.background_composite(image=im_region)
def read_bounds(
self,
bounds,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image within given bounds.
Bounds are in terms of the baseline image (level 0 / maximum
resolution).
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
        resolution 1.0.
The output image size may be different to the width and height
of the bounds as the resolution will affect this. To read a
region with a fixed output image size see :func:`read_rect`.
Args:
bounds (tuple(int)):
By default, this is a tuple of (start_x, start_y, end_x,
end_y) i.e. (left, top, right, bottom) of the region in
baseline reference frame. However, with
`coord_space="resolution"`, the bound is expected to be
at the requested resolution system.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
                pyramid level / resolution layer 3.
units (str):
Units of resolution, default="level". Supported units
are: microns per pixel (mpp), objective power (power),
pyramid / resolution level (level), pixels per baseline
pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by :obj:`VirtualWSIReader`. See
class docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3 M=end_h-start_h, N=end_w-start_w
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from matplotlib import pyplot as plt
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> # Read a region at level 0 (baseline / full resolution)
>>> bounds = [1000, 2000, 2000, 3000]
>>> img = wsi.read_bounds(bounds)
>>> plt.imshow(img)
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_bounds(
... bounds,
... resolution=0,
... units="level",
... )
>>> plt.imshow(img)
Note: The field of view remains the same as resolution is varied
when using :func:`read_bounds`.
.. figure:: ../images/read_bounds_tissue.png
:width: 512
:alt: Diagram illustrating read_bounds
This is because the bounds are in the baseline (level 0)
reference frame. Therefore, varying the resolution does not
change what is visible within the output image.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
"""
bounds_at_baseline = bounds
if coord_space == "resolution":
bounds_at_baseline = self._bounds_at_resolution_to_baseline(
bounds, resolution, units
)
_, size_at_requested = utils.transforms.bounds2locsize(bounds)
# don't use the `output_size` (`size_at_requested`) here
# because the rounding error at `bounds_at_baseline` leads to
# different `size_at_requested` (keeping same read resolution
# but base image is of different scale)
(
read_level,
_,
_,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
        else:  # duplicated portion with VirtualWSIReader; consider factoring out
# Find parameters for optimal read
(
read_level,
_, # bounds_at_read_level,
size_at_requested,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
glymur_wsi = self.glymur_wsi
stride = 2**read_level
im_region = utils.image.safe_padded_read(
image=glymur_wsi,
bounds=bounds_at_baseline,
stride=stride,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Resize to correct scale if required
if coord_space == "resolution":
im_region = utils.transforms.imresize(
img=im_region,
output_size=size_at_requested,
interpolation=interpolation,
)
else:
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size_at_requested,
interpolation=interpolation,
)
return utils.transforms.background_composite(image=im_region)
def _info(self):
"""JP2 metadata reader.
Returns:
WSIMeta:
Metadata information.
"""
import glymur
glymur_wsi = self.glymur_wsi
box = glymur_wsi.box
description = box[3].xml.find("description")
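        # The XML description is expected to contain entries such as
        # "AppMag = 40" and "MPP = 0.2467" (values are illustrative),
        # from which objective power and mpp are parsed below.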
matches = re.search(r"(?<=AppMag = )\d\d", description.text)
objective_power = np.int_(matches[0])
image_header = box[2].box[0]
slide_dimensions = (image_header.width, image_header.height)
# Determine level_count
cod = None
for segment in glymur_wsi.codestream.segment:
if isinstance(segment, glymur.codestream.CODsegment):
cod = segment
if cod is None:
logger.warning(
"Metadata: JP2 codestream missing COD segment! "
"Cannot determine number of decompositions (levels)"
)
level_count = 1
else:
level_count = cod.num_res
level_downsamples = [2**n for n in range(level_count)]
level_dimensions = [
(int(slide_dimensions[0] / 2**n), int(slide_dimensions[1] / 2**n))
for n in range(level_count)
]
vendor = "Omnyx JP2"
matches = re.search(r"(?<=MPP = )\d*\.\d+", description.text)
mpp_x = float(matches[0])
mpp_y = float(matches[0])
mpp = [mpp_x, mpp_y]
return WSIMeta(
file_path=self.input_path,
axes="YXS",
objective_power=objective_power,
slide_dimensions=slide_dimensions,
level_count=level_count,
level_dimensions=level_dimensions,
level_downsamples=level_downsamples,
vendor=vendor,
mpp=mpp,
raw=self.glymur_wsi.box,
)
class VirtualWSIReader(WSIReader):
"""Class for reading non-pyramidal images e.g., visual fields.
Supported formats:
- .jpg
- .png
- :class:`numpy.ndarray`
This reader uses :func:`tiatoolbox.utils.image.sub_pixel_read` to
allow reading low resolution images as if they are larger i.e. with
'virtual' pyramid resolutions. This is useful for reading low
resolution masks as if they were stretched to overlay a higher
resolution WSI.
    Extra key-word arguments given to :func:`~WSIReader.read_rect` and
    :func:`~WSIReader.read_bounds` will be passed to
:func:`~tiatoolbox.utils.image.sub_pixel_read`.
Attributes:
img (:class:`numpy.ndarray`)
mode (str)
Args:
input_img (str, :obj:`pathlib.Path`, :class:`numpy.ndarray`):
Input path to WSI.
info (WSIMeta):
Metadata for the virtual wsi.
mode (str):
Mode of the input image. Default is 'rgb'. Allowed values
are: rgb, bool.
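
Example:
    A minimal sketch of reading a low resolution mask as if it
    were at the baseline resolution of a WSI (the file names here
    are placeholders):

    >>> from tiatoolbox.wsicore.wsireader import (
    ...     VirtualWSIReader,
    ...     WSIReader,
    ... )
    >>> wsi = WSIReader.open("sample.svs")
    >>> # Attach the WSI metadata so mask reads are mapped from
    >>> # the (virtual) baseline coordinates of the WSI.
    >>> mask = VirtualWSIReader(
    ...     "sample_mask.png",
    ...     info=wsi.info,
    ...     mode="bool",
    ... )
    >>> region = mask.read_rect(location=(0, 0), size=(256, 256))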
"""
def __init__(
self,
input_img: Union[str, pathlib.Path, np.ndarray],
mpp: Optional[Tuple[Number, Number]] = None,
power: Optional[Number] = None,
info: WSIMeta = None,
mode="rgb",
) -> None:
super().__init__(
input_img=input_img,
mpp=mpp,
power=power,
)
if mode.lower() not in ["rgb", "bool"]:
raise ValueError("Invalid mode.")
self.mode = mode.lower()
if isinstance(input_img, np.ndarray):
self.img = input_img
else:
self.img = utils.misc.imread(self.input_path)
if info is not None:
self._m_info = info
def _info(self):
"""Visual Field metadata getter.
This generates a WSIMeta object for the slide if none exists.
There is 1 level with dimensions equal to the image and no mpp,
objective power, or vendor data.
Returns:
WSIMeta:
Metadata information.
"""
param = WSIMeta(
file_path=self.input_path,
axes="YSX",
objective_power=None,
# align to XY to match with OpenSlide
slide_dimensions=self.img.shape[:2][::-1],
level_count=1,
level_dimensions=(self.img.shape[:2][::-1],),
level_downsamples=[1.0],
vendor=None,
mpp=None,
raw=None,
)
if self._m_info is None:
self._m_info = param
return self._m_info
def _find_params_from_baseline(self, location, baseline_read_size):
"""Convert read parameters from (virtual) baseline coordinates.
Args:
location (tuple(int)):
Location of the region to read in (virtual) baseline
coordinates.
baseline_read_size (tuple(int)):
Size of the region to read in (virtual) baseline
coordinates.
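
Returns:
    tuple:
        Location and size scaled into the stored image's
        coordinate space.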
"""
baseline_size = np.array(self.info.slide_dimensions)
image_size = np.array(self.img.shape[:2][::-1])
size_ratio = image_size / baseline_size
image_location = np.array(location, dtype=np.float32) * size_ratio
read_size = np.array(baseline_read_size) * size_ratio
return image_location, read_size
def read_rect(
self,
location,
size,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image at a location and size.
Location is in terms of the baseline image (level 0 / maximum
resolution), and size is the output image size.
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
resolution 1.0.
The field of view varies with resolution. For a fixed field of
view see :func:`read_bounds`.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline
(level 0) reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image
size.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
pyramid level / resolution layer 3.
units (str):
The units of resolution, default = "level". Supported
units are: microns per pixel (mpp), objective power
(power), pyramid / resolution level (level), pixels per
baseline pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `location` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently, only used by VirtualWSIReader. See class
docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3, where M=size[1] and N=size[0].
Example:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # Read a region at level 0 (baseline / full resolution)
>>> img = wsi.read_rect(location, size)
>>> # Read a region at 0.5 microns per pixel (mpp)
>>> img = wsi.read_rect(location, size, 0.5, "mpp")
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.5),
... units="mpp",
... )
Note: The field of view varies with resolution when using
:func:`read_rect`.
.. figure:: ../images/read_rect_tissue.png
:width: 512
:alt: Diagram illustrating read_rect
As the location is in the baseline reference frame but the size
(width and height) is the output image size, the field of view
therefore changes as resolution changes.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
resolution of the image), then bicubic interpolation is applied
to the output image.
.. figure:: ../images/read_rect-interpolated-reads.png
:width: 512
:alt: Diagram illustrating read_rect interpolating between levels
When reading between the levels stored in the WSI, the
coordinates of the requested region are projected to the next
highest resolution. This resolution is then decoded and
downsampled to produce the desired output. This is a major
source of variability in the time taken to perform a read
operation. Reads which require reading a large region before
downsampling will be significantly slower than reading at a
fixed level.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # The resolution can be different in x and y, e.g.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.75),
... units="mpp",
... )
>>> # Several units can be used including: objective power,
>>> # microns per pixel, pyramid/resolution level, and
>>> # fraction of baseline.
>>> # E.g. Read a region at an objective power of 10x
>>> img = wsi.read_rect(
... location,
... size,
... resolution=10,
... units="power",
... )
>>> # Read a region at pyramid / resolution level 1
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1,
... units="level",
... )
>>> # Read at a fractional level, this will linearly
>>> # interpolate the downsampling factor between levels.
>>> # E.g. if levels 0 and 1 have a downsampling of 1x and
>>> # 2x of baseline, then level 0.5 will correspond to a
>>> # downsampling factor 1.5x of baseline.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="level",
... )
>>> # Read a region at half of the full / baseline
>>> # resolution.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="baseline",
... )
>>> # Read at a higher resolution than the baseline
>>> # (interpolation applied to output)
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1.25,
... units="baseline",
... )
>>> # Assuming the image has a native mpp of 0.5,
>>> # interpolation will be applied here.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.25,
... units="mpp",
... )
"""
if coord_space == "resolution":
return self._read_rect_at_resolution(
location,
size,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Find parameters for optimal read
(
_,
_,
_,
_,
baseline_read_size,
) = self.find_read_rect_params(
location=location,
size=size,
resolution=resolution,
units=units,
)
image_location, image_read_size = self._find_params_from_baseline(
location, baseline_read_size
)
bounds = utils.transforms.locsize2bounds(
location=image_location,
size=image_read_size,
)
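# When interpolation is disabled, pass output_size=None so that
# sub_pixel_read returns the raw read region without resizing.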
output_size = None if interpolation in [None, "none"] else size
im_region = utils.image.sub_pixel_read(
self.img,
bounds,
output_size=output_size,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
read_kwargs=kwargs,
)
if self.mode == "rgb":
return utils.transforms.background_composite(image=im_region)
return im_region
def read_bounds(
self,
bounds,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image within given bounds.
Bounds are in terms of the baseline image (level 0 / maximum
resolution).
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
resolution 1.0.
The output image size may be different to the width and height
of the bounds as the resolution will affect this. To read a
region with a fixed output image size see :func:`read_rect`.
Args:
bounds (tuple(int)):
By default, this is a tuple of (start_x, start_y, end_x,
end_y) i.e. (left, top, right, bottom) of the region in
baseline reference frame. However, with
`coord_space="resolution"`, the bound is expected to be
at the requested resolution system.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
pyramid level / resolution layer 3.
units (str):
Units of resolution, default="level". Supported units
are: microns per pixel (mpp), objective power (power),
pyramid / resolution level (level), pixels per baseline
pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by :obj:`VirtualWSIReader`. See
class docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3, where M=end_y-start_y and N=end_x-start_x.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from matplotlib import pyplot as plt
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> # Read a region at level 0 (baseline / full resolution)
>>> bounds = [1000, 2000, 2000, 3000]
>>> img = wsi.read_bounds(bounds)
>>> plt.imshow(img)
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_bounds(
... bounds,
... resolution=0,
... units="level",
... )
>>> plt.imshow(img)
Note: The field of view remains the same as resolution is varied
when using :func:`read_bounds`.
.. figure:: ../images/read_bounds_tissue.png
:width: 512
:alt: Diagram illustrating read_bounds
This is because the bounds are in the baseline (level 0)
reference frame. Therefore, varying the resolution does not
change what is visible within the output image.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
resolution of the image), then bicubic interpolation is applied
to the output image.
"""
# convert from requested to `baseline`
bounds_at_baseline = bounds
if coord_space == "resolution":
bounds_at_baseline = self._bounds_at_resolution_to_baseline(
bounds, resolution, units
)
_, size_at_requested = utils.transforms.bounds2locsize(bounds)
# * Find parameters for optimal read
# Don't use `output_size` (`size_at_requested`) here because
# rounding error in `bounds_at_baseline` can lead to a different
# `size_at_requested` (the read resolution is the same, but the
# base image is at a different scale).
_, _, _, post_read_scale = self._find_read_bounds_params(
bounds_at_baseline,
resolution=resolution,
units=units,
)
else:
# * Find parameters for optimal read
_, _, size_at_requested, post_read_scale = self._find_read_bounds_params(
bounds_at_baseline,
resolution=resolution,
units=units,
)
location_at_read, size_at_read = self._find_params_from_baseline(
*utils.transforms.bounds2locsize(bounds_at_baseline)
)
bounds_at_read = utils.transforms.locsize2bounds(location_at_read, size_at_read)
if interpolation in [None, "none"]:
interpolation = None
im_region = utils.image.sub_pixel_read(
self.img,
bounds_at_read,
output_size=size_at_requested,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
read_kwargs=kwargs,
)
if coord_space == "resolution":
# do this to enforce output size is as defined by input bounds
im_region = utils.transforms.imresize(
img=im_region, output_size=size_at_requested
)
else:
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size_at_requested,
)
if self.mode == "rgb":
return utils.transforms.background_composite(image=im_region)
return im_region
class ArrayView:
"""An object for viewing a zarr array with a different index ordering.
Used to allow YXS index order reads for arrays with axes in other
orders such as SYX. Currently supported axes are:
- YXS
- SYX
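
Example:
    A minimal sketch of the index remapping this view performs
    (the array contents here are illustrative):

    >>> import numpy as np
    >>> import zarr
    >>> from tiatoolbox.wsicore.wsireader import ArrayView
    >>> syx = zarr.array(np.zeros((3, 100, 200)))
    >>> view = ArrayView(syx, axes="SYX")
    >>> view.shape
    (100, 200, 3)
    >>> view[:10, :10, :].shape
    (10, 10, 3)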
"""
def __init__(self, array: zarr.Array, axes: str) -> None:
"""Initialise the view object.
Args:
array (zarr.Array):
Zarr Array to read from.
axes (str):
Axes ordering string. Allowed values are YXS and SYX.
"""
self.array = array
self.axes = axes
self._shape = dict(zip(self.axes, self.array.shape))
@property
def shape(self):
    """Return the array shape in YXS (or YXC) order."""
    try:
return tuple(self._shape[c] for c in "YXC")
except KeyError:
return tuple(self._shape[c] for c in "YXS")
def __getitem__(self, index):
# Normalize to a tuple of length = len(self.axes)
if not isinstance(index, tuple):
index = (index,)
while len(index) < len(self.axes):
index = (*index, slice(None))
if self.axes in ("YXS", "YXC"):
return self.array[index]
if self.axes in ("SYX", "CYX"):
y, x, s = index
index = (s, y, x)
return np.rollaxis(self.array[index], 0, 3)
raise ValueError(f"Unsupported axes `{self.axes}`.")
class TIFFWSIReader(WSIReader):
"""Define Tiff WSI Reader."""
def __init__(
self,
input_img: Union[str, pathlib.Path, np.ndarray],
mpp: Optional[Tuple[Number, Number]] = None,
power: Optional[Number] = None,
series="auto",
cache_size=2**28,
) -> None:
super().__init__(input_img=input_img, mpp=mpp, power=power)
self.tiff = tifffile.TiffFile(self.input_path)
self._axes = self.tiff.pages[0].axes
# Flag which is True if the image is a simple single page tile TIFF
is_single_page_tiled = all(
[
self.tiff.pages[0].is_tiled,
# Not currently supporting multi-page images
not self.tiff.is_multipage,
# Currently only supporting single page generic tiled TIFF
len(self.tiff.pages) == 1,
]
)
if not any([self.tiff.is_svs, self.tiff.is_ome, is_single_page_tiled]):
raise ValueError("Unsupported TIFF WSI format.")
self.series_n = series
if self.tiff.series is None or len(self.tiff.series) == 0: # pragma: no cover
raise Exception("TIFF does not contain any valid series.")
# Find the largest series if series="auto"
if self.series_n == "auto":
all_series = self.tiff.series or []
def page_area(page: tifffile.TiffPage) -> float:
"""Calculate the area of a page."""
return np.prod(self._canonical_shape(page.shape)[:2])
series_areas = [page_area(s.pages[0]) for s in all_series] # skipcq
self.series_n = np.argmax(series_areas)
self._tiff_series = self.tiff.series[self.series_n]
self._zarr_store = tifffile.imread(
self.input_path, series=self.series_n, aszarr=True
)
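# Wrap the zarr store in an LRU cache (default max_size 2**28
# bytes, i.e. 256 MiB) to avoid repeatedly decoding frequently
# accessed tiles.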
self._zarr_lru_cache = zarr.LRUStoreCache(self._zarr_store, max_size=cache_size)
self._zarr_group = zarr.open(self._zarr_lru_cache)
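# A store holding a single array (e.g. a one-level TIFF) is
# wrapped in a group so that all images can be handled uniformly
# as multi-level.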
if not isinstance(self._zarr_group, zarr.hierarchy.Group):
group = zarr.hierarchy.group()
group[0] = self._zarr_group
self._zarr_group = group
self.level_arrays = {
int(key): ArrayView(array, axes=self.info.axes)
for key, array in self._zarr_group.items()
}
def _canonical_shape(self, shape):
"""Make a level shape tuple in YXS order.
Args:
shape (tuple(int)):
Input shape tuple.
Returns:
tuple:
Shape in YXS order.
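For example, a hypothetical SYX shape of (3, 100, 200)
becomes (100, 200, 3).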
"""
if self._axes == "YXS":
return shape
if self._axes == "SYX":
return np.roll(shape, -1)
raise ValueError(f"Unsupported axes `{self._axes}`.")
def _parse_svs_metadata(self) -> dict:
"""Extract SVS specific metadata.
Returns:
dict:
Dictionary of kwargs for WSIMeta.
"""
raw = {}
mpp = None
objective_power = None
vendor = "Aperio"
description = self.tiff.pages[0].description
raw["Description"] = description
parts = description.split("|")
description_headers, key_value_pairs = parts[0], parts[1:]
description_headers = description_headers.split(";")
software, photometric_info = description_headers[0].splitlines()
raw["Software"] = software
raw["Photometric Info"] = photometric_info
def parse_svs_tag(string: str) -> Tuple[str, Union[Number, str]]:
"""Parse SVS key-value string.
Infers type(s) of data by trial and error with a fallback to
the original string type.
Args:
string (str):
Key-value string in SVS format: "key=value".
Returns:
tuple:
Key-value pair.
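
Example:
    Illustrative key-value strings and their parsed results:

    >>> parse_svs_tag("AppMag = 40")
    ('AppMag', 40)
    >>> parse_svs_tag("MPP = 0.2528")
    ('MPP', 0.2528)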
"""
pair = string.split("=")
if len(pair) != 2:
raise ValueError(
"Invalid metadata. Expected string of the format 'key=value'."
)
key, value_string = pair
key = key.strip()
value_string = value_string.strip()
def us_date(string: str) -> datetime:
"""Returns datetime parsed according to US date format."""
return datetime.strptime(string, r"%m/%d/%y")
def time(string: str) -> datetime:
"""Returns datetime parsed according to HMS format."""
return datetime.strptime(string, r"%H:%M:%S")
casting_precedence = [us_date, time, int, float]
value = value_string
for cast in casting_precedence:
try:
value = cast(value_string)
return key, value
except ValueError:
continue
return key, value
svs_tags = dict(parse_svs_tag(string) for string in key_value_pairs)
raw["SVS Tags"] = svs_tags
mpp = svs_tags.get("MPP")
if mpp is not None:
mpp = [mpp] * 2
objective_power = svs_tags.get("AppMag")
return {
"objective_power": objective_power,
"vendor": vendor,
"mpp": mpp,
"raw": raw,
}
def _get_ome_xml(self) -> ElementTree.Element:
"""Parse OME-XML from the description of the first IFD (page).
Returns:
ElementTree.Element:
OME-XML root element.
"""
description = self.tiff.pages[0].description
return ElementTree.fromstring(description)
def _parse_ome_metadata(self) -> dict:
"""Extract OME specific metadata.
Returns:
dict:
Dictionary of kwargs for WSIMeta.
"""
# The OME-XML should be in each IFD but is optional. It must be
# present in the first IFD. We simply get the description from
# the first IFD.
xml = self._get_ome_xml()
objective_power = self._get_ome_objective_power(xml)
mpp = self._get_ome_mpp(xml)
return {
"objective_power": objective_power,
"vendor": None,
"mpp": mpp,
"raw": {
"Description": self.tiff.pages[0].description,
"OME-XML": xml,
},
}
def _get_ome_objective_power(
self, xml: Optional[ElementTree.Element] = None
) -> Optional[float]:
"""Get the objective power from the OME-XML.
Args:
xml (ElementTree.Element, optional):
OME-XML root element. Defaults to None. If None, the
OME-XML will be parsed from the first IFD.
Returns:
float:
Objective power.
"""
xml = xml or self._get_ome_xml()
namespaces = {"ome": "http://www.openmicroscopy.org/Schemas/OME/2016-06"}
xml_series = xml.findall("ome:Image", namespaces)[self.series_n]
instrument_ref = xml_series.find("ome:InstrumentRef", namespaces)
if instrument_ref is None:
return None
objective_settings = xml_series.find("ome:ObjectiveSettings", namespaces)
instrument_ref_id = instrument_ref.attrib["ID"]
objective_settings_id = objective_settings.attrib["ID"]
instruments = {
instrument.attrib["ID"]: instrument
for instrument in xml.findall("ome:Instrument", namespaces)
}
objectives = {
(instrument_id, objective.attrib["ID"]): objective
for instrument_id, instrument in instruments.items()
for objective in instrument.findall("ome:Objective", namespaces)
}
try:
objective = objectives[(instrument_ref_id, objective_settings_id)]
return float(objective.attrib.get("NominalMagnification"))
except KeyError as e:
raise KeyError(
"No matching Instrument for image InstrumentRef in OME-XML."
) from e
def _get_ome_mpp(
self, xml: Optional[ElementTree.Element] = None
) -> Optional[List[float]]:
"""Get the microns per pixel from the OME-XML.
Args:
xml (ElementTree.Element, optional):
OME-XML root element. Defaults to None. If None, the
OME-XML will be parsed from the first IFD.
Returns:
Optional[List[float]]:
Microns per pixel.
"""
xml = xml or self._get_ome_xml()
namespaces = {"ome": "http://www.openmicroscopy.org/Schemas/OME/2016-06"}
xml_series = xml.findall("ome:Image", namespaces)[self.series_n]
xml_pixels = xml_series.find("ome:Pixels", namespaces)
mppx = xml_pixels.attrib.get("PhysicalSizeX")
mppy = xml_pixels.attrib.get("PhysicalSizeY")
if mppx is not None and mppy is not None:
return [mppx, mppy]
if mppx is not None or mppy is not None:
logger.warning("Only one MPP value found. Using it for both X and Y.")
return [mppx or mppy] * 2
return None
def _parse_generic_tiff_metadata(self) -> dict:
"""Extract generic tiled metadata.
Returns:
dict: Dictionary of kwargs for WSIMeta.
"""
mpp = None
objective_power = None
vendor = "Generic"
description = self.tiff.pages[0].description
raw = {"Description": description}
# Check for MPP in the tiff resolution tags
# res_units: 1 = undefined, 2 = inch, 3 = centimeter
res_units = self.tiff.pages[0].tags.get("ResolutionUnit")
res_x = self.tiff.pages[0].tags.get("XResolution")
res_y = self.tiff.pages[0].tags.get("YResolution")
if (
all(x is not None for x in [res_units, res_x, res_y])
and res_units.value != 1
):
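# Convert pixels-per-unit to microns-per-pixel, e.g. a
# hypothetical XResolution of (40000, 1) px/cm corresponds to
# 10000 / 40000 = 0.25 um/px.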
mpp = [
utils.misc.ppu2mpp(res_x.value[0] / res_x.value[1], res_units.value),
utils.misc.ppu2mpp(res_y.value[0] / res_y.value[1], res_units.value),
]
return {
"objective_power": objective_power,
"vendor": vendor,
"mpp": mpp,
"raw": raw,
}
def _info(self):
"""TIFF metadata constructor.
Returns:
WSIMeta:
Containing metadata.
"""
level_count = len(self._zarr_group)
level_dimensions = [
np.array(self._canonical_shape(p.shape)[:2][::-1])
for p in self._zarr_group.values()
]
slide_dimensions = level_dimensions[0]
level_downsamples = [(level_dimensions[0] / x)[0] for x in level_dimensions]
# The tags attribute object will not pickle or deepcopy,
# so a copy with only python values or tifffile enums is made.
tifffile_tags = self.tiff.pages[0].tags.items()
tiff_tags = {
code: {
"code": code,
"value": tag.value,
"name": tag.name,
"count": tag.count,
"type": tag.dtype,
}
for code, tag in tifffile_tags
}
if self.tiff.is_svs:
filetype_params = self._parse_svs_metadata()
elif self.tiff.is_ome:
filetype_params = self._parse_ome_metadata()
else:
filetype_params = self._parse_generic_tiff_metadata()
filetype_params["raw"]["TIFF Tags"] = tiff_tags
return WSIMeta(
file_path=self.input_path,
slide_dimensions=slide_dimensions,
axes=self._axes,
level_count=level_count,
level_dimensions=level_dimensions,
level_downsamples=level_downsamples,
**filetype_params,
)
def read_rect(
self,
location,
size,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image at a location and size.
Location is in terms of the baseline image (level 0 / maximum
resolution), and size is the output image size.
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
resolution 1.0.
The field of view varies with resolution. For a fixed field of
view see :func:`read_bounds`.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline
(level 0) reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image
size.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
pyramid level / resolution layer 3.
units (str):
The units of resolution, default = "level". Supported
units are: microns per pixel (mpp), objective power
(power), pyramid / resolution level (level), pixels per
baseline pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `location` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by VirtualWSIReader. See class
docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3, where M=size[1] and N=size[0].
Example:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # Read a region at level 0 (baseline / full resolution)
>>> img = wsi.read_rect(location, size)
>>> # Read a region at 0.5 microns per pixel (mpp)
>>> img = wsi.read_rect(location, size, 0.5, "mpp")
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.5),
... units="mpp",
... )
Note: The field of view varies with resolution when using
:func:`read_rect`.
.. figure:: ../images/read_rect_tissue.png
:width: 512
:alt: Diagram illustrating read_rect
As the location is in the baseline reference frame but the size
(width and height) is the output image size, the field of view
therefore changes as resolution changes.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
resolution of the image), then bicubic interpolation is applied
to the output image.
.. figure:: ../images/read_rect-interpolated-reads.png
:width: 512
:alt: Diagram illustrating read_rect interpolating between levels
When reading between the levels stored in the WSI, the
coordinates of the requested region are projected to the next
highest resolution. This resolution is then decoded and
downsampled to produce the desired output. This is a major
source of variability in the time taken to perform a read
operation. Reads which require reading a large region before
downsampling will be significantly slower than reading at a
fixed level.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # The resolution can be different in x and y, e.g.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.75),
... units="mpp",
... )
>>> # Several units can be used including: objective power,
>>> # microns per pixel, pyramid/resolution level, and
>>> # fraction of baseline.
>>> # E.g. Read a region at an objective power of 10x
>>> img = wsi.read_rect(
... location,
... size,
... resolution=10,
... units="power",
... )
>>> # Read a region at pyramid / resolution level 1
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1,
... units="level",
... )
>>> # Read at a fractional level, this will linearly
>>> # interpolate the downsampling factor between levels.
>>> # E.g. if levels 0 and 1 have a downsampling of 1x and
>>> # 2x of baseline, then level 0.5 will correspond to a
>>> # downsampling factor 1.5x of baseline.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="level",
... )
>>> # Read a region at half of the full / baseline
>>> # resolution.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="baseline",
... )
>>> # Read at a higher resolution than the baseline
>>> # (interpolation applied to output)
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1.25,
... units="baseline",
... )
>>> # Assuming the image has a native mpp of 0.5,
>>> # interpolation will be applied here.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.25,
... units="mpp",
... )
"""
if coord_space == "resolution":
im_region = self._read_rect_at_resolution(
location,
size,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
return utils.transforms.background_composite(im_region)
# Find parameters for optimal read
(
read_level,
_,
_,
post_read_scale,
baseline_read_size,
) = self.find_read_rect_params(
location=location,
size=size,
resolution=resolution,
units=units,
)
bounds = utils.transforms.locsize2bounds(
location=location, size=baseline_read_size
)
im_region = utils.image.safe_padded_read(
image=self.level_arrays[read_level],
bounds=bounds,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size,
interpolation=interpolation,
)
return utils.transforms.background_composite(image=im_region)
def read_bounds(
self,
bounds,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image within given bounds.
Bounds are in terms of the baseline image (level 0 / maximum
resolution).
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
resolution 1.0.
The output image size may be different to the width and height
of the bounds as the resolution will affect this. To read a
region with a fixed output image size see :func:`read_rect`.
Args:
bounds (tuple(int)):
By default, this is a tuple of (start_x, start_y, end_x,
end_y) i.e. (left, top, right, bottom) of the region in
baseline reference frame. However, with
`coord_space="resolution"`, the bound is expected to be
at the requested resolution system.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
pyramid level / resolution layer 3.
units (str):
Units of resolution, default="level". Supported units
are: microns per pixel (mpp), objective power (power),
pyramid / resolution level (level), pixels per baseline
pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by :obj:`VirtualWSIReader`. See
class docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3, where M=end_y-start_y and N=end_x-start_x.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from matplotlib import pyplot as plt
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> # Read a region at level 0 (baseline / full resolution)
>>> bounds = [1000, 2000, 2000, 3000]
>>> img = wsi.read_bounds(bounds)
>>> plt.imshow(img)
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_bounds(
... bounds,
... resolution=0,
... units="level",
... )
>>> plt.imshow(img)
Note: The field of view remains the same as resolution is varied
when using :func:`read_bounds`.
.. figure:: ../images/read_bounds_tissue.png
:width: 512
:alt: Diagram illustrating read_bounds
This is because the bounds are in the baseline (level 0)
reference frame. Therefore, varying the resolution does not
change what is visible within the output image.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
resolution of the image), then bicubic interpolation is applied
to the output image.
"""
bounds_at_baseline = bounds
if coord_space == "resolution":
bounds_at_baseline = self._bounds_at_resolution_to_baseline(
bounds, resolution, units
)
_, size_at_requested = utils.transforms.bounds2locsize(bounds)
# Don't use `output_size` (`size_at_requested`) here because
# rounding error in `bounds_at_baseline` can lead to a different
# `size_at_requested` (the read resolution is the same, but the
# base image is at a different scale).
(
read_level,
_,
_,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
else:  # duplicated portion with VirtualWSIReader; consider factoring out
# Find parameters for optimal read
(
read_level,
_,
size_at_requested,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
im_region = utils.image.sub_pixel_read(
image=self.level_arrays[read_level],
bounds=bounds_at_baseline,
output_size=size_at_requested,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
read_kwargs=kwargs,
)
if coord_space == "resolution":
# do this to enforce output size is as defined by input bounds
im_region = utils.transforms.imresize(
img=im_region, output_size=size_at_requested
)
else:
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size_at_requested,
)
return im_region
class DICOMWSIReader(WSIReader):
"""Defines DICOM WSI Reader."""
wsidicom = None
def __init__(
self,
input_img: Union[str, pathlib.Path, np.ndarray],
mpp: Optional[Tuple[Number, Number]] = None,
power: Optional[Number] = None,
) -> None:
from wsidicom import WsiDicom
super().__init__(input_img, mpp, power)
self.wsi = WsiDicom.open(input_img)
def _info(self) -> WSIMeta:
"""WSI metadata constructor.
Returns:
WSIMeta:
Containing metadata.
"""
level_dimensions = [
(level.size.width, level.size.height) for level in self.wsi.levels
]
level_downsamples = [
np.mean(
[
level_dimensions[0][0] / level.size.width,
level_dimensions[0][1] / level.size.height,
]
)
for level in self.wsi.levels
]
dataset = self.wsi.levels.base_level.datasets[0]
# Get pixel spacing in mm from DICOM file and convert to um/px (mpp)
mm_per_pixel = dataset.pixel_spacing
mpp = (mm_per_pixel.width * 1e3, mm_per_pixel.height * 1e3)
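# e.g. a spacing of 0.0005 mm/px corresponds to 0.5 um/px
# (illustrative value)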
return WSIMeta(
slide_dimensions=level_dimensions[0],
level_dimensions=level_dimensions,
level_downsamples=level_downsamples,
axes="YXS",
mpp=mpp,
level_count=len(level_dimensions),
vendor=dataset.Manufacturer,
)
def read_rect(
self,
location,
size,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image at a location and size.
Location is in terms of the baseline image (level 0 / maximum
resolution), and size is the output image size.
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
resolution 1.0.
The field of view varies with resolution. For a fixed field of
view see :func:`read_bounds`.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline
(level 0) reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image
size.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
pyramid level / resolution layer 3.
units (str):
The units of resolution, default = "level". Supported
units are: microns per pixel (mpp), objective power
(power), pyramid / resolution level (level), pixels per
baseline pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `location` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by VirtualWSIReader. See class
docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3, where M=size[1] and N=size[0].
Example:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # Read a region at level 0 (baseline / full resolution)
>>> img = wsi.read_rect(location, size)
>>> # Read a region at 0.5 microns per pixel (mpp)
>>> img = wsi.read_rect(location, size, 0.5, "mpp")
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.5),
... units="mpp",
... )
Note: The field of view varies with resolution when using
:func:`read_rect`.
.. figure:: ../images/read_rect_tissue.png
:width: 512
:alt: Diagram illustrating read_rect
As the location is in the baseline reference frame but the size
(width and height) is the output image size, the field of view
therefore changes as resolution changes.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
resolution of the image), then bicubic interpolation is applied
to the output image.
.. figure:: ../images/read_rect-interpolated-reads.png
:width: 512
:alt: Diagram illustrating read_rect interpolating between levels
When reading between the levels stored in the WSI, the
coordinates of the requested region are projected to the next
highest resolution. This resolution is then decoded and
downsampled to produce the desired output. This is a major
source of variability in the time taken to perform a read
operation. Reads which require reading a large region before
downsampling will be significantly slower than reading at a
fixed level.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # The resolution can be different in x and y, e.g.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.75),
... units="mpp",
... )
>>> # Several units can be used including: objective power,
>>> # microns per pixel, pyramid/resolution level, and
>>> # fraction of baseline.
>>> # E.g. Read a region at an objective power of 10x
>>> img = wsi.read_rect(
... location,
... size,
... resolution=10,
... units="power",
... )
>>> # Read a region at pyramid / resolution level 1
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1,
... units="level",
... )
>>> # Read at a fractional level, this will linearly
>>> # interpolate the downsampling factor between levels.
>>> # E.g. if levels 0 and 1 have a downsampling of 1x and
>>> # 2x of baseline, then level 0.5 will correspond to a
>>> # downsampling factor 1.5x of baseline.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="level",
... )
>>> # Read a region at half of the full / baseline
>>> # resolution.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="baseline",
... )
>>> # Read at a higher resolution than the baseline
>>> # (interpolation applied to output)
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1.25,
... units="baseline",
... )
>>> # Assuming the image has a native mpp of 0.5,
>>> # interpolation will be applied here.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.25,
... units="mpp",
... )
"""
if coord_space == "resolution":
return self._read_rect_at_resolution(
location,
size,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Find parameters for optimal read
(
read_level,
level_location,
level_read_size,
post_read_scale,
_,
) = self.find_read_rect_params(
location=location,
size=size,
resolution=resolution,
units=units,
)
wsi = self.wsi
# Read at optimal level and corrected read size
level_size = self.info.level_dimensions[read_level]
constrained_read_bounds = utils.image.find_overlap(
read_location=level_location,
read_size=level_read_size,
image_size=level_size,
)
_, constrained_read_size = utils.transforms.bounds2locsize(
constrained_read_bounds
)
dicom_level = wsi.levels[read_level].level
im_region = wsi.read_region(location, dicom_level, constrained_read_size)
im_region = np.array(im_region)
# Apply padding outside the slide area
level_read_bounds = utils.transforms.locsize2bounds(
level_location, level_read_size
)
im_region = utils.image.crop_and_pad_edges(
bounds=level_read_bounds,
max_dimensions=level_size,
region=im_region,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Resize to correct scale if required
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=tuple(np.array(size).astype(int)),
interpolation=interpolation,
)
return utils.transforms.background_composite(image=im_region)
def read_bounds(
self,
bounds,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image within given bounds.
Bounds are in terms of the baseline image (level 0 / maximum
resolution).
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
resolution 1.0.
The output image size may be different to the width and height
of the bounds as the resolution will affect this. To read a
region with a fixed output image size see :func:`read_rect`.
Args:
bounds (tuple(int)):
By default, this is a tuple of (start_x, start_y, end_x,
end_y) i.e. (left, top, right, bottom) of the region in
baseline reference frame. However, with
`coord_space="resolution"`, the bound is expected to be
at the requested resolution system.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
pyramid level / resolution layer 3.
units (str):
Units of resolution, default="level". Supported units
are: microns per pixel (mpp), objective power (power),
pyramid / resolution level (level), pixels per baseline
pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by :obj:`VirtualWSIReader`. See
class docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3, where M=end_y-start_y and N=end_x-start_x.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from matplotlib import pyplot as plt
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> # Read a region at level 0 (baseline / full resolution)
>>> bounds = [1000, 2000, 2000, 3000]
>>> img = wsi.read_bounds(bounds)
>>> plt.imshow(img)
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_bounds(
... bounds,
... resolution=0,
... units="level",
... )
>>> plt.imshow(img)
Note: The field of view remains the same as resolution is varied
when using :func:`read_bounds`.
.. figure:: ../images/read_bounds_tissue.png
:width: 512
:alt: Diagram illustrating read_bounds
This is because the bounds are in the baseline (level 0)
reference frame. Therefore, varying the resolution does not
change what is visible within the output image.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
resolution of the image), then bicubic interpolation is applied
to the output image.
"""
# convert from requested to `baseline`
bounds_at_baseline = bounds
if coord_space == "resolution":
bounds_at_baseline = self._bounds_at_resolution_to_baseline(
bounds, resolution, units
)
_, size_at_requested = utils.transforms.bounds2locsize(bounds)
# Don't use `output_size` (`size_at_requested`) here because
# rounding error in `bounds_at_baseline` can lead to a different
# `size_at_requested` (the read resolution is the same, but the
# base image is at a different scale).
(
read_level,
bounds_at_read_level,
_,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
else:  # duplicated portion with VirtualWSIReader; consider factoring out
# Find parameters for optimal read
(
read_level,
bounds_at_read_level,
size_at_requested,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
wsi = self.wsi
# Read at optimal level and corrected read size
location_at_baseline = bounds_at_baseline[:2]
level_location, size_at_read_level = utils.transforms.bounds2locsize(
bounds_at_read_level
)
level_size = self.info.level_dimensions[read_level]
read_bounds = utils.image.find_overlap(
level_location, size_at_read_level, level_size
)
_, read_size = utils.transforms.bounds2locsize(read_bounds)
dicom_level = wsi.levels[read_level].level
im_region = wsi.read_region(
location=location_at_baseline, level=dicom_level, size=read_size
)
im_region = np.array(im_region)
# Apply padding outside the slide area
im_region = utils.image.crop_and_pad_edges(
bounds=bounds_at_read_level,
max_dimensions=self.info.level_dimensions[read_level],
region=im_region,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Resize to correct scale if required
if coord_space == "resolution":
im_region = utils.transforms.imresize(
img=im_region,
output_size=size_at_requested,
interpolation=interpolation,
)
else:
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size_at_requested,
interpolation=interpolation,
)
return utils.transforms.background_composite(image=im_region)
class NGFFWSIReader(WSIReader):
"""Reader for NGFF WSI zarr(s).
Support is currently experimental. Reading from NGFF
version 0.4 is supported.
"""
def __init__(self, path, **kwargs):
super().__init__(path, **kwargs)
from imagecodecs import numcodecs
from tiatoolbox.wsicore.metadata import ngff
numcodecs.register_codecs()
self._zarr_group: zarr.hierarchy.Group = zarr.open(path, mode="r")
attrs = self._zarr_group.attrs
multiscales = attrs["multiscales"][0]
axes = multiscales["axes"]
datasets = multiscales["datasets"]
omero = attrs["omero"]
self.zattrs = ngff.Zattrs(
_creator=ngff.Creator(
name=attrs.get("name"),
version=attrs.get("version"),
),
multiscales=ngff.Multiscales(
version=multiscales.get("version"),
axes=[ngff.Axis(**axis) for axis in axes],
datasets=[
ngff.Dataset(
path=dataset["path"],
coordinateTransformations=dataset.get(
"coordinateTransformations",
),
)
for dataset in datasets
],
),
omero=ngff.Omero(
name=omero.get("name"),
id=omero.get("id"),
channels=[ngff.Channel(**channel) for channel in omero["channels"]],
rdefs=ngff.RDefs(**omero["rdefs"]),
version=omero.get("version"),
),
_ARRAY_DIMENSIONS=attrs["_ARRAY_DIMENSIONS"],
)
self.level_arrays = {
int(key): ArrayView(array, axes=self.info.axes)
for key, array in self._zarr_group.arrays()
}
def _info(self):
"""WSI metadata constructor.
Returns:
WSIMeta:
Containing metadata.
"""
multiscales = self.zattrs.multiscales
return WSIMeta(
axes="".join(axis.name.upper() for axis in multiscales.axes),
level_dimensions=[
array.shape[:2][::-1]
for _, array in sorted(self._zarr_group.arrays(), key=lambda x: x[0])
],
slide_dimensions=self._zarr_group[0].shape[:2][::-1],
vendor=self.zattrs._creator.name, # skipcq
raw=self._zarr_group.attrs,
mpp=self._get_mpp(),
)
def _get_mpp(self) -> Optional[Tuple[float, float]]:
"""Get the microns-per-pixel (MPP) of the slide.
Returns:
Tuple[float, float]:
The mpp of the slide as an (x, y) tuple, or None if not available.
"""
# Check that the required axes are present
multiscales = self.zattrs.multiscales
axes_dict = {a.name.lower(): a for a in multiscales.axes}
if "x" not in axes_dict or "y" not in axes_dict:
return None
x = axes_dict["x"]
y = axes_dict["y"]
# Check the units; currently only micrometer units are handled
if x.unit != "micrometer" or y.unit != "micrometer":
logger.warning(
"Expected units of micrometer, got %s and %s", x.unit, y.unit
)
return None
# Check that datasets is non-empty and has at least one coordinateTransformation
if (
not multiscales.datasets
or not multiscales.datasets[0].coordinateTransformations
):
return None
# Currently simply using the first scale transform
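# e.g. {"type": "scale", "scale": [1.0, 0.25, 0.25]}, with entries
# ordered as in `multiscales.axes` (illustrative values)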
transforms = multiscales.datasets[0].coordinateTransformations
for t in transforms:
if "scale" in t and t.get("type") == "scale":
x_index = multiscales.axes.index(x)
y_index = multiscales.axes.index(y)
return (t["scale"][x_index], t["scale"][y_index])
return None
def read_rect(
self,
location,
size,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image at a location and size.
Location is in terms of the baseline image (level 0 / maximum
resolution), and size is the output image size.
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
resolution 1.0.
The field of view varies with resolution. For a fixed field of
view see :func:`read_bounds`.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline
(level 0) reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image
size.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
per-pixel, and resolution=3, units="level" will read at
pyramid level / resolution layer 3.
units (str):
The units of resolution, default = "level". Supported
units are: microns per pixel (mpp), objective power
(power), pyramid / resolution level (level), pixels per
baseline pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `location` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by VirtualWSIReader. See class
docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3, where M=size[1] and N=size[0].
Example:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ome.zarr")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # Read a region at level 0 (baseline / full resolution)
>>> img = wsi.read_rect(location, size)
>>> # Read a region at 0.5 microns per pixel (mpp)
>>> img = wsi.read_rect(location, size, 0.5, "mpp")
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.5),
... units="mpp",
... )
Note: The field of view varies with resolution when using
:func:`read_rect`.
.. figure:: ../images/read_rect_tissue.png
:width: 512
:alt: Diagram illustrating read_rect
As the location is in the baseline reference frame but the size
(width and height) is the output image size, the field of view
therefore changes as resolution changes.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
.. figure:: ../images/read_rect-interpolated-reads.png
:width: 512
            :alt: Diagram illustrating read_rect interpolating between levels
When reading between the levels stored in the WSI, the
coordinates of the requested region are projected to the next
highest resolution. This resolution is then decoded and
downsampled to produce the desired output. This is a major
        source of variability in the time taken to perform a read
operation. Reads which require reading a large region before
downsampling will be significantly slower than reading at a
fixed level.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load a WSI image
>>> wsi = WSIReader.open(input_img="./CMU-1.ome.zarr")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # The resolution can be different in x and y, e.g.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.75),
... units="mpp",
... )
>>> # Several units can be used including: objective power,
>>> # microns per pixel, pyramid/resolution level, and
>>> # fraction of baseline.
>>> # E.g. Read a region at an objective power of 10x
>>> img = wsi.read_rect(
... location,
... size,
... resolution=10,
... units="power",
... )
>>> # Read a region at pyramid / resolution level 1
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1,
... units="level",
... )
>>> # Read at a fractional level, this will linearly
>>> # interpolate the downsampling factor between levels.
>>> # E.g. if levels 0 and 1 have a downsampling of 1x and
>>> # 2x of baseline, then level 0.5 will correspond to a
>>> # downsampling factor 1.5x of baseline.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="level",
... )
>>> # Read a region at half of the full / baseline
>>> # resolution.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="baseline",
... )
>>> # Read at a higher resolution than the baseline
>>> # (interpolation applied to output)
>>> img = wsi.read_rect(
... location,
... size,
... resolution=1.25,
... units="baseline",
... )
>>> # Assuming the image has a native mpp of 0.5,
>>> # interpolation will be applied here.
>>> img = wsi.read_rect(
... location,
... size,
... resolution=0.25,
... units="mpp",
... )
"""
if coord_space == "resolution":
im_region = self._read_rect_at_resolution(
location,
size,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
return utils.transforms.background_composite(image=im_region)
# Find parameters for optimal read
(
read_level,
_,
_,
post_read_scale,
baseline_read_size,
) = self.find_read_rect_params(
location=location,
size=size,
resolution=resolution,
units=units,
)
bounds = utils.transforms.locsize2bounds(
location=location, size=baseline_read_size
)
im_region = utils.image.safe_padded_read(
image=self.level_arrays[read_level],
bounds=bounds,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size,
interpolation=interpolation,
)
return utils.transforms.background_composite(image=im_region)
def read_bounds(
self,
bounds,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the whole slide image within given bounds.
Bounds are in terms of the baseline image (level 0 / maximum
resolution).
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
        resolution 1.0.
The output image size may be different to the width and height
of the bounds as the resolution will affect this. To read a
region with a fixed output image size see :func:`read_rect`.
Args:
bounds (tuple(int)):
By default, this is a tuple of (start_x, start_y, end_x,
end_y) i.e. (left, top, right, bottom) of the region in
baseline reference frame. However, with
`coord_space="resolution"`, the bound is expected to be
at the requested resolution system.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
                per-pixel, and resolution=3, units="level" will read
                at pyramid level / resolution layer 3.
units (str):
Units of resolution, default="level". Supported units
are: microns per pixel (mpp), objective power (power),
pyramid / resolution level (level), pixels per baseline
pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by :obj:`VirtualWSIReader`. See
class docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3 M=end_h-start_h, N=end_w-start_w
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from matplotlib import pyplot as plt
>>> wsi = WSIReader.open(input_img="./CMU-1.ome.zarr")
>>> # Read a region at level 0 (baseline / full resolution)
>>> bounds = [1000, 2000, 2000, 3000]
>>> img = wsi.read_bounds(bounds)
>>> plt.imshow(img)
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_bounds(
... bounds,
... resolution=0,
... units="level",
... )
>>> plt.imshow(img)
Note: The field of view remains the same as resolution is varied
when using :func:`read_bounds`.
.. figure:: ../images/read_bounds_tissue.png
:width: 512
:alt: Diagram illustrating read_bounds
This is because the bounds are in the baseline (level 0)
reference frame. Therefore, varying the resolution does not
change what is visible within the output image.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
"""
bounds_at_baseline = bounds
if coord_space == "resolution":
bounds_at_baseline = self._bounds_at_resolution_to_baseline(
bounds, resolution, units
)
_, size_at_requested = utils.transforms.bounds2locsize(bounds)
            # don't use `output_size` (`size_at_requested`) here because
            # rounding error in `bounds_at_baseline` can lead to a
            # different `size_at_requested` (same read resolution, but the
            # base image is at a different scale)
(
read_level,
_,
_,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
        else:  # duplicated portion with VirtualReader; consider factoring out
# Find parameters for optimal read
(
read_level,
_,
size_at_requested,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
im_region = utils.image.sub_pixel_read(
image=self.level_arrays[read_level],
bounds=bounds_at_baseline,
output_size=size_at_requested,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
read_kwargs=kwargs,
)
if coord_space == "resolution":
# do this to enforce output size is as defined by input bounds
im_region = utils.transforms.imresize(
img=im_region, output_size=size_at_requested
)
else:
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size_at_requested,
)
return im_region
class AnnotationStoreReader(WSIReader):
"""Reader for Annotation stores.
This reader is used to read annotation store data as if it were a WSI,
rendering the annotations in the specified region to be read. Can be used
either to render annotations as a stand-alone mask, or to render annotations
on top of its parent WSI as a virtual 'annotated slide'.
Note: Currently only supports annotations stored at the same resolution as
the parent WSI base resolution. Support for annotations stored at arbitrary
resolutions will be added in the future.
Args:
store (AnnotationStore | str | Path):
An AnnotationStore or a path to an annotation store .db file.
info (WSIMeta):
Metadata of the base WSI for the annotations in the store.
            If this is not provided, it will be read from the store
            metadata, or from the base_wsi if provided.
If no source of metadata is found, will raise an error.
renderer (AnnotationRenderer):
Renderer to use for rendering annotations. Providing a renderer
allows for customisation of the rendering process. If not provided,
a sensible default will be created.
base_wsi (WSIReader | str):
Base WSI reader or path to use for reading the base WSI. Annotations
will be rendered on top of the base WSI. If not provided,
will render annotation masks without a base image.
alpha (float):
Opacity of the overlaid annotations. Must be between 0 and 1.
Has no effect if base_wsi is not provided.
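    Example:
        >>> # A minimal sketch; the file paths below are hypothetical.
        >>> reader = AnnotationStoreReader(
        ...     store="./CMU-1.db",
        ...     base_wsi="./CMU-1.ndpi",
        ...     alpha=0.5,
        ... )
        >>> region = reader.read_rect((0, 0), (256, 256), 0.5, "mpp")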
"""
def __init__(
self,
store: Union[AnnotationStore, str, pathlib.Path],
info: Optional[WSIMeta] = None,
renderer: AnnotationRenderer = None,
base_wsi: Union[WSIReader, str] = None,
alpha=1.0,
**kwargs,
):
super().__init__(store, **kwargs)
self.store = (
SQLiteStore(pathlib.Path(store))
if isinstance(store, (str, pathlib.Path))
else store
)
self.base_wsi = base_wsi
if isinstance(base_wsi, (str, pathlib.Path)):
self.base_wsi = WSIReader.open(base_wsi)
if info is None:
# try to get metadata from store
try:
info = WSIMeta(**json.loads(self.store.metadata["wsi_meta"]))
except KeyError:
if self.base_wsi is not None:
# get the metadata from the base reader.
# assumes annotations saved at WSI baseline res
info = self.base_wsi.info
else:
                    # we can't find any metadata
                    raise ValueError(
                        "No metadata found in store. Please provide "
                        "either info or base slide."
                    )
self.info = info
if renderer is None:
types = self.store.pquery("props['type']")
if len(types) == 0:
renderer = AnnotationRenderer(max_scale=1000)
else:
renderer = AnnotationRenderer("type", list(types), max_scale=1000)
renderer.edge_thickness = 0
self.renderer = renderer
if self.base_wsi is not None:
self.on_slide = True
self.alpha = alpha
def _info(self):
"""Get the metadata of the slide."""
return self.info
def read_rect(
self,
location,
size,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the annotation mask, or annotated whole slide
image at a location and size.
Location is in terms of the baseline image (level 0 / maximum
resolution), and size is the output image size.
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
        resolution 1.0.
The field of view varies with resolution. For a fixed field of
view see :func:`read_bounds`.
Args:
location (tuple(int)):
(x, y) tuple giving the top left pixel in the baseline
(level 0) reference frame.
size (tuple(int)):
(width, height) tuple giving the desired output image
size.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
                per-pixel, and resolution=3, units="level" will read
                at pyramid level / resolution layer 3.
units (str):
The units of resolution, default = "level". Supported
units are: microns per pixel (mpp), objective power
(power), pyramid / resolution level (level), pixels per
baseline pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by VirtualWSIReader. See class
docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3 M=size[0], N=size[1]
Example:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load an annotation store and associated wsi to be
>>> # overlaid upon.
>>> annotated_wsi = WSIReader.open(input_img="./CMU-1.db",
>>> base_wsi="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # Read a region at level 0 (baseline / full resolution)
>>> img = annotated_wsi.read_rect(location, size)
>>> # Read a region at 0.5 microns per pixel (mpp)
>>> img = annotated_wsi.read_rect(location, size, 0.5, "mpp")
>>> # This could also be written more verbosely as follows
>>> img = annotated_wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.5),
... units="mpp",
... )
Note: The field of view varies with resolution when using
:func:`read_rect`.
.. figure:: ../images/read_rect_tissue.png
:width: 512
:alt: Diagram illustrating read_rect
As the location is in the baseline reference frame but the size
(width and height) is the output image size, the field of view
therefore changes as resolution changes.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
.. figure:: ../images/read_rect-interpolated-reads.png
:width: 512
            :alt: Diagram illustrating read_rect interpolating between levels
When reading between the levels stored in the WSI, the
coordinates of the requested region are projected to the next
highest resolution. This resolution is then decoded and
downsampled to produce the desired output. This is a major
        source of variability in the time taken to perform a read
operation. Reads which require reading a large region before
downsampling will be significantly slower than reading at a
fixed level.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # Load an annotation store and associated wsi to be
>>> # overlaid upon.
>>> annotated_wsi = WSIReader.open(input_img="./CMU-1.db",
>>> base_wsi="./CMU-1.ndpi")
>>> location = (0, 0)
>>> size = (256, 256)
>>> # The resolution can be different in x and y, e.g.
>>> img = annotated_wsi.read_rect(
... location,
... size,
... resolution=(0.5, 0.75),
... units="mpp",
... )
>>> # Several units can be used including: objective power,
>>> # microns per pixel, pyramid/resolution level, and
>>> # fraction of baseline.
>>> # E.g. Read a region at an objective power of 10x
>>> img = annotated_wsi.read_rect(
... location,
... size,
... resolution=10,
... units="power",
... )
>>> # Read a region at pyramid / resolution level 1
>>> img = annotated_wsi.read_rect(
... location,
... size,
... resolution=1,
... units="level",
... )
>>> # Read at a fractional level, this will linearly
>>> # interpolate the downsampling factor between levels.
>>> # E.g. if levels 0 and 1 have a downsampling of 1x and
>>> # 2x of baseline, then level 0.5 will correspond to a
>>> # downsampling factor 1.5x of baseline.
>>> img = annotated_wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="level",
... )
>>> # Read a region at half of the full / baseline
>>> # resolution.
>>> img = annotated_wsi.read_rect(
... location,
... size,
... resolution=0.5,
... units="baseline",
... )
>>> # Read at a higher resolution than the baseline
>>> # (interpolation applied to output)
>>> img = annotated_wsi.read_rect(
... location,
... size,
... resolution=1.25,
... units="baseline",
... )
>>> # Assuming the image has a native mpp of 0.5,
>>> # interpolation will be applied here.
>>> img = annotated_wsi.read_rect(
... location,
... size,
... resolution=0.25,
... units="mpp",
... )
Annotations can also be displayed as a stand-alone mask not
overlaid on the WSI. In this case, the metadata of the store
        must contain the resolution at which the annotations were
        saved, and the slide dimensions at that resolution.
Alternatively, an instance of WSIMeta can be provided describing the
slide the annotations are associated with (in which case annotations
are assumed to be saved at the baseline resolution given in the metadata).
Example:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> # get metadata from the slide (could also manually create a
>>> # WSIMeta object if you know the slide info but do not have the
>>> # slide itself)
>>> metadata = WSIReader.open("CMU-1.ndpi").info
>>> # Load associated annotations
            >>> annotation_mask = WSIReader.open(input_img="./CMU-1.db", info=metadata)
>>> location = (0, 0)
>>> size = (256, 256)
>>> # Read a region of the mask at level 0 (baseline / full resolution)
>>> img = annotation_mask.read_rect(location, size)
>>> # Read a region of the mask at 0.5 microns per pixel (mpp)
>>> img = annotation_mask.read_rect(location, size, 0.5, "mpp")
"""
if coord_space == "resolution":
return self._read_rect_at_resolution(
location,
size,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
)
# Find parameters for optimal read
(
read_level,
_,
_,
post_read_scale,
baseline_read_size,
) = self.find_read_rect_params(
location=location,
size=size,
resolution=resolution,
units=units,
)
bounds = utils.transforms.locsize2bounds(
location=location, size=baseline_read_size
)
im_region = self.renderer.render_annotations(
self.store, bounds, self.info.level_downsamples[read_level]
)
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size,
interpolation=interpolation,
)
if self.base_wsi is not None:
# overlay image region on the base wsi
base_region = self.base_wsi.read_rect(
location,
size,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
coord_space=coord_space,
**kwargs,
)
base_region = Image.fromarray(
utils.transforms.background_composite(base_region, alpha=True)
)
im_region = Image.fromarray(im_region)
if self.alpha < 1.0:
im_region.putalpha(
im_region.getchannel("A").point(lambda i: i * self.alpha)
)
base_region = Image.alpha_composite(base_region, im_region)
base_region = base_region.convert("RGB")
return np.array(base_region)
return utils.transforms.background_composite(im_region)
def read_bounds(
self,
bounds,
resolution=0,
units="level",
interpolation="optimise",
pad_mode="constant",
pad_constant_values=0,
coord_space="baseline",
**kwargs,
):
"""Read a region of the annotation mask, or annotated whole slide
image within given bounds.
Bounds are in terms of the baseline image (level 0 / maximum
resolution).
Reads can be performed at different resolutions by supplying a
pair of arguments for the resolution and the units of
resolution. If metadata does not specify `mpp` or
`objective_power` then `baseline` units should be selected with
        resolution 1.0.
The output image size may be different to the width and height
of the bounds as the resolution will affect this. To read a
region with a fixed output image size see :func:`read_rect`.
Args:
bounds (tuple(int)):
By default, this is a tuple of (start_x, start_y, end_x,
end_y) i.e. (left, top, right, bottom) of the region in
baseline reference frame. However, with
`coord_space="resolution"`, the bound is expected to be
at the requested resolution system.
resolution (int or float or tuple(float)):
Resolution at which to read the image, default = 0.
Either a single number or a sequence of two numbers for
x and y are valid. This value is in terms of the
corresponding units. For example: resolution=0.5 and
units="mpp" will read the slide at 0.5 microns
                per-pixel, and resolution=3, units="level" will read
                at pyramid level / resolution layer 3.
units (str):
Units of resolution, default="level". Supported units
are: microns per pixel (mpp), objective power (power),
pyramid / resolution level (level), pixels per baseline
pixel (baseline).
interpolation (str):
Method to use when resampling the output image. Possible
values are "linear", "cubic", "lanczos", "area", and
"optimise". Defaults to 'optimise' which will use cubic
interpolation for upscaling and area interpolation for
downscaling to avoid moiré patterns.
pad_mode (str):
Method to use when padding at the edges of the image.
Defaults to 'constant'. See :func:`numpy.pad` for
available modes.
pad_constant_values (int, tuple(int)):
Constant values to use when padding with constant pad mode.
Passed to the :func:`numpy.pad` `constant_values` argument.
Default is 0.
coord_space (str):
Defaults to "baseline". This is a flag to indicate if
the input `bounds` is in the baseline coordinate system
("baseline") or is in the requested resolution system
("resolution").
**kwargs (dict):
Extra key-word arguments for reader specific parameters.
Currently only used by :obj:`VirtualWSIReader`. See
class docstrings for more information.
Returns:
:class:`numpy.ndarray`:
Array of size MxNx3 M=end_h-start_h, N=end_w-start_w
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from matplotlib import pyplot as plt
>>> annotated_wsi = WSIReader.open(input_img="./CMU-1.db",
>>> base_wsi="./CMU-1.ndpi")
>>> # Read a region at level 0 (baseline / full resolution)
>>> bounds = [1000, 2000, 2000, 3000]
>>> img = annotated_wsi.read_bounds(bounds)
>>> plt.imshow(img)
>>> # This could also be written more verbosely as follows
>>> img = wsi.read_bounds(
... bounds,
... resolution=0,
... units="level",
... )
>>> plt.imshow(img)
Note: The field of view remains the same as resolution is varied
when using :func:`read_bounds`.
.. figure:: ../images/read_bounds_tissue.png
:width: 512
:alt: Diagram illustrating read_bounds
This is because the bounds are in the baseline (level 0)
reference frame. Therefore, varying the resolution does not
change what is visible within the output image.
If the WSI does not have a resolution layer corresponding
exactly to the requested resolution (shown above in white with a
dashed outline), a larger resolution is downscaled to achieve
the correct requested output resolution.
If the requested resolution is higher than the baseline (maximum
        resolution of the image), then bicubic interpolation is applied
to the output image.
"""
bounds_at_baseline = bounds
if coord_space == "resolution":
bounds_at_baseline = self._bounds_at_resolution_to_baseline(
bounds, resolution, units
)
_, size_at_requested = utils.transforms.bounds2locsize(bounds)
            # don't use `output_size` (`size_at_requested`) here because
            # rounding error in `bounds_at_baseline` can lead to a
            # different `size_at_requested` (same read resolution, but the
            # base image is at a different scale)
(
read_level,
_,
_,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
        else:  # duplicated portion with VirtualReader; consider factoring out
# Find parameters for optimal read
(
read_level,
_,
size_at_requested,
post_read_scale,
) = self._find_read_bounds_params(
bounds_at_baseline, resolution=resolution, units=units
)
im_region = self.renderer.render_annotations(
self.store,
bounds_at_baseline,
self.info.level_downsamples[read_level],
)
if coord_space == "resolution":
# do this to enforce output size is as defined by input bounds
im_region = utils.transforms.imresize(
img=im_region, output_size=size_at_requested
)
else:
im_region = utils.transforms.imresize(
img=im_region,
scale_factor=post_read_scale,
output_size=size_at_requested,
)
if self.base_wsi is not None:
# overlay image region on the base wsi
base_region = self.base_wsi.read_bounds(
bounds,
resolution=resolution,
units=units,
interpolation=interpolation,
pad_mode=pad_mode,
pad_constant_values=pad_constant_values,
coord_space=coord_space,
**kwargs,
)
base_region = Image.fromarray(
utils.transforms.background_composite(base_region, alpha=True)
)
im_region = Image.fromarray(im_region)
if self.alpha < 1.0:
im_region.putalpha(
im_region.getchannel("A").point(lambda i: i * self.alpha)
)
base_region = Image.alpha_composite(base_region, im_region)
base_region = base_region.convert("RGB")
return np.array(base_region)
return utils.transforms.background_composite(im_region)
| 213,212 | 38.244064 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/wsicore/wsimeta.py | """This module defines a dataclass which holds metadata about a WSI.
With this class, metadata is in a normalized consistent format
which is quite useful when working with many different WSI formats.
The raw metadata is also preserved and accessible via a dictionary. The
format of this dictionary may vary between WSI formats.
"""
from numbers import Number
from pathlib import Path
from typing import List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
from tiatoolbox import logger
Resolution = Union[Number, Tuple[Number, Number], np.ndarray]
class WSIMeta:
"""Whole slide image metadata class.
Args:
slide_dimensions (int, int):
Tuple containing the width and height of the WSI. These
are for the baseline (full resolution) image if the WSI
is a pyramid or multi-resolution.
level_dimensions (list):
A list of dimensions for each level of the pyramid or
for each resolution in the WSI.
objective_power (float, optional):
The power of the objective lens used to create the
image.
level_count: (int, optional):
The number of levels or resolutions in the WSI. If not
given this is assigned len(level_dimensions). Defaults
to None.
level_downsamples (:obj:`list` of :obj:`float`):
List of scale values which describe how many times
smaller the current level is compared with the baseline.
vendor (str, optional):
Scanner vendor/manufacturer description.
mpp (float, float, optional):
Microns per pixel.
file_path (Path, optional):
Path to the corresponding WSI file.
raw (dict, optional):
Dictionary of unprocessed metadata extracted from the
WSI format. For JPEG-2000 images this contains an xml
object under the key "xml".
Attributes:
slide_dimensions (tuple(int)):
Tuple containing the width and height of the WSI. These are
for the baseline (full resolution) image if the WSI is a
pyramid or multi-resolution. Required.
axes (str):
Axes ordering of the image. This is most relevant for
OME-TIFF images where the axes ordering can vary. For most
            images this will be "YXS", i.e. the image is stored in the
axis order of Y coordinates first, then X coordinates, and
colour channels last.
level_dimensions (list):
A list of dimensions for each level of the pyramid or for
each resolution in the WSI. Defaults to [slide_dimension].
objective_power (float):
The magnification power of the objective lens used to scan
the image. Not always present or accurate. Defaults to None.
level_count: (int):
The number of levels or resolutions in the WSI. If not given
this is assigned len(level_dimensions). Defaults to
len(level_dimensions).
level_downsamples (:obj:`list` of :obj:`float`):
List of scale values which describe how many times smaller
the current level is compared with the baseline. Defaults to
(1,).
vendor (str):
Scanner vendor/manufacturer description.
mpp (float, float, optional):
Microns per pixel. Derived from objective power and sensor
size. Not always present or accurate. Defaults to None.
file_path (Path):
Path to the corresponding WSI file. Defaults to None.
raw (dict):
Dictionary of unprocessed metadata extracted from the WSI
format. For JP2 images this contains an xml object under the
key "xml". Defaults to empty dictionary.
"""
_valid_axes_characters = "YXSTZ"
def __init__(
self,
slide_dimensions: Tuple[int, int],
axes: str,
level_dimensions: Optional[Sequence[Tuple[int, int]]] = None,
objective_power: Optional[float] = None,
level_count: Optional[int] = None,
level_downsamples: Optional[Sequence[float]] = (1,),
vendor: Optional[str] = None,
mpp: Optional[Sequence[float]] = None,
file_path: Optional[Path] = None,
raw: Optional[Mapping[str, str]] = None,
):
self.axes = axes
self.objective_power = float(objective_power) if objective_power else None
self.slide_dimensions = tuple(int(x) for x in slide_dimensions)
self.level_dimensions = (
tuple((int(w), int(h)) for w, h in level_dimensions)
if level_dimensions is not None
else [self.slide_dimensions]
)
self.level_downsamples = (
[float(x) for x in level_downsamples]
if level_downsamples is not None
else None
)
self.level_count = (
int(level_count) if level_count is not None else len(self.level_dimensions)
)
self.vendor = str(vendor)
self.mpp = np.array([float(x) for x in mpp]) if mpp is not None else None
self.file_path = Path(file_path) if file_path is not None else None
self.raw = raw if raw is not None else None
self.validate()
def validate(self):
"""Validate passed values and cast to Python types.
Metadata values are often given as strings and must be
parsed/cast to the appropriate python type e.g. "3.14" to 3.14
etc.
Returns:
bool:
                True if validation passed, False otherwise.
"""
passed = True
# Fatal conditions: Should return False if not True
if len(set(self.axes) - set(self._valid_axes_characters)) > 0:
logger.warning(
"Axes contains invalid characters. Valid characters are %s.",
self._valid_axes_characters,
)
passed = False
if self.level_count < 1:
logger.warning("Level count is not a positive integer.")
passed = False
if self.level_dimensions is None:
logger.warning("'level_dimensions' is None.")
passed = False
elif len(self.level_dimensions) != self.level_count:
logger.warning("Length of level dimensions != level count")
passed = False
if self.level_downsamples is None:
logger.warning("Level downsamples is None.")
passed = False
elif len(self.level_downsamples) != self.level_count:
logger.warning("Length of level downsamples != level count")
passed = False
# Non-fatal conditions: Raise warning only, do not fail validation
if self.raw is None:
logger.warning("Raw data is None.")
if all(x is None for x in [self.objective_power, self.mpp]):
logger.warning("Unknown scale (no objective_power or mpp)")
return passed # noqa
def level_downsample(
self,
level: Union[int, float],
) -> float:
"""Get the downsample factor for a level.
For non-integer values of `level`, the downsample factor is
        linearly interpolated between the downsample factors of the
level below and the level above.
Args:
level (int or float):
Level to get downsample factor for.
Returns:
float:
Downsample factor for the given level.
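        Example:
            >>> # Illustrative sketch: assumes `meta` is a WSIMeta whose
            >>> # level_downsamples are [1.0, 2.0, 4.0].
            >>> meta.level_downsample(1)
            2.0
            >>> # Non-integer levels interpolate linearly between the
            >>> # adjacent levels.
            >>> meta.level_downsample(0.5)
            1.5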
"""
level_downsamples = self.level_downsamples
if isinstance(level, int) or int(level) == level:
# Return the downsample for the level
return level_downsamples[int(level)]
# Linearly interpolate between levels
floor = int(np.floor(level))
ceil = int(np.ceil(level))
floor_downsample = level_downsamples[floor]
ceil_downsample = level_downsamples[ceil]
return np.interp(level, [floor, ceil], [floor_downsample, ceil_downsample])
def relative_level_scales(
self, resolution: Resolution, units: str
) -> List[np.ndarray]:
"""Calculate scale of each level in the WSI relative to given resolution.
Find the relative scale of each image pyramid / resolution level
of the WSI relative to the given resolution and units.
Values > 1 indicate that the level has a larger scale than the
target and < 1 indicates that it is smaller.
Args:
resolution (float or tuple(float)):
Scale to calculate relative to units.
units (str):
Units of the scale. Allowed values are: `"mpp"`,
`"power"`, `"level"`, `"baseline"`. Baseline refers to
the largest resolution in the WSI (level 0).
Raises:
ValueError:
Missing MPP metadata.
ValueError:
Missing objective power metadata.
ValueError:
Invalid units.
Returns:
list:
Scale for each level relative to the given scale and
units.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> print(wsi.info.relative_level_scales(0.5, "mpp"))
[array([0.91282519, 0.91012514]), array([1.82565039, 1.82025028]) ...
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> print(wsi.info.relative_level_scales(0.5, "baseline"))
[0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0]
"""
if units not in ("mpp", "power", "level", "baseline"):
raise ValueError("Invalid units")
level_downsamples = self.level_downsamples
def np_pair(x: Union[Number, np.array]) -> np.ndarray:
"""Ensure input x is a numpy array of length 2."""
# If one number is given, the same value is used for x and y
if isinstance(x, Number):
return np.array([x] * 2)
return np.array(x)
if units == "level":
if resolution >= len(level_downsamples):
raise ValueError(
f"Target scale level {resolution} "
f"> number of levels {len(level_downsamples)} in WSI"
)
base_scale, resolution = 1, self.level_downsample(resolution)
resolution = np_pair(resolution)
if units == "mpp":
if self.mpp is None:
raise ValueError("MPP is None. Cannot determine scale in terms of MPP.")
base_scale = self.mpp
if units == "power":
if self.objective_power is None:
raise ValueError(
"Objective power is None. "
"Cannot determine scale in terms of objective power."
)
base_scale, resolution = 1 / self.objective_power, 1 / resolution
if units == "baseline":
base_scale, resolution = 1, 1 / resolution
return [
(base_scale * downsample) / resolution for downsample in level_downsamples
]
def as_dict(self):
"""Convert WSIMeta to dictionary of Python types.
Returns:
dict:
Whole slide image meta data as dictionary.
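        Example:
            >>> # Assuming `meta` is a WSIMeta instance.
            >>> d = meta.as_dict()
            >>> sorted(d)[:3]
            ['axes', 'file_path', 'level_count']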
"""
if self.mpp is None:
mpp = (self.mpp, self.mpp)
else:
mpp = tuple(self.mpp)
return {
"objective_power": self.objective_power,
"slide_dimensions": self.slide_dimensions,
"level_count": self.level_count,
"level_dimensions": self.level_dimensions,
"level_downsamples": self.level_downsamples,
"vendor": self.vendor,
"mpp": mpp,
"file_path": self.file_path,
"axes": self.axes,
}
| 12,205 | 37.263323 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/wsicore/__init__.py | """Package to read whole slide images"""
from tiatoolbox.wsicore import metadata, wsimeta, wsireader
# Top level imports
WSIReader = wsireader.WSIReader
WSIMeta = wsimeta.WSIMeta
| 180 | 24.857143 | 59 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/wsicore/metadata/ngff.py | """Generation of metadata for the OME-NGFF (zarr) slides.
Borrowed from https://github.com/John-P/wsic
Based on version 0.4 of the specification:
https://ngff.openmicroscopy.org/0.4/
"""
from dataclasses import dataclass, field
from numbers import Number
from typing import List, Optional, Union
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from tiatoolbox import __version__ as tiatoolbox_version
SpaceUnits = Literal[
"angstrom",
"attometer",
"centimeter",
"decimeter",
"exameter",
"femtometer",
"foot",
"gigameter",
"hectometer",
"inch",
"kilometer",
"megameter",
"meter",
"micrometer",
"mile",
"millimeter",
"nanometer",
"parsec",
"petameter",
"picometer",
"terameter",
"yard",
"yoctometer",
"yottameter",
"zeptometer",
"zettameter",
]
TimeUnits = Literal[
"attosecond",
"centisecond",
"day",
"decisecond",
"exasecond",
"femtosecond",
"gigasecond",
"hectosecond",
"hour",
"kilosecond",
"megasecond",
"microsecond",
"millisecond",
"minute",
"nanosecond",
"petasecond",
"picosecond",
"second",
"terasecond",
"yoctosecond",
"yottasecond",
"zeptosecond",
"zettasecond",
]
TCZYX = Literal["t", "c", "z", "y", "x"]
@dataclass
class Creator:
"""Record the creator (wsic) information.
Attributes:
name (str):
The name of the creator.
version (str):
The version of the creator.
"""
name: str = "tiatoolbox"
version: str = tiatoolbox_version
@dataclass
class CoordinateTransform:
"""Transformation from the zarr to slide coordinate system.
Attributes:
type (str):
The type of coordinate transform. E.g. "scale".
scale (List[float]):
The scale factors. Must be one for each axis.
"""
type: str = "identity" # noqa: A003
scale: Optional[List[float]] = None
@dataclass
class Dataset:
"""Description of a single resolution.
Attributes:
path (str):
Path to the dataset. This will usually be a string of an
integer e.g. "0".
coordinateTransformations (List[CoordinateTransform]):
Transformations from the zarr to slide coordinate system.
"""
path: str = "0"
coordinateTransformations: List[CoordinateTransform] = field( # noqa: N815
default_factory=lambda: [CoordinateTransform()]
)
@dataclass
class Axis:
"""Description of an axis including type and units.
Attributes:
name (str):
The name of the axis. Must be one of: "t", "c", "z", "y",
"x".
type (str):
            The type of the axis. Must be one of: "time", "space",
"channel".
unit (str):
The units of the axis.
"""
name: TCZYX
type: Literal["time", "space", "channel"] # noqa: A003
unit: Optional[Union[SpaceUnits, TimeUnits]] = None
@dataclass
class Multiscales:
"""Description of multiple resolutions present.
Attributes:
axes (List[Axis]):
The axes of the multiscales.
datasets (List[Dataset]):
The datasets of the multiscales.
version (str):
The version of the specification.
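    Example:
        >>> # Minimal sketch: two levels, assuming a 0.5 micrometer per
        >>> # pixel baseline scale (values are illustrative).
        >>> ms = Multiscales(
        ...     datasets=[
        ...         Dataset("0", [CoordinateTransform("scale", [0.5, 0.5, 1.0])]),
        ...         Dataset("1", [CoordinateTransform("scale", [1.0, 1.0, 1.0])]),
        ...     ]
        ... )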
"""
axes: List[Axis] = field(
default_factory=lambda: [
Axis("y", "space", "micrometer"),
Axis("x", "space", "micrometer"),
Axis("c", "channel", None),
]
)
datasets: List[Dataset] = field(default_factory=lambda: [Dataset()])
version: str = "0.4"
@dataclass
class Window:
"""The range of values within a channel.
Attributes:
end (int):
The end of the window.
max (int):
The maximum value in the window.
min (int):
The minimum value in the window.
start (int):
The start of the window.
"""
end: Number = 255
max: Number = 255 # noqa: A003
min: Number = 0 # noqa: A003
start: Number = 0
@dataclass
class Channel:
"""Description of a single channel.
Attributes:
active (bool):
Whether the channel is active by default.
color (str):
The color of the channel in hexadecimal format. E.g.
"FF0000" for red.
family (str):
The family of the channel. E.g. "linear".
inverted (bool):
Whether the channel is inverted.
window (Window):
The min and max values represented in the channel.
"""
active: bool = True
coefficient: float = 1.0
color: str = "FF0000" # Hexadecimal color code
family: str = "linear"
inverted: bool = False
label: str = "Red"
window: Window = field(default_factory=Window)
@dataclass
class RDefs:
"""Defaults for axes and colour model.
Attributes:
defaultT (int):
Default timepoint.
defaultZ (int):
Default z-plane.
model (str):
Colour model: "color" or "greyscale".
"""
defaultT: int = 0 # noqa: N815
defaultZ: int = 0 # noqa: N815
model: Literal["color", "greyscale"] = "color"
@dataclass
class Omero:
"""Display information e.g. colour channel information.
Attributes:
name (str):
The display name.
id (int):
The omero ID.
channels (List[Channel]):
The colour channels.
rdefs (RDefs):
The default values for axes and colour model.
version (str):
The version of the specification.
"""
name: Optional[str] = None
id: int = 1 # noqa: A003
channels: list = field(
default_factory=lambda: [
Channel(label="Red", color="FF0000"),
Channel(label="Green", color="00FF00"),
Channel(label="Blue", color="0000FF"),
]
)
rdefs: RDefs = field(default_factory=RDefs)
version: str = "0.4"
@dataclass
class Zattrs:
"""Root metadata.
Attributes:
_creator (Creator):
Information about the creator.
multiscales (Multiscales):
Information about the multiscales.
_ARRAY_DIMENSIONS (List[TCZYX]):
The dimensions of the array, for xarray compatibility.
omero (Omero):
Information about the display of image data.
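    Example:
        >>> from dataclasses import asdict
        >>> # Minimal sketch: serialise the default root metadata, e.g.
        >>> # for writing to a zarr group's .zattrs.
        >>> attrs = asdict(Zattrs())
        >>> attrs["multiscales"][0]["version"]
        '0.4'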
"""
_creator: Creator = field(default_factory=Creator)
multiscales: Union[Multiscales, List[Multiscales]] = field(
default_factory=lambda: [Multiscales()]
)
_ARRAY_DIMENSIONS: List[TCZYX] = field(default_factory=lambda: ["y", "x", "c"])
omero: Omero = field(default_factory=Omero)
| 6,792 | 23.791971 | 83 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/wsicore/metadata/__init__.py | from tiatoolbox.wsicore.metadata import ngff
| 45 | 22 | 44 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/__init__.py | """Models package for the models implemented in tiatoolbox."""
from tiatoolbox.models import architecture, dataset, engine, models_abc
from tiatoolbox.models.engine.multi_task_segmentor import MultiTaskSegmentor
from tiatoolbox.models.engine.nucleus_instance_segmentor import NucleusInstanceSegmentor
from tiatoolbox.models.engine.patch_predictor import (
IOPatchPredictorConfig,
PatchDataset,
PatchPredictor,
WSIPatchDataset,
)
from tiatoolbox.models.engine.semantic_segmentor import (
DeepFeatureExtractor,
IOSegmentorConfig,
SemanticSegmentor,
WSIStreamDataset,
)
from .architecture.hovernet import HoVerNet
from .architecture.hovernetplus import HoVerNetPlus
from .architecture.idars import IDaRS
from .architecture.mapde import MapDe
from .architecture.micronet import MicroNet
from .architecture.nuclick import NuClick
from .architecture.sccnn import SCCNN
HoVerNet = HoVerNet
HoVerNetPlus = HoVerNetPlus
IDaRS = IDaRS
MapDe = MapDe
MicroNet = MicroNet
NuClick = NuClick
SCCNN = SCCNN
| 1,025 | 30.090909 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/models_abc.py | """Defines Abstract Base Class for Models defined in tiatoolbox."""
from abc import ABC, abstractmethod
import torch.nn as nn
class IOConfigABC(ABC):
"""Define an abstract class for holding predictor I/O information.
Enforcing such that following attributes must always be defined by
the subclass.
"""
@property
@abstractmethod
def input_resolutions(self):
raise NotImplementedError
@property
@abstractmethod
def output_resolutions(self):
raise NotImplementedError
class ModelABC(ABC, nn.Module):
"""Abstract base class for models used in tiatoolbox."""
def __init__(self):
super().__init__()
self._postproc = self.postproc
self._preproc = self.preproc
@abstractmethod
# noqa
# This is generic abc, else pylint will complain
def forward(self, *args, **kwargs):
"""Torch method, this contains logic for using layers defined in init."""
... # pragma: no cover
@staticmethod
@abstractmethod
def infer_batch(model, batch_data, on_gpu):
"""Run inference on an input batch.
Contains logic for forward operation as well as I/O aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (ndarray):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
"""
... # pragma: no cover
@staticmethod
def preproc(image):
"""Define the pre-processing of this class of model."""
return image
@staticmethod
def postproc(image):
"""Define the post-processing of this class of model."""
return image
@property
def preproc_func(self):
"""Return the current pre-processing function of this instance."""
return self._preproc
@preproc_func.setter
def preproc_func(self, func):
"""Set the pre-processing function for this instance.
If `func=None`, the method will default to `self.preproc`.
Otherwise, `func` is expected to be callable.
Examples:
>>> # expected usage
>>> # model is a subclass object of this ModelABC
>>> # `func` is a user defined function
>>> model.preproc_func = func
>>> transformed_img = model.preproc_func(img)
"""
if func is not None and not callable(func):
raise ValueError(f"{func} is not callable!")
if func is None:
self._preproc = self.preproc
else:
self._preproc = func
@property
def postproc_func(self):
"""Return the current post-processing function of this instance."""
return self._postproc
@postproc_func.setter
def postproc_func(self, func):
"""Set the pre-processing function for this instance of model.
If `func=None`, the method will default to `self.postproc`.
Otherwise, `func` is expected to be callable and behave as
follows:
Examples:
>>> # expected usage
>>> # model is a subclass object of this ModelABC
>>> # `func` is a user defined function
>>> model.postproc_func = func
>>> transformed_img = model.postproc_func(img)
"""
if func is not None and not callable(func):
raise ValueError(f"{func} is not callable!")
if func is None:
self._postproc = self.postproc
else:
self._postproc = func
| 3,623 | 27.761905 | 81 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/dataset/classification.py | import os
import pathlib
import cv2
import numpy as np
import PIL
import torchvision.transforms as transforms
from tiatoolbox import logger
from tiatoolbox.models.dataset import dataset_abc
from tiatoolbox.tools.patchextraction import PatchExtractor
from tiatoolbox.utils.misc import imread
from tiatoolbox.wsicore.wsimeta import WSIMeta
from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIReader
class _TorchPreprocCaller:
"""Wrapper for applying PyTorch transforms.
Args:
preprocs (list):
List of torchvision transforms for preprocessing the image.
The transforms will be applied in the order that they are
given in the list. For more information, visit the following
link: https://pytorch.org/vision/stable/transforms.html.
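    Example:
        >>> import numpy as np
        >>> import torchvision.transforms as transforms
        >>> # Minimal sketch with a blank uint8 RGB patch.
        >>> caller = _TorchPreprocCaller([transforms.ToTensor()])
        >>> patch = np.zeros((224, 224, 3), dtype=np.uint8)
        >>> caller(patch).shape
        torch.Size([224, 224, 3])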
"""
def __init__(self, preprocs):
self.func = transforms.Compose(preprocs)
def __call__(self, img):
img = PIL.Image.fromarray(img)
img = self.func(img)
return img.permute(1, 2, 0)
def predefined_preproc_func(dataset_name):
"""Get the preprocessing information used for the pretrained model.
Args:
dataset_name (str):
Dataset name used to determine what preprocessing was used.
Returns:
_TorchPreprocCaller:
Preprocessing function for transforming the input data.
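    Example:
        >>> # A minimal sketch; `img` is assumed to be an HxWx3 uint8 array.
        >>> preproc_func = predefined_preproc_func("kather100k")
        >>> transformed_img = preproc_func(img)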
"""
preproc_dict = {
"kather100k": [
transforms.ToTensor(),
],
"pcam": [
transforms.ToTensor(),
],
}
if dataset_name not in preproc_dict:
raise ValueError(
f"Predefined preprocessing for dataset `{dataset_name}` does not exist."
)
preprocs = preproc_dict[dataset_name]
return _TorchPreprocCaller(preprocs)
class PatchDataset(dataset_abc.PatchDatasetABC):
"""Defines a simple patch dataset, which inherits from the
`torch.utils.data.Dataset` class.
Attributes:
inputs:
            Either a list of patches, where each patch is an ndarray, or
            a list of valid paths with extensions (".jpg", ".jpeg",
            ".tif", ".tiff", ".png") pointing to images.
labels:
List of labels for sample at the same index in `inputs`.
Default is `None`.
preproc_func:
Preprocessing function used to transform the input data.
Examples:
>>> # A user defined preproc func and expected behavior
>>> preproc_func = lambda img: img/2 # reduce intensity by half
>>> transformed_img = preproc_func(img)
>>> # create a dataset to get patches preprocessed by the above function
>>> ds = PatchDataset(
... inputs=['/A/B/C/img1.png', '/A/B/C/img2.png'],
... preproc_func=preproc_func
... )
"""
def __init__(self, inputs, labels=None):
super().__init__()
self.data_is_npy_alike = False
self.inputs = inputs
self.labels = labels
# perform check on the input
self._check_input_integrity(mode="patch")
def __getitem__(self, idx):
patch = self.inputs[idx]
# Mode 0 is list of paths
if not self.data_is_npy_alike:
patch = self.load_img(patch)
# Apply preprocessing to selected patch
patch = self._preproc(patch)
data = {
"image": patch,
}
        if self.labels is not None:
            data["label"] = self.labels[idx]
        return data
class WSIPatchDataset(dataset_abc.PatchDatasetABC):
"""Defines a WSI-level patch dataset.
Attributes:
reader (:class:`.WSIReader`):
            A WSI Reader or Virtual Reader for reading a pyramidal image
            or a large tile in a pyramidal way.
inputs:
List of coordinates to read from the `reader`, each
coordinate is of the form `[start_x, start_y, end_x,
end_y]`.
patch_input_shape:
A tuple (int, int) or ndarray of shape (2,). Expected size to
read from `reader` at requested `resolution` and `units`.
Expected to be `(height, width)`.
resolution:
See (:class:`.WSIReader`) for details.
units:
See (:class:`.WSIReader`) for details.
preproc_func:
Preprocessing function used to transform the input data. It will
be called on each patch before returning it.
"""
def __init__(
self,
img_path,
mode="wsi",
mask_path=None,
patch_input_shape=None,
stride_shape=None,
resolution=None,
units=None,
auto_get_mask=True,
min_mask_ratio=0,
preproc_func=None,
):
"""Create a WSI-level patch dataset.
Args:
mode (str):
Can be either `wsi` or `tile` to denote the image to
read is either a whole-slide image or a large image
tile.
img_path (:obj:`str` or :obj:`pathlib.Path`):
Valid to pyramidal whole-slide image or large tile to
read.
mask_path (:obj:`str` or :obj:`pathlib.Path`):
Valid mask image.
patch_input_shape:
A tuple (int, int) or ndarray of shape (2,). Expected
shape to read from `reader` at requested `resolution`
and `units`. Expected to be positive and of (height,
width). Note, this is not at `resolution` coordinate
space.
stride_shape:
A tuple (int, int) or ndarray of shape (2,). Expected
stride shape to read at requested `resolution` and
`units`. Expected to be positive and of (height, width).
Note, this is not at level 0.
resolution:
Check (:class:`.WSIReader`) for details. When
`mode='tile'`, value is fixed to be `resolution=1.0` and
`units='baseline'` units: check (:class:`.WSIReader`) for
details.
units:
Units in which `resolution` is defined.
auto_get_mask:
                If `True`, then automatically generate a simple threshold
                mask using the `WSIReader.tissue_mask()` function.
min_mask_ratio:
Only patches with positive area percentage above this value are
included. Defaults to 0.
preproc_func:
Preprocessing function used to transform the input data. If
supplied, the function will be called on each patch before
returning it.
Examples:
>>> # A user defined preproc func and expected behavior
>>> preproc_func = lambda img: img/2 # reduce intensity by half
>>> transformed_img = preproc_func(img)
>>> # Create a dataset to get patches from WSI with above
>>> # preprocessing function
>>> ds = WSIPatchDataset(
... img_path='/A/B/C/wsi.svs',
... mode="wsi",
... patch_input_shape=[512, 512],
... stride_shape=[256, 256],
... auto_get_mask=False,
... preproc_func=preproc_func
... )
"""
super().__init__()
# Is there a generic func for path test in toolbox?
if not os.path.isfile(img_path):
raise ValueError("`img_path` must be a valid file path.")
if mode not in ["wsi", "tile"]:
raise ValueError(f"`{mode}` is not supported.")
patch_input_shape = np.array(patch_input_shape)
stride_shape = np.array(stride_shape)
if (
not np.issubdtype(patch_input_shape.dtype, np.integer)
or np.size(patch_input_shape) > 2
or np.any(patch_input_shape < 0)
):
raise ValueError(f"Invalid `patch_input_shape` value {patch_input_shape}.")
if (
not np.issubdtype(stride_shape.dtype, np.integer)
or np.size(stride_shape) > 2
or np.any(stride_shape < 0)
):
raise ValueError(f"Invalid `stride_shape` value {stride_shape}.")
self.preproc_func = preproc_func
img_path = pathlib.Path(img_path)
if mode == "wsi":
self.reader = WSIReader.open(img_path)
else:
logger.warning(
"WSIPatchDataset only reads image tile at "
'`units="baseline"` and `resolution=1.0`.',
stacklevel=2,
)
units = "baseline"
resolution = 1.0
img = imread(img_path)
axes = "YXS"[: len(img.shape)]
# initialise metadata for VirtualWSIReader.
# here, we simulate a whole-slide image, but with a single level.
            # ! should we expose this so that the user can provide their metadata?
metadata = WSIMeta(
mpp=np.array([1.0, 1.0]),
axes=axes,
objective_power=10,
slide_dimensions=np.array(img.shape[:2][::-1]),
level_downsamples=[1.0],
level_dimensions=[np.array(img.shape[:2][::-1])],
)
            # hack: if a mask is provided, it is read through 'mpp' or
            # 'power' units, as varying 'baseline' is currently locked
units = "mpp"
resolution = 1.0
self.reader = VirtualWSIReader(
img,
info=metadata,
)
# may decouple into misc ?
# the scaling factor will scale base level to requested read resolution/units
wsi_shape = self.reader.slide_dimensions(resolution=resolution, units=units)
# use all patches, as long as it overlaps source image
self.inputs = PatchExtractor.get_coordinates(
image_shape=wsi_shape,
patch_input_shape=patch_input_shape[::-1],
stride_shape=stride_shape[::-1],
input_within_bound=False,
)
mask_reader = None
if mask_path is not None:
if not os.path.isfile(mask_path):
raise ValueError("`mask_path` must be a valid file path.")
mask = imread(mask_path) # assume to be gray
mask = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY)
mask = np.array(mask > 0, dtype=np.uint8)
mask_reader = VirtualWSIReader(mask)
mask_reader.info = self.reader.info
elif auto_get_mask and mode == "wsi" and mask_path is None:
# if no mask provided and `wsi` mode, generate basic tissue
# mask on the fly
mask_reader = self.reader.tissue_mask(resolution=1.25, units="power")
# ? will this mess up ?
mask_reader.info = self.reader.info
if mask_reader is not None:
selected = PatchExtractor.filter_coordinates(
mask_reader, # must be at the same resolution
self.inputs, # must already be at requested resolution
wsi_shape=wsi_shape,
min_mask_ratio=min_mask_ratio,
)
self.inputs = self.inputs[selected]
if len(self.inputs) == 0:
raise ValueError("No patch coordinates remain after filtering.")
self.patch_input_shape = patch_input_shape
self.resolution = resolution
self.units = units
# Perform check on the input
self._check_input_integrity(mode="wsi")
def __getitem__(self, idx):
coords = self.inputs[idx]
# Read image patch from the whole-slide image
patch = self.reader.read_bounds(
coords,
resolution=self.resolution,
units=self.units,
pad_constant_values=255,
coord_space="resolution",
)
# Apply preprocessing to selected patch
patch = self._preproc(patch)
return {"image": patch, "coords": np.array(coords)}
| 12,103 | 34.6 | 87 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/dataset/dataset_abc.py | import os
import pathlib
from abc import ABC, abstractmethod
import numpy as np
import torch
from tiatoolbox.utils.misc import imread
class PatchDatasetABC(ABC, torch.utils.data.Dataset):
"""Defines abstract base class for patch dataset."""
def __init__(
self,
):
super().__init__()
self._preproc = self.preproc
self.data_is_npy_alike = False
self.inputs = []
self.labels = []
@staticmethod
def _check_shape_integrity(shapes):
"""Checks the integrity of input shapes.
Args:
shapes (list or np.ndarray):
                Input shapes to check.
Raises:
ValueError: If the shape is not valid.
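        Example:
            >>> # Minimal sketch: passes silently for matching HWC shapes
            >>> # and raises ValueError on mismatch.
            >>> PatchDatasetABC._check_shape_integrity(
            ...     [(224, 224, 3), (224, 224, 3)]
            ... )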
"""
if any(len(v) != 3 for v in shapes):
raise ValueError("Each sample must be an array of the form HWC.")
max_shape = np.max(shapes, axis=0)
if (shapes - max_shape[None]).sum() != 0:
raise ValueError("Images must have the same dimensions.")
def _check_input_integrity(self, mode):
"""Check that variables received during init are valid.
These checks include:
- Input is of a singular data type, such as a list of paths.
- If it is list of images, all images are of the same height
and width.
"""
if mode == "patch":
self.data_is_npy_alike = False
is_all_paths = all(isinstance(v, (pathlib.Path, str)) for v in self.inputs)
is_all_npy = all(isinstance(v, np.ndarray) for v in self.inputs)
if not (is_all_paths or is_all_npy or isinstance(self.inputs, np.ndarray)):
raise ValueError(
"Input must be either a list/array of images "
"or a list of valid image paths."
)
shapes = None
# When a list of paths is provided
if is_all_paths:
if any(not os.path.exists(v) for v in self.inputs):
                # at least one of the paths is invalid
raise ValueError(
"Input must be either a list/array of images "
"or a list of valid image paths."
)
# Preload test for sanity check
shapes = [self.load_img(v).shape for v in self.inputs]
self.data_is_npy_alike = False
if is_all_npy:
shapes = [v.shape for v in self.inputs]
self.data_is_npy_alike = True
if shapes:
self._check_shape_integrity(shapes)
# If input is a numpy array
if isinstance(self.inputs, np.ndarray):
# Check that input array is numerical
if not np.issubdtype(self.inputs.dtype, np.number):
# ndarray of mixed data types
raise ValueError("Provided input array is non-numerical.")
# N H W C | N C H W
if len(self.inputs.shape) != 4:
raise ValueError(
"Input must be an array of images of the form NHWC. This can "
"be achieved by converting a list of images to a numpy array. "
" eg., np.array([img1, img2])."
)
self.data_is_npy_alike = True
elif not isinstance(self.inputs, (list, np.ndarray)):
raise ValueError("`inputs` should be a list of patch coordinates.")
@staticmethod
def load_img(path):
"""Load an image from a provided path.
Args:
path (str): Path to an image file.
"""
path = pathlib.Path(path)
if path.suffix not in (".npy", ".jpg", ".jpeg", ".tif", ".tiff", ".png"):
raise ValueError(f"Cannot load image data from `{path.suffix}` files.")
return imread(path, as_uint8=False)
@staticmethod
def preproc(image):
"""Define the pre-processing of this class of loader."""
return image
@property
def preproc_func(self):
"""Return the current pre-processing function of this instance.
The returned function is expected to behave as follows:
>>> transformed_img = func(img)
"""
return self._preproc
@preproc_func.setter
def preproc_func(self, func):
"""Set the pre-processing function for this instance.
If `func=None`, the method will default to `self.preproc`.
Otherwise, `func` is expected to be callable and behaves as
follows:
>>> transformed_img = func(img)
"""
if func is None:
self._preproc = self.preproc
elif callable(func):
self._preproc = func
else:
raise ValueError(f"{func} is not callable!")
def __len__(self):
return len(self.inputs)
@abstractmethod
def __getitem__(self, idx):
... # pragma: no cover
| 5,005 | 31.506494 | 87 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/dataset/info.py | import os
from abc import ABC, abstractmethod
from pathlib import Path
from tiatoolbox import rcParam
from tiatoolbox.utils.misc import download_data, grab_files_from_dir, unzip_data
class DatasetInfoABC(ABC):
"""Define an abstract class for holding dataset information.
Enforcing such that following attributes must always be defined by
the subclass.
Attributes:
inputs (list):
A list of paths where each path points to a sample image.
        labels (list):
            A list of `int` where each value is the label of the sample
            at the same index.
label_names (dict):
A dict indicates the possible associate name of each label
value.
"""
@property
@abstractmethod
def inputs(self):
raise NotImplementedError
@property
@abstractmethod
def labels(self):
raise NotImplementedError
@property
@abstractmethod
def label_names(self):
raise NotImplementedError
class KatherPatchDataset(DatasetInfoABC):
"""Define a class for holding the Kather dataset information.
Args:
save_dir_path (str or None):
Path to directory containing the Kather dataset. This is
assumed to be the same form after the data is initially
downloaded. If the argument is `None`, the dataset will be
downloaded and extracted into the 'run_dir/download/Kather'.
Attributes:
inputs (list):
A list of paths where each path points to a sample image.
labels (list):
A list of `int` where each value corresponds to the label of
the sample at the same index.
label_names (dict):
A dict mapping each unique label value to the associated
class name as a string.
"""
    # Pre-declare attributes to satisfy the ABC; actual initialisation is in __init__
inputs = None
labels = None
label_names = None
def __init__(
self,
save_dir_path=None,
):
label_names = [
"BACK",
"NORM",
"DEB",
"TUM",
"ADI",
"MUC",
"MUS",
"STR",
"LYM",
]
if save_dir_path is None: # pragma: no cover
save_dir_path = Path(rcParam["TIATOOLBOX_HOME"], "dataset")
if not os.path.exists(save_dir_path):
save_zip_path = os.path.join(save_dir_path, "Kather.zip")
url = (
"https://tiatoolbox.dcs.warwick.ac.uk/datasets"
"/kather100k-train-nonorm-subset-20k.zip"
)
download_data(url, save_zip_path)
unzip_data(save_zip_path, save_dir_path)
save_dir_path = Path(save_dir_path, "kather100k-validation")
        # kept outside the branch so a failed download is caught by the check below
save_dir_path = Path(save_dir_path)
if not save_dir_path.exists():
raise ValueError(f"Dataset does not exist at `{save_dir_path}`")
        # TODO: handle the case where the downloaded data is corrupted
uid_name_map = {}
all_paths = []
for label_id, label_name in enumerate(label_names):
paths = grab_files_from_dir(
f"{save_dir_path}/{label_name}/", file_types="*.tif"
)
paths = [[v, label_id] for v in paths]
paths.sort()
all_paths.extend(paths)
uid_name_map[label_id] = label_name
inputs, labels = list(zip(*all_paths))
self.label_names = uid_name_map
self.inputs = list(inputs) # type casting to list
self.labels = list(labels) # type casting to list
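# Editor's usage sketch: on first use the constructor downloads and unzips
# a Kather100K subset, after which `inputs` and `labels` can feed a patch
# dataset downstream:
#
#     info = KatherPatchDataset()
#     info.label_names  # {0: "BACK", 1: "NORM", ...}
#     # e.g. PatchDataset(inputs=info.inputs, labels=info.labels)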
| 3,716 | 30.769231 | 80 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/dataset/__init__.py | """Contains dataset functionality for use with models in tiatoolbox."""
from tiatoolbox.models.dataset.classification import (
PatchDataset,
WSIPatchDataset,
predefined_preproc_func,
)
from tiatoolbox.models.dataset.dataset_abc import PatchDatasetABC
from tiatoolbox.models.dataset.info import DatasetInfoABC, KatherPatchDataset
| 342 | 33.3 | 77 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/micronet.py | """Defines MicroNet architecture.
Raza, SEA et al., “Micro-Net: A unified model for segmentation of
various objects in microscopy images,” Medical Image Analysis,
Dec. 2018, vol. 52, p. 160–173.
"""
from collections import OrderedDict
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as functional
from scipy import ndimage
from skimage import morphology
from tiatoolbox.models.architecture.hovernet import HoVerNet
from tiatoolbox.models.models_abc import ModelABC
from tiatoolbox.utils import misc
def group1_forward_branch(
layer: nn.Module, in_tensor: torch.Tensor, resized_feat: torch.Tensor
) -> torch.Tensor:
"""Defines group 1 connections.
Args:
layer (torch.nn.Module):
Network layer.
in_tensor (torch.Tensor):
Input tensor.
resized_feat (torch.Tensor):
Resized input.
Returns:
torch.Tensor:
Output of group 1 layer.
"""
a = layer["conv1"](in_tensor)
a = layer["conv2"](a)
a = layer["pool"](a)
b = layer["conv3"](resized_feat)
b = layer["conv4"](b)
return torch.cat(tensors=(a, b), dim=1)
def group2_forward_branch(layer: nn.Module, in_tensor: torch.Tensor) -> torch.Tensor:
"""Defines group 1 connections.
Args:
layer (torch.nn.Module):
Network layer.
in_tensor (torch.Tensor):
Input tensor.
Returns:
torch.Tensor:
            Output of group 2 layer.
"""
a = layer["conv1"](in_tensor)
return layer["conv2"](a)
def group3_forward_branch(
layer: nn.Module, main_feat: torch.Tensor, skip: torch.Tensor
) -> torch.Tensor:
"""Defines group 1 connections.
Args:
layer (torch.nn.Module):
Network layer.
main_feat (torch.Tensor):
Input tensor.
skip (torch.Tensor):
Skip connection.
Returns:
        torch.Tensor: Output of group 3 layer.
"""
a = layer["up1"](main_feat)
a = layer["conv1"](a)
a = layer["conv2"](a)
b1 = layer["up2"](a)
b2 = layer["up3"](skip)
b = torch.cat(tensors=(b1, b2), dim=1)
return layer["conv3"](b)
def group4_forward_branch(layer: nn.Module, in_tensor: torch.Tensor) -> torch.Tensor:
"""Defines group 1 connections.
Args:
layer (torch.nn.Module):
Network layer.
in_tensor (torch.Tensor):
Input tensor.
Returns:
        torch.Tensor: Output of group 4 layer.
"""
a = layer["up1"](in_tensor)
return layer["conv1"](a)
def group1_arch_branch(in_ch: int, resized_in_ch: int, out_ch: int):
"""Group1 branch for MicroNet.
Args:
in_ch (int):
Number of input channels.
resized_in_ch (int):
Number of input channels from resized input.
out_ch (int):
Number of output channels.
Returns:
:class:`torch.nn.ModuleDict`:
An output of type :class:`torch.nn.ModuleDict`
"""
module_dict = OrderedDict()
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
in_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
nn.BatchNorm2d(out_ch),
)
module_dict["conv2"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
module_dict["pool"] = nn.MaxPool2d(2, padding=0) # check padding
module_dict["conv3"] = nn.Sequential(
nn.Conv2d(
resized_in_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
nn.BatchNorm2d(out_ch),
)
module_dict["conv4"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
return nn.ModuleDict(module_dict)
def group2_arch_branch(in_ch: int, out_ch: int):
"""Group2 branch for MicroNet.
Args:
in_ch (int):
Number of input channels.
out_ch (int):
Number of output channels.
Returns:
torch.nn.ModuleDict:
An output of type :class:`torch.nn.ModuleDict`
"""
module_dict = OrderedDict()
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
in_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
module_dict["conv2"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
return nn.ModuleDict(module_dict)
def group3_arch_branch(in_ch: int, skip: int, out_ch: int):
"""Group3 branch for MicroNet.
Args:
in_ch (int):
Number of input channels.
skip (int):
Number of channels for the skip connection.
out_ch (int):
Number of output channels.
Returns:
torch.nn.ModuleDict:
An output of type :class:`torch.nn.ModuleDict`
"""
module_dict = OrderedDict()
module_dict["up1"] = nn.ConvTranspose2d(
in_ch, out_ch, kernel_size=(2, 2), stride=(2, 2)
)
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
module_dict["conv2"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
module_dict["up2"] = nn.ConvTranspose2d(
out_ch, out_ch, kernel_size=(5, 5), stride=(1, 1)
)
module_dict["up3"] = nn.ConvTranspose2d(
skip, out_ch, kernel_size=(5, 5), stride=(1, 1)
)
module_dict["conv3"] = nn.Sequential(
nn.Conv2d(
2 * out_ch,
out_ch,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Tanh(),
)
return nn.ModuleDict(module_dict)
def group4_arch_branch(
in_ch: int,
out_ch: int,
up_kernel: Tuple[int, int] = (2, 2),
up_strides: Tuple[int, int] = (2, 2),
activation: str = "tanh",
) -> nn.ModuleDict:
"""Group4 branch for MicroNet.
This branch defines architecture for decoder and
provides input for the auxiliary and main output branch.
Args:
in_ch (int):
Number of input channels.
out_ch (int):
Number of output channels.
up_kernel (tuple of int):
Kernel size for
:class:`torch.nn.ConvTranspose2d`.
up_strides (tuple of int):
Stride size for
:class:`torch.nn.ConvTranspose2d`.
activation (str):
Activation function, default="tanh".
Returns:
torch.nn.ModuleDict:
An output of type :class:`torch.nn.ModuleDict`
"""
if activation == "relu":
activation = nn.ReLU()
else:
activation = nn.Tanh()
module_dict = OrderedDict()
module_dict["up1"] = nn.ConvTranspose2d(
in_ch, out_ch, kernel_size=up_kernel, stride=up_strides
)
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
out_ch,
out_ch,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
activation,
)
return nn.ModuleDict(module_dict)
def out_arch_branch(
in_ch: int, num_output_channels: int = 2, activation: str = "softmax"
):
"""Group5 branch for MicroNet.
This branch defines architecture for auxiliary and the main output.
Args:
in_ch (int):
Number of input channels.
num_output_channels (int):
Number of output channels. default=2.
activation (str):
Activation function, default="softmax".
Returns:
torch.nn.Sequential:
An output of type :class:`torch.nn.Sequential`
"""
if activation == "relu":
activation = nn.ReLU()
else:
activation = nn.Softmax()
return nn.Sequential(
nn.Dropout2d(p=0.5),
nn.Conv2d(
in_ch,
num_output_channels,
kernel_size=(3, 3),
stride=(1, 1),
padding=0,
bias=True,
),
activation,
)
class MicroNet(ModelABC):
"""Initialize MicroNet [1].
The following models have been included in tiatoolbox:
1. `micronet-consep`:
       This model is trained on the `CoNSeP dataset
       <https://warwick.ac.uk/fac/cross_fac/tia/data/hovernet/>`_. The
       model has been retrained in PyTorch, as the original model with
       results on CoNSeP [2] was trained in TensorFlow.
The tiatoolbox model should produce the following results on the CoNSeP dataset:
.. list-table:: MicroNet performance
:widths: 15 15 15 15 15 15 15
:header-rows: 1
* - Model name
- Data set
- DICE
- AJI
- DQ
- SQ
- PQ
* - micronet-consep
- CoNSeP
- 0.80
- 0.49
- 0.62
- 0.75
- 0.47
Args:
num_input_channels (int):
Number of channels in input. default=3.
num_output_channels (int):
Number of output channels. default=2.
out_activation (str):
Activation to use at the output. MapDe inherits MicroNet
but uses ReLU activation.
References:
[1] Raza, Shan E Ahmed, et al. "Micro-Net: A unified model for
segmentation of various objects in microscopy images."
Medical image analysis 52 (2019): 160-173.
[2] Graham, Simon, et al. "Hover-net: Simultaneous segmentation
and classification of nuclei in multi-tissue histology images."
Medical Image Analysis 58 (2019): 101563.
"""
def __init__(
self, num_input_channels=3, num_output_channels=2, out_activation="softmax"
):
super().__init__()
if num_output_channels < 2:
raise ValueError("Number of classes should be >=2.")
self.__num_output_channels = num_output_channels
self.in_ch = num_input_channels
module_dict = OrderedDict()
module_dict["b1"] = group1_arch_branch(
num_input_channels, num_input_channels, 64
)
module_dict["b2"] = group1_arch_branch(128, num_input_channels, 128)
module_dict["b3"] = group1_arch_branch(256, num_input_channels, 256)
module_dict["b4"] = group1_arch_branch(512, num_input_channels, 512)
module_dict["b5"] = group2_arch_branch(1024, 2048)
module_dict["b6"] = group3_arch_branch(2048, 1024, 1024)
module_dict["b7"] = group3_arch_branch(1024, 512, 512)
module_dict["b8"] = group3_arch_branch(512, 256, 256)
module_dict["b9"] = group3_arch_branch(256, 128, 128)
module_dict["fm1"] = group4_arch_branch(
128, 64, (2, 2), (2, 2), activation=out_activation
)
module_dict["fm2"] = group4_arch_branch(
256, 128, (4, 4), (4, 4), activation=out_activation
)
module_dict["fm3"] = group4_arch_branch(
512, 256, (8, 8), (8, 8), activation=out_activation
)
module_dict["aux_out1"] = out_arch_branch(
64, num_output_channels=self.__num_output_channels
)
module_dict["aux_out2"] = out_arch_branch(
128, num_output_channels=self.__num_output_channels
)
module_dict["aux_out3"] = out_arch_branch(
256, num_output_channels=self.__num_output_channels
)
module_dict["out"] = out_arch_branch(
64 + 128 + 256,
num_output_channels=self.__num_output_channels,
activation=out_activation,
)
self.layer = nn.ModuleDict(module_dict)
def forward(self, input_tensor: torch.Tensor): # skipcq: PYL-W0221
"""Logic for using layers defined in init.
This method defines how layers are used in forward operation.
Args:
input_tensor (torch.Tensor):
Input images, the tensor is in the shape of NCHW.
Returns:
list:
A list of main and auxiliary outputs. The expected
format is `[main_output, aux1, aux2, aux3]`.
"""
b1 = group1_forward_branch(
self.layer["b1"],
input_tensor,
functional.interpolate(input_tensor, size=(128, 128), mode="bicubic"),
)
b2 = group1_forward_branch(
self.layer["b2"],
b1,
functional.interpolate(input_tensor, size=(64, 64), mode="bicubic"),
)
b3 = group1_forward_branch(
self.layer["b3"],
b2,
functional.interpolate(input_tensor, size=(32, 32), mode="bicubic"),
)
b4 = group1_forward_branch(
self.layer["b4"],
b3,
functional.interpolate(input_tensor, size=(16, 16), mode="bicubic"),
)
b5 = group2_forward_branch(self.layer["b5"], b4)
b6 = group3_forward_branch(self.layer["b6"], b5, b4)
b7 = group3_forward_branch(self.layer["b7"], b6, b3)
b8 = group3_forward_branch(self.layer["b8"], b7, b2)
b9 = group3_forward_branch(self.layer["b9"], b8, b1)
fm1 = group4_forward_branch(self.layer["fm1"], b9)
fm2 = group4_forward_branch(self.layer["fm2"], b8)
fm3 = group4_forward_branch(self.layer["fm3"], b7)
aux1 = self.layer["aux_out1"](fm1)
aux2 = self.layer["aux_out2"](fm2)
aux3 = self.layer["aux_out3"](fm3)
out = torch.cat(tensors=(fm1, fm2, fm3), dim=1)
out = self.layer["out"](out)
return [out, aux1, aux2, aux3]
@staticmethod
def postproc(image: np.ndarray):
"""Post-processing script for MicroNet.
Args:
image (ndarray):
Input image of type numpy array.
Returns:
            tuple:
                Pixel-wise nuclear instance segmentation prediction and
                a dictionary of per-instance information.
"""
pred_bin = np.argmax(image[0], axis=2)
pred_inst = ndimage.label(pred_bin)[0]
pred_inst = morphology.remove_small_objects(pred_inst, min_size=50)
canvas = np.zeros(pred_inst.shape[:2], dtype=np.int32)
for inst_id in range(1, np.max(pred_inst) + 1):
inst_map = np.array(pred_inst == inst_id, dtype=np.uint8)
inst_map = ndimage.binary_fill_holes(inst_map)
canvas[inst_map > 0] = inst_id
nuc_inst_info_dict = HoVerNet.get_instance_info(canvas)
return canvas, nuc_inst_info_dict
@staticmethod
def preproc(image: np.ndarray):
"""Preprocessing function for MicroNet.
Performs per image standardization.
Args:
image (:class:`numpy.ndarray`):
Input image of type numpy array.
Returns:
:class:`numpy.ndarray`:
Pre-processed numpy array.
"""
image = np.transpose(image, axes=(2, 0, 1))
image = image / 255.0
image = torch.from_numpy(image)
image_mean = torch.mean(image, dim=(-1, -2, -3))
stddev = torch.std(image, dim=(-1, -2, -3))
num_pixels = torch.tensor(torch.numel(image), dtype=torch.float32)
min_stddev = torch.rsqrt(num_pixels)
adjusted_stddev = torch.max(stddev, min_stddev)
image -= image_mean
image = torch.div(image, adjusted_stddev)
return np.transpose(image.numpy(), axes=(1, 2, 0))
@staticmethod
def infer_batch(
model: torch.nn.Module, batch_data: np.ndarray, on_gpu: bool
    ) -> list:
"""Run inference on an input batch.
This contains logic for forward operation as well as batch I/O
aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (:class:`numpy.ndarray`):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
Returns:
            list:
                A list containing the probability map as a numpy array.
"""
patch_imgs = batch_data
device = misc.select_device(on_gpu)
patch_imgs_gpu = patch_imgs.to(device).type(torch.float32) # to NCHW
patch_imgs_gpu = patch_imgs_gpu.permute(0, 3, 1, 2).contiguous()
model.eval() # infer mode
with torch.inference_mode():
pred, _, _, _ = model(patch_imgs_gpu)
pred = pred.permute(0, 2, 3, 1).contiguous()
pred = pred.cpu().numpy()
return [
pred,
]
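if __name__ == "__main__":
    # Editor's smoke-test sketch (assumption: a 252x252 input, which fits
    # the valid-padding arithmetic of the branches above and yields a
    # 252x252 output map plus three auxiliary maps).
    net = MicroNet(num_input_channels=3, num_output_channels=2)
    net.eval()
    with torch.inference_mode():
        out, aux1, aux2, aux3 = net(torch.rand(1, 3, 252, 252))
    print(out.shape)  # torch.Size([1, 2, 252, 252])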
| 17,442 | 26.775478 | 85 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/mapde.py | """Defines MapDe architecture.
Raza, Shan E Ahmed, et al. "Deconvolving convolutional neural network
for cell detection." 2019 IEEE 16th International Symposium on Biomedical
Imaging (ISBI 2019). IEEE, 2019.
"""
import numpy as np
import torch
import torch.nn.functional as F # noqa: N812
from skimage.feature import peak_local_max
from tiatoolbox.models.architecture.micronet import MicroNet
from tiatoolbox.utils.misc import select_device
class MapDe(MicroNet):
"""Initialize MapDe [1].
The following models have been included in tiatoolbox:
1. `mapde-crchisto`:
This model is trained on `CRCHisto dataset
<https://warwick.ac.uk/fac/cross_fac/tia/data/crchistolabelednucleihe/>`_
2. `mapde-conic`:
This model is trained on `CoNIC dataset
<https://conic-challenge.grand-challenge.org/evaluation/challenge/leaderboard//>`_
Centroids of ground truth masks were used to train this model.
The results are reported on the whole test data set including preliminary
and final set.
The tiatoolbox model should produce the following results on the following datasets
using 8 pixels as radius for true detection:
.. list-table:: MapDe performance
:widths: 15 15 15 15 15
:header-rows: 1
* - Model name
- Data set
- Precision
- Recall
- F1Score
* - mapde-crchisto
- CRCHisto
- 0.81
- 0.82
- 0.81
* - mapde-conic
- CoNIC
- 0.85
- 0.85
- 0.85
Args:
num_input_channels (int):
Number of channels in input. default=3.
num_classes (int):
Number of cell classes to identify. default=1.
min_distance (int):
The minimal allowed distance separating peaks.
            To find the maximum number of peaks, use `min_distance=1`, default=4.
        threshold_abs (float):
            Minimum intensity of peaks, default=250.
References:
[1] Raza, Shan E. Ahmed, et al. "Deconvolving convolutional neural network
for cell detection." 2019 IEEE 16th International Symposium on Biomedical
Imaging (ISBI 2019). IEEE, 2019.
"""
def __init__(
self,
num_input_channels: int = 3,
min_distance: int = 4,
threshold_abs: float = 250,
num_classes: int = 1,
):
super().__init__(
num_output_channels=num_classes * 2,
num_input_channels=num_input_channels,
out_activation="relu",
)
dist_filter = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[
0.0,
0.0,
0.0,
0.1055728,
0.17537889,
0.2,
0.17537889,
0.1055728,
0.0,
0.0,
0.0,
],
[
0.0,
0.0,
0.1514719,
0.27888975,
0.36754447,
0.4,
0.36754447,
0.27888975,
0.1514719,
0.0,
0.0,
],
[
0.0,
0.1055728,
0.27888975,
0.43431458,
0.5527864,
0.6,
0.5527864,
0.43431458,
0.27888975,
0.1055728,
0.0,
],
[
0.0,
0.17537889,
0.36754447,
0.5527864,
0.71715724,
0.8,
0.71715724,
0.5527864,
0.36754447,
0.17537889,
0.0,
],
[0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 0.8, 0.6, 0.4, 0.2, 0.0],
[
0.0,
0.17537889,
0.36754447,
0.5527864,
0.71715724,
0.8,
0.71715724,
0.5527864,
0.36754447,
0.17537889,
0.0,
],
[
0.0,
0.1055728,
0.27888975,
0.43431458,
0.5527864,
0.6,
0.5527864,
0.43431458,
0.27888975,
0.1055728,
0.0,
],
[
0.0,
0.0,
0.1514719,
0.27888975,
0.36754447,
0.4,
0.36754447,
0.27888975,
0.1514719,
0.0,
0.0,
],
[
0.0,
0.0,
0.0,
0.1055728,
0.17537889,
0.2,
0.17537889,
0.1055728,
0.0,
0.0,
0.0,
],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
],
dtype=np.float32,
)
dist_filter = np.expand_dims(dist_filter, axis=(0, 1)) # NCHW
dist_filter = np.repeat(dist_filter, repeats=num_classes * 2, axis=1)
self.min_distance = min_distance
self.threshold_abs = threshold_abs
self.register_buffer(
"dist_filter",
torch.from_numpy(dist_filter.astype(np.float32)),
)
self.dist_filter.requires_grad = False
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
"""Logic for using layers defined in init.
This method defines how layers are used in forward operation.
Args:
input_tensor (torch.Tensor):
Input images, the tensor is in the shape of NCHW.
Returns:
torch.Tensor:
Output map for cell detection. Peak detection should be applied
to this output for cell detection.
"""
logits, _, _, _ = super().forward(input_tensor)
out = F.conv2d(logits, self.dist_filter, padding="same")
return F.relu(out)
# skipcq: PYL-W0221 # noqa: E800
def postproc(self, prediction_map: np.ndarray) -> np.ndarray:
"""Post-processing script for MicroNet.
Performs peak detection and extracts coordinates in x, y format.
Args:
prediction_map (ndarray):
Input image of type numpy array.
Returns:
:class:`numpy.ndarray`:
                Detected nuclei coordinates in (x, y) format.
"""
coordinates = peak_local_max(
np.squeeze(prediction_map[0], axis=2),
min_distance=self.min_distance,
threshold_abs=self.threshold_abs,
exclude_border=False,
)
return np.fliplr(coordinates)
@staticmethod
def infer_batch(
model: torch.nn.Module, batch_data: np.ndarray, on_gpu: bool
    ) -> list:
"""Run inference on an input batch.
This contains logic for forward operation as well as batch I/O
aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (:class:`numpy.ndarray`):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
Returns:
            list:
                A list containing the probability map as a numpy array.
"""
patch_imgs = batch_data
device = select_device(on_gpu)
patch_imgs_gpu = patch_imgs.to(device).type(torch.float32) # to NCHW
patch_imgs_gpu = patch_imgs_gpu.permute(0, 3, 1, 2).contiguous()
model.eval() # infer mode
with torch.inference_mode():
pred = model(patch_imgs_gpu)
pred = pred.permute(0, 2, 3, 1).contiguous()
pred = pred.cpu().numpy()
return [
pred,
]
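if __name__ == "__main__":
    # Editor's smoke-test sketch (assumption: a 252x252 input as for
    # MicroNet; with random input the peak threshold of 250 will usually
    # yield an empty coordinate array).
    net = MapDe()
    net.eval()
    with torch.inference_mode():
        dist_map = net(torch.rand(1, 3, 252, 252))  # (1, 1, 252, 252)
    pred = dist_map.permute(0, 2, 3, 1).cpu().numpy()  # NHWC for postproc
    coords = net.postproc(pred)  # detected (x, y) coordinates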
| 8,693 | 28.571429 | 90 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/utils.py | """Defines utility layers and operators for models in tiatoolbox."""
from typing import Union
import numpy as np
import torch
import torch.nn as nn
def centre_crop(
img: Union[np.ndarray, torch.tensor],
crop_shape: Union[np.ndarray, torch.tensor],
data_format: str = "NCHW",
):
"""A function to center crop image with given crop shape.
Args:
img (:class:`numpy.ndarray`, torch.tensor):
Input image, should be of 3 channels.
crop_shape (:class:`numpy.ndarray`, torch.tensor):
The subtracted amount in the form of `[subtracted height,
subtracted width]`.
data_format (str):
Either `"NCHW"` or `"NHWC"`.
Returns:
(:class:`numpy.ndarray`, torch.tensor):
Cropped image.
"""
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(f"Unknown input format `{data_format}`.")
crop_t = crop_shape[0] // 2
crop_b = crop_shape[0] - crop_t
crop_l = crop_shape[1] // 2
crop_r = crop_shape[1] - crop_l
if data_format == "NCHW":
return img[:, :, crop_t:-crop_b, crop_l:-crop_r]
return img[:, crop_t:-crop_b, crop_l:-crop_r, :]
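# Editor's sketch: cropping a (1, 3, 256, 256) tensor with
# `crop_shape=[6, 6]` removes 3 rows/columns from each side:
#
#     x = torch.rand(1, 3, 256, 256)
#     centre_crop(x, [6, 6]).shape  # torch.Size([1, 3, 250, 250])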
def centre_crop_to_shape(
x: Union[np.ndarray, torch.tensor],
y: Union[np.ndarray, torch.tensor],
data_format: str = "NCHW",
):
"""A function to center crop image to shape.
    Centre crop `x` so that `x` has the shape of `y`; the height and
    width of `y` must be smaller than those of `x`.
Args:
x (:class:`numpy.ndarray`, torch.tensor):
Image to be cropped.
y (:class:`numpy.ndarray`, torch.tensor):
Reference image for getting cropping shape, should be of 3
channels.
data_format:
Either `"NCHW"` or `"NHWC"`.
Returns:
(:class:`numpy.ndarray`, torch.tensor):
Cropped image.
"""
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(f"Unknown input format `{data_format}`.")
if data_format == "NCHW":
_, _, h1, w1 = x.shape
_, _, h2, w2 = y.shape
else:
_, h1, w1, _ = x.shape
_, h2, w2, _ = y.shape
    if h1 <= h2 or w1 <= w2:
        raise ValueError(
            "Height and width of `x` must be larger than those of `y`: "
            f"{[h1, w1]} vs {[h2, w2]}."
        )
x_shape = x.shape
y_shape = y.shape
if data_format == "NCHW":
crop_shape = (x_shape[2] - y_shape[2], x_shape[3] - y_shape[3])
else:
crop_shape = (x_shape[1] - y_shape[1], x_shape[2] - y_shape[2])
return centre_crop(x, crop_shape, data_format)
class UpSample2x(nn.Module):
"""A layer to scale input by a factor of 2.
    This layer uses a Kronecker product underneath rather than the
    default pytorch interpolation.
"""
def __init__(self):
super().__init__()
# correct way to create constant within module
self.register_buffer(
"unpool_mat", torch.from_numpy(np.ones((2, 2), dtype="float32"))
)
def forward(self, x: torch.Tensor):
"""Logic for using layers defined in init.
Args:
x (torch.Tensor):
Input images, the tensor is in the shape of NCHW.
Returns:
torch.Tensor:
Input images upsampled by a factor of 2 via nearest
                neighbour interpolation. The tensor is of the shape
                NCHW.
"""
input_shape = list(x.shape)
# un-squeeze is the same as expand_dims
# permute is the same as transpose
# view is the same as reshape
x = x.unsqueeze(-1) # bchwx1
mat = self.unpool_mat.unsqueeze(0) # 1xshxsw
ret = torch.tensordot(x, mat, dims=1) # bxcxhxwxshxsw
ret = ret.permute(0, 1, 2, 4, 3, 5)
return ret.reshape((-1, input_shape[1], input_shape[2] * 2, input_shape[3] * 2))
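if __name__ == "__main__":
    # Editor's sketch: every input pixel is duplicated into a 2x2 block,
    # i.e. nearest-neighbour upsampling via the Kronecker product.
    up = UpSample2x()
    x = torch.arange(4.0).reshape(1, 1, 2, 2)
    print(up(x))  # rows: [0,0,1,1], [0,0,1,1], [2,2,3,3], [2,2,3,3]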
| 3,972 | 28.213235 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/idars.py | """Defines CNNs as used in IDaRS for prediction of molecular pathways and mutations."""
import numpy as np
from torchvision import transforms
from tiatoolbox.models.architecture.vanilla import CNNModel
TRANSFORM = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.1, 0.1, 0.1]),
]
)
class IDaRS(CNNModel):
"""Initialise IDaRS and add custom preprocessing as used in the original paper [1].
The tiatoolbox model should produce the following results:
.. list-table:: IDaRS performance measured by AUROC.
:widths: 15 15 15 15 15 15 15
:header-rows: 1
* -
- MSI
- TP53
- BRAF
- CIMP
- CIN
- HM
* - Bilal et al.
- 0.828
- 0.755
- 0.813
- 0.853
- 0.860
- 0.846
* - TIAToolbox
- 0.870
- 0.747
- 0.750
- 0.748
- 0.810
- 0.790
Args:
backbone (str):
Model name.
num_classes (int):
Number of classes output by model.
References:
[1] Bilal, Mohsin, et al. "Development and validation of a weakly supervised
deep learning framework to predict the status of molecular pathways and key
mutations in colorectal cancer from routine histology images: a retrospective
study." The Lancet Digital Health 3.12 (2021): e763-e772.
"""
def __init__(self, backbone, num_classes=1):
super().__init__(backbone, num_classes=num_classes)
@staticmethod
# noqa: E800
def preproc(image: np.ndarray):
"""Define preprocessing steps.
Args:
            image (:class:`numpy.ndarray`):
                An image of shape HWC.
        Returns:
            image (:class:`torch.Tensor`):
                The transformed image of shape HWC.
"""
image = image.copy()
image = TRANSFORM(image)
        # ToTensor converts the image to CHW, so permute back to HWC
return image.permute(1, 2, 0)
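if __name__ == "__main__":
    # Editor's sketch of the preprocessing: ToTensor scales uint8 HWC input
    # to [0, 1], Normalize maps it to (v - 0.5) / 0.1, and the final
    # permute restores HWC.
    img = np.random.randint(0, 256, size=(96, 96, 3), dtype=np.uint8)
    out = IDaRS.preproc(img)
    print(out.shape)  # torch.Size([96, 96, 3])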
| 2,075 | 24.317073 | 87 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/vanilla.py | """Defines vanilla CNNs with torch backbones, mainly for patch classification."""
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as torch_models
from tiatoolbox.models.models_abc import ModelABC
from tiatoolbox.utils.misc import select_device
def _get_architecture(arch_name, pretrained=True, **kwargs):
"""Get a model.
Model architectures are either already defined within torchvision or
they can be custom-made within tiatoolbox.
Args:
        arch_name (str):
            Architecture name.
        pretrained (bool):
            Whether to load the default torchvision pretrained weights.
Returns:
List of PyTorch network layers wrapped with `nn.Sequential`.
https://pytorch.org/docs/stable/generated/torch.nn.Sequential.html
"""
backbone_dict = {
"alexnet": torch_models.alexnet,
"resnet18": torch_models.resnet18,
"resnet34": torch_models.resnet34,
"resnet50": torch_models.resnet50,
"resnet101": torch_models.resnet101,
"resnext50_32x4d": torch_models.resnext50_32x4d,
"resnext101_32x8d": torch_models.resnext101_32x8d,
"wide_resnet50_2": torch_models.wide_resnet50_2,
"wide_resnet101_2": torch_models.wide_resnet101_2,
"densenet121": torch_models.densenet121,
"densenet161": torch_models.densenet161,
"densenet169": torch_models.densenet169,
"densenet201": torch_models.densenet201,
"inception_v3": torch_models.inception_v3,
"googlenet": torch_models.googlenet,
"mobilenet_v2": torch_models.mobilenet_v2,
"mobilenet_v3_large": torch_models.mobilenet_v3_large,
"mobilenet_v3_small": torch_models.mobilenet_v3_small,
}
if arch_name not in backbone_dict:
raise ValueError(f"Backbone `{arch_name}` is not supported.")
creator = backbone_dict[arch_name]
model = creator(pretrained=pretrained, **kwargs)
    # Unroll the model definition and strip off the final GAP and FC layers
if "resnet" in arch_name or "resnext" in arch_name:
return nn.Sequential(*list(model.children())[:-2])
if "densenet" in arch_name:
return model.features
if "alexnet" in arch_name:
return model.features
if "inception_v3" in arch_name or "googlenet" in arch_name:
return nn.Sequential(*list(model.children())[:-3])
return model.features
class CNNModel(ModelABC):
"""Retrieve the model backbone and attach an extra FCN to perform classification.
Args:
backbone (str):
Model name.
num_classes (int):
Number of classes output by model.
Attributes:
num_classes (int):
Number of classes output by the model.
feat_extract (nn.Module):
Backbone CNN model.
pool (nn.Module):
Type of pooling applied after feature extraction.
classifier (nn.Module):
Linear classifier module used to map the features to the
output.
"""
def __init__(self, backbone, num_classes=1):
super().__init__()
self.num_classes = num_classes
self.feat_extract = _get_architecture(backbone)
self.pool = nn.AdaptiveAvgPool2d((1, 1))
        # Retrieve the channel count dynamically via a small forward pass
prev_num_ch = self.feat_extract(torch.rand([2, 3, 96, 96])).shape[1]
self.classifier = nn.Linear(prev_num_ch, num_classes)
# pylint: disable=W0221
# because abc is generic, this is actual definition
def forward(self, imgs):
"""Pass input data through the model.
Args:
imgs (torch.Tensor):
Model input.
"""
feat = self.feat_extract(imgs)
gap_feat = self.pool(feat)
gap_feat = torch.flatten(gap_feat, 1)
logit = self.classifier(gap_feat)
return torch.softmax(logit, -1)
@staticmethod
def postproc(image):
"""Define the post-processing of this class of model.
This simply applies argmax along last axis of the input.
"""
return np.argmax(image, axis=-1)
@staticmethod
def infer_batch(model, batch_data, on_gpu):
"""Run inference on an input batch.
Contains logic for forward operation as well as i/o aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (ndarray):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
"""
img_patches_device = batch_data.to(select_device(on_gpu)).type(
torch.float32
) # to NCHW
img_patches_device = img_patches_device.permute(0, 3, 1, 2).contiguous()
# Inference mode
model.eval()
# Do not compute the gradient (not training)
with torch.inference_mode():
output = model(img_patches_device)
# Output should be a single tensor or scalar
return output.cpu().numpy()
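# Editor's usage sketch (note: constructing the backbone downloads the
# torchvision ImageNet weights on first use):
#
#     model = CNNModel("resnet18", num_classes=9)
#     model.eval()
#     probs = model(torch.rand(2, 3, 96, 96))  # (2, 9) softmax scores
#     preds = CNNModel.postproc(probs.detach().numpy())  # argmax labels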
class CNNBackbone(ModelABC):
"""Retrieve the model backbone and strip the classification layer.
This is a wrapper for pretrained models within pytorch.
Args:
backbone (str):
Model name. Currently, the tool supports following
model names and their default associated weights from pytorch.
- "alexnet"
- "resnet18"
- "resnet34"
- "resnet50"
- "resnet101"
- "resnext50_32x4d"
- "resnext101_32x8d"
- "wide_resnet50_2"
- "wide_resnet101_2"
- "densenet121"
- "densenet161"
- "densenet169"
- "densenet201"
- "inception_v3"
- "googlenet"
- "mobilenet_v2"
- "mobilenet_v3_large"
- "mobilenet_v3_small"
Examples:
>>> # Creating resnet50 architecture from default pytorch
>>> # without the classification layer with its associated
>>> # weights loaded
>>> model = CNNBackbone(backbone="resnet50")
>>> model.eval() # set to evaluation mode
>>> # dummy sample in NHWC form
>>> samples = torch.rand(4, 3, 512, 512)
>>> features = model(samples)
>>> features.shape # features after global average pooling
torch.Size([4, 2048])
"""
def __init__(self, backbone):
super().__init__()
self.feat_extract = _get_architecture(backbone)
self.pool = nn.AdaptiveAvgPool2d((1, 1))
# pylint: disable=W0221
# because abc is generic, this is actual definition
def forward(self, imgs):
"""Pass input data through the model.
Args:
imgs (torch.Tensor):
Model input.
"""
feat = self.feat_extract(imgs)
gap_feat = self.pool(feat)
return torch.flatten(gap_feat, 1)
@staticmethod
def infer_batch(model, batch_data, on_gpu):
"""Run inference on an input batch.
Contains logic for forward operation as well as i/o aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (ndarray):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
"""
img_patches_device = batch_data.to(select_device(on_gpu)).type(
torch.float32
) # to NCHW
img_patches_device = img_patches_device.permute(0, 3, 1, 2).contiguous()
# Inference mode
model.eval()
# Do not compute the gradient (not training)
with torch.inference_mode():
output = model(img_patches_device)
# Output should be a single tensor or scalar
return [output.cpu().numpy()]
| 7,968 | 31.794239 | 85 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/unet.py | """Defines a set of UNet variants to be used within tiatoolbox."""
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F # noqa: N812
from torchvision.models.resnet import Bottleneck as ResNetBottleneck
from torchvision.models.resnet import ResNet
from tiatoolbox.models.architecture.utils import UpSample2x, centre_crop
from tiatoolbox.models.models_abc import ModelABC
from tiatoolbox.utils import misc
class ResNetEncoder(ResNet):
"""A subclass of ResNet defined in torch.
This class overwrites the `forward` implementation within pytorch
to return features of each downsampling level. This is necessary
for segmentation.
"""
def _forward_impl(self, x):
"""Overwriting default torch forward so that it returns features.
Args:
x (:class:`torch.Tensor`): Input images, the tensor is in the shape of NCHW.
                For this method, C=3 (i.e., 3-channel images are expected as input).
Returns:
list:
List of features for each down-sample block. Each
feature tensor is of the shape NCHW.
"""
# See note [TorchScript super()]
x0 = x = self.conv1(x)
x0 = x = self.bn1(x)
x0 = x = self.relu(x)
x1 = x = self.maxpool(x)
x1 = x = self.layer1(x)
x2 = x = self.layer2(x)
x3 = x = self.layer3(x)
x4 = x = self.layer4(x)
return [x0, x1, x2, x3, x4]
@staticmethod
def resnet50(num_input_channels: int):
"""Shortcut method to create ResNet50."""
return ResNetEncoder.resnet(num_input_channels, [3, 4, 6, 3])
@staticmethod
def resnet(
num_input_channels: int,
downsampling_levels: List[int],
):
"""Shortcut method to create customised ResNet.
Args:
num_input_channels (int):
Number of channels in the input images.
downsampling_levels (list):
A list of integers where each number defines the number
of BottleNeck blocks at each down-sampling level.
Returns:
model (torch.nn.Module):
A pytorch model.
Examples:
>>> # instantiate a resnet50
>>> ResNetEncoder.resnet50(
... num_input_channels,
... [3, 4, 6, 3],
... )
"""
model = ResNetEncoder(ResNetBottleneck, downsampling_levels)
if num_input_channels != 3:
model.conv1 = nn.Conv2d( # skipcq: PYL-W0201
num_input_channels, 64, 7, stride=2, padding=3
)
return model
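# Editor's sketch: the overridden forward returns one feature map per
# down-sampling level. For a (1, 3, 256, 256) input the feature shapes are
# (assuming standard ResNet50 widths):
#
#     encoder = ResNetEncoder.resnet50(num_input_channels=3)
#     feats = encoder(torch.rand(1, 3, 256, 256))
#     # channels (64, 256, 512, 1024, 2048) at strides (2, 4, 8, 16, 32)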
class UnetEncoder(nn.Module):
"""Construct a basic UNet encoder.
This class builds a basic UNet encoder with batch normalization.
The number of channels in each down-sampling block and
the number of down-sampling levels are customisable.
Args:
num_input_channels (int):
Number of channels in the input images.
layer_output_channels (list):
A list of integers where each number defines the number of
output channels at each down-sampling level.
Returns:
model (torch.nn.Module):
A pytorch model.
"""
def __init__(
self,
num_input_channels: int,
layer_output_channels: List[int],
):
super().__init__()
self.blocks = nn.ModuleList()
input_channels = num_input_channels
for output_channels in layer_output_channels:
self.blocks.append(
nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(
input_channels,
output_channels,
3,
1,
padding=1,
bias=False,
),
nn.BatchNorm2d(output_channels),
nn.ReLU(),
nn.Conv2d(
output_channels,
output_channels,
3,
1,
padding=1,
bias=False,
),
nn.BatchNorm2d(output_channels),
nn.ReLU(),
),
nn.AvgPool2d(2, stride=2),
]
)
)
input_channels = output_channels
def forward(self, input_tensor: torch.Tensor):
"""Logic for using layers defined in init.
This method defines how layers are used in forward operation.
Args:
input_tensor (:class:`torch.Tensor`):
Input images, the tensor is in the shape of NCHW.
Returns:
list:
A list of features for each down-sample block. Each
feature tensor is of the shape NCHW.
"""
features = []
for block in self.blocks:
input_tensor = block[0](input_tensor)
features.append(input_tensor)
input_tensor = block[1](input_tensor) # down-sample
return features
def create_block(pre_activation, kernels, input_ch, output_ch):
"""Helper to create a block of Vanilla Convolution.
This is in pre-activation style.
Args:
pre_activation (bool):
Whether to apply activation layer before the convolution layer.
Should be True for ResNet blocks.
kernels (list):
A list of convolution layers. Each item is an
integer and denotes the layer kernel size.
input_ch (int):
Number of channels in the input images.
output_ch (int):
Number of channels in the output images.
"""
layers = []
for ksize in kernels:
if pre_activation:
layers.extend(
[
nn.BatchNorm2d(input_ch),
nn.ReLU(),
nn.Conv2d(
input_ch,
output_ch,
(ksize, ksize),
padding=int((ksize - 1) // 2), # same padding
bias=False,
),
]
)
else:
layers.extend(
[
nn.Conv2d(
input_ch,
output_ch,
(ksize, ksize),
padding=int((ksize - 1) // 2), # same padding
bias=False,
),
nn.BatchNorm2d(output_ch),
nn.ReLU(),
]
)
input_ch = output_ch
return layers
class UNetModel(ModelABC):
"""Generate families of UNet model.
    This supports different encoders. However, the decoder is relatively
    simple: each upsampling block contains a number of vanilla
    convolution layers that are not customisable. The aggregation
    between down-sampling and up-sampling features is addition by
    default, with concatenation available via `skip_type`.
Args:
num_input_channels (int):
Number of channels in input images.
num_output_channels (int):
Number of channels in output images.
encoder (str):
Name of the encoder, currently supports:
- "resnet50": The well-known ResNet50- this is not the
pre-activation model.
- "unet": The vanilla UNet encoder where each down-sampling
level contains 2 blocks of Convolution-BatchNorm-ReLu.
encoder_levels (list):
            A list of integers to configure the "unet" encoder levels.
            Each number defines the number of output channels at each
            down-sampling level (2 convolutions). The number of integers
            defines the number of down-sampling levels in the unet
            encoder. This is only applicable when `encoder="unet"`.
decoder_block (list):
A list of convolution layers. Each item is an integer and
denotes the layer kernel size.
skip_type (str):
Choosing between "add" or "concat" method to be used for
combining feature maps from encoder and decoder parts at
skip connections. Default is "add".
Returns:
torch.nn.Module:
A pytorch model.
Examples:
>>> # instantiate a UNet with resnet50 encoder and
>>> # only 1 3x3 per each up-sampling block in the decoder
>>> UNetModel.resnet50(
... 2, 2,
... encoder="resnet50",
... decoder_block=(3,)
... )
"""
def __init__(
self,
num_input_channels: int = 2,
num_output_channels: int = 2,
encoder: str = "resnet50",
encoder_levels: List[int] = None,
decoder_block: Tuple[int] = None,
skip_type: str = "add",
):
super().__init__()
        encoder = encoder.lower()
        if encoder not in {"resnet50", "unet"}:
            raise ValueError(f"Unknown encoder `{encoder}`")
if encoder_levels is None:
encoder_levels = [64, 128, 256, 512, 1024]
if decoder_block is None:
decoder_block = [3, 3]
if encoder == "resnet50":
pre_activation = True
self.backbone = ResNetEncoder.resnet50(num_input_channels)
if encoder == "unet":
pre_activation = False
self.backbone = UnetEncoder(num_input_channels, encoder_levels)
if skip_type.lower() not in {"add", "concat"}:
raise ValueError(f"Unknown type of skip connection: `{skip_type}`")
self.skip_type = skip_type.lower()
img_list = torch.rand([1, num_input_channels, 256, 256])
out_list = self.backbone(img_list)
# ordered from low to high resolution
down_ch_list = [v.shape[1] for v in out_list][::-1]
# channel mapping for shortcut
self.conv1x1 = nn.Conv2d(down_ch_list[0], down_ch_list[1], (1, 1), bias=False)
self.uplist = nn.ModuleList()
for ch_idx, ch in enumerate(down_ch_list[1:]):
next_up_ch = ch
if ch_idx + 2 < len(down_ch_list):
next_up_ch = down_ch_list[ch_idx + 2]
if self.skip_type == "concat":
ch *= 2
layers = create_block(pre_activation, decoder_block, ch, next_up_ch)
self.uplist.append(nn.Sequential(*layers))
self.clf = nn.Conv2d(next_up_ch, num_output_channels, (1, 1), bias=True)
self.upsample2x = UpSample2x()
@staticmethod
def _transform(image: torch.Tensor) -> torch.Tensor:
"""Transforming network input to desired format.
This method is model and dataset specific, meaning that it can be replaced by
user's desired transform function before training/inference.
Args:
image (:class:`torch.Tensor`): Input images, the tensor is of the shape
NCHW.
Returns:
output (:class:`torch.Tensor`): The transformed input.
"""
return image / 255.0
# pylint: disable=W0221
# because abc is generic, this is actual definition
def forward(self, imgs: torch.Tensor, *args, **kwargs):
"""Logic for using layers defined in init.
This method defines how layers are used in forward operation.
Args:
imgs (:class:`torch.Tensor`):
Input images, the tensor is of the shape NCHW.
Returns:
:class:`torch.Tensor`:
The inference output. The tensor is of the shape NCHW.
However, `height` and `width` may not be the same as the
input images.
"""
# transform the input using network-specific transform function
imgs = self._transform(imgs)
# assume output is after each down-sample resolution
en_list = self.backbone(imgs)
x = self.conv1x1(en_list[-1])
en_list = en_list[:-1]
for idx in range(1, len(en_list) + 1):
# up-sample feature from low-resolution
# block, add it with features from the same resolution
# coming from the encoder, then run it through the decoder
# block
y = en_list[-idx]
x_ = self.upsample2x(x)
if self.skip_type == "add":
x = x_ + y
else:
x = torch.cat([x_, y], dim=1)
x = self.uplist[idx - 1](x)
return self.clf(x)
@staticmethod
def infer_batch(model, batch_data, on_gpu):
"""Run inference on an input batch.
This contains logic for forward operation as well as i/o
aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (:class:`numpy.ndarray`):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
Returns:
list:
List of network output head, each output is an
:class:`numpy.ndarray`.
"""
model.eval()
device = misc.select_device(on_gpu)
####
imgs = batch_data
imgs = imgs.to(device).type(torch.float32)
imgs = imgs.permute(0, 3, 1, 2) # to NCHW
_, _, h, w = imgs.shape
crop_shape = [h // 2, w // 2]
with torch.inference_mode():
logits = model(imgs)
probs = F.softmax(logits, 1)
probs = F.interpolate(
probs, scale_factor=2, mode="bilinear", align_corners=False
)
probs = centre_crop(probs, crop_shape)
probs = probs.permute(0, 2, 3, 1) # to NHWC
probs = probs.cpu().numpy()
return [probs]
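if __name__ == "__main__":
    # Editor's smoke-test sketch: a small vanilla-UNet variant with two
    # down-sampling levels (the widths below are illustrative, not
    # defaults).
    model = UNetModel(
        num_input_channels=3,
        num_output_channels=2,
        encoder="unet",
        encoder_levels=[32, 64],
        decoder_block=[3],
    )
    model.eval()
    with torch.inference_mode():
        logits = model(torch.rand(1, 3, 64, 64) * 255)
    print(logits.shape)  # torch.Size([1, 2, 64, 64])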
| 14,209 | 32.356808 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/__init__.py | """Defines a set of models to be used within tiatoolbox."""
import os
import pathlib
from pydoc import locate
from typing import Union
import torch
from tiatoolbox import rcParam
from tiatoolbox.models.architecture.vanilla import CNNBackbone, CNNModel
from tiatoolbox.models.dataset.classification import predefined_preproc_func
from tiatoolbox.utils.misc import download_data
__all__ = ["get_pretrained_model", "fetch_pretrained_weights"]
PRETRAINED_INFO = rcParam["pretrained_model_info"]
def fetch_pretrained_weights(model_name: str, save_path: str, overwrite: bool = True):
"""Get the pretrained model information from yml file.
Args:
model_name (str):
            Refer to :py:meth:`get_pretrained_model` for all supported
            model names.
save_path (str):
Path to save the weight of the
corresponding `model_name`.
overwrite (bool):
Overwrite existing downloaded weights.
"""
info = PRETRAINED_INFO[model_name]
download_data(info["url"], save_path, overwrite)
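# Editor's usage sketch (the save path below is an illustrative placeholder):
#
#     fetch_pretrained_weights(
#         "resnet18-kather100k",
#         save_path="weights/resnet18-kather100k.pth",
#     )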
def get_pretrained_model(
pretrained_model: str = None,
pretrained_weights: Union[str, pathlib.Path] = None,
overwrite: bool = False,
):
"""Load a predefined PyTorch model with the appropriate pretrained weights.
Args:
pretrained_model (str):
Name of the existing models support by tiatoolbox for
processing the data. The models currently supported:
- alexnet
- resnet18
- resnet34
- resnet50
- resnet101
- resnext50_32x4d
- resnext101_32x8d
- wide_resnet50_2
- wide_resnet101_2
- densenet121
- densenet161
- densenet169
- densenet201
- mobilenet_v2
- mobilenet_v3_large
- mobilenet_v3_small
- googlenet
Each model has been trained on the Kather100K and PCam
datasets. The format of pretrained_model is
<model_name>-<dataset_name>. For example, to use a resnet18
model trained on Kather100K, use `resnet18-kather100k and to
use an alexnet model trained on PCam, use `alexnet-pcam`.
By default, the corresponding pretrained weights will also be
downloaded. However, you can override with your own set of
weights via the `pretrained_weights` argument. Argument is case-insensitive.
pretrained_weights (str):
Path to the weight of the corresponding `pretrained_model`.
        overwrite (bool):
            Whether to always overwrite existing downloaded weights.
Examples:
>>> # get mobilenet pretrained on Kather100K dataset by the TIA team
>>> model = get_pretrained_model(pretrained_model='mobilenet_v2-kather100k')
>>> # get mobilenet defined by TIA team, but loaded with user defined weights
>>> model = get_pretrained_model(
... pretrained_model='mobilenet_v2-kather100k',
... pretrained_weights='/A/B/C/my_weights.tar',
... )
>>> # get resnet34 pretrained on PCam dataset by TIA team
>>> model = get_pretrained_model(pretrained_model='resnet34-pcam')
"""
if not isinstance(pretrained_model, str):
raise ValueError("pretrained_model must be a string.")
if pretrained_model not in PRETRAINED_INFO:
raise ValueError(f"Pretrained model `{pretrained_model}` does not exist.")
info = PRETRAINED_INFO[pretrained_model]
arch_info = info["architecture"]
creator = locate((f"tiatoolbox.models.architecture" f'.{arch_info["class"]}'))
model = creator(**arch_info["kwargs"])
    # TODO: support a dictionary of dataset-specific transformations?
if "dataset" in info:
# ! this is a hack currently, need another PR to clean up
# ! associated pre-processing coming from dataset (Kumar, Kather, etc.)
model.preproc_func = predefined_preproc_func(info["dataset"])
if pretrained_weights is None:
file_name = info["url"].split("/")[-1]
pretrained_weights = os.path.join(
rcParam["TIATOOLBOX_HOME"], "models/", file_name
)
fetch_pretrained_weights(pretrained_model, pretrained_weights, overwrite)
    # Weights are assumed to be saved in single-GPU mode;
    # always load onto the CPU.
saved_state_dict = torch.load(pretrained_weights, map_location="cpu")
model.load_state_dict(saved_state_dict, strict=True)
# !
io_info = info["ioconfig"]
creator = locate((f"tiatoolbox.models.engine" f'.{io_info["class"]}'))
iostate = creator(**io_info["kwargs"])
return model, iostate
| 4,777 | 35.753846 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/hovernet.py | import math
from collections import OrderedDict
from typing import List
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F # noqa: N812
from scipy import ndimage
from skimage.morphology import remove_small_objects
from skimage.segmentation import watershed
from tiatoolbox.models.architecture.utils import (
UpSample2x,
centre_crop,
centre_crop_to_shape,
)
from tiatoolbox.models.models_abc import ModelABC
from tiatoolbox.utils import misc
from tiatoolbox.utils.misc import get_bounding_box
class TFSamepaddingLayer(nn.Module):
"""To align with tensorflow `same` padding.
Putting this before any conv layer that needs padding. Here, we
assume kernel has same height and width for simplicity.
"""
def __init__(self, ksize: int, stride: int):
super().__init__()
self.ksize = ksize
self.stride = stride
def forward(self, x: torch.Tensor):
"""Logic for using layers defined in init."""
if x.shape[2] % self.stride == 0:
pad = max(self.ksize - self.stride, 0)
else:
pad = max(self.ksize - (x.shape[2] % self.stride), 0)
if pad % 2 == 0:
pad_val = pad // 2
padding = (pad_val, pad_val, pad_val, pad_val)
else:
pad_val_start = pad // 2
pad_val_end = pad - pad_val_start
padding = (pad_val_start, pad_val_end, pad_val_start, pad_val_end)
return F.pad(x, padding, "constant", 0)
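# Editor's sketch: with ksize=7 and stride=1, a 64x64 input is padded by
# 3 pixels on every side, so a following `valid` 7x7 convolution returns
# a 64x64 map (matching TensorFlow's "same" behaviour):
#
#     pad = TFSamepaddingLayer(ksize=7, stride=1)
#     pad(torch.rand(1, 3, 64, 64)).shape  # torch.Size([1, 3, 70, 70])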
class DenseBlock(nn.Module):
"""Dense Convolutional Block.
This convolutional block supports only `valid` padding.
References:
Huang, Gao, et al. "Densely connected convolutional networks."
Proceedings of the IEEE conference on computer vision and
pattern recognition. 2017.
"""
def __init__(
self,
in_ch: int,
unit_ksizes: List[int],
unit_chs: List[int],
unit_count: int,
split: int = 1,
):
super().__init__()
if len(unit_ksizes) != len(unit_chs):
raise ValueError("Unbalance Unit Info.")
self.nr_unit = unit_count
self.in_ch = in_ch
# weights value may not match with tensorflow version
# due to different default initialization scheme between
# torch and tensorflow
def get_unit_block(unit_in_ch):
"""Helper function to make it less long."""
layers = OrderedDict(
[
("preact_bna/bn", nn.BatchNorm2d(unit_in_ch, eps=1e-5)),
("preact_bna/relu", nn.ReLU(inplace=True)),
(
"conv1",
nn.Conv2d(
unit_in_ch,
unit_chs[0],
unit_ksizes[0],
stride=1,
padding=0,
bias=False,
),
),
("conv1/bn", nn.BatchNorm2d(unit_chs[0], eps=1e-5)),
("conv1/relu", nn.ReLU(inplace=True)),
(
"conv2",
nn.Conv2d(
unit_chs[0],
unit_chs[1],
unit_ksizes[1],
groups=split,
stride=1,
padding=0,
bias=False,
),
),
]
)
return nn.Sequential(layers)
unit_in_ch = in_ch
self.units = nn.ModuleList()
for _ in range(unit_count):
self.units.append(get_unit_block(unit_in_ch))
unit_in_ch += unit_chs[1]
self.blk_bna = nn.Sequential(
OrderedDict(
[
("bn", nn.BatchNorm2d(unit_in_ch, eps=1e-5)),
("relu", nn.ReLU(inplace=True)),
]
)
)
def forward(self, prev_feat: torch.Tensor):
"""Logic for using layers defined in init."""
for idx in range(self.nr_unit):
new_feat = self.units[idx](prev_feat)
prev_feat = centre_crop_to_shape(prev_feat, new_feat)
prev_feat = torch.cat([prev_feat, new_feat], dim=1)
return self.blk_bna(prev_feat)
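# Editor's sketch: with ksize=5 each unit's valid 5x5 conv trims 4 pixels
# and concatenation adds 32 channels per unit:
#
#     blk = DenseBlock(256, [1, 5], [128, 32], 8, split=4)
#     blk(torch.rand(1, 256, 80, 80)).shape  # torch.Size([1, 512, 48, 48])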
class ResidualBlock(nn.Module):
"""Residual block.
References:
He, Kaiming, et al. "Deep residual learning for image
recognition." Proceedings of the IEEE conference on computer
vision and pattern recognition. 2016.
"""
def __init__(
self,
in_ch: int,
unit_ksizes: List[int],
unit_chs: List[int],
unit_count: int,
stride: int = 1,
):
super().__init__()
if len(unit_ksizes) != len(unit_chs):
raise ValueError("Unbalance Unit Info.")
self.nr_unit = unit_count
self.in_ch = in_ch
# ! For inference only so init values for batch norm may not match tensorflow
unit_in_ch = in_ch
self.units = nn.ModuleList()
for idx in range(unit_count):
unit_layer = [
("preact/bn", nn.BatchNorm2d(unit_in_ch, eps=1e-5)),
("preact/relu", nn.ReLU(inplace=True)),
(
"conv1",
nn.Conv2d(
unit_in_ch,
unit_chs[0],
unit_ksizes[0],
stride=1,
padding=0,
bias=False,
),
),
("conv1/bn", nn.BatchNorm2d(unit_chs[0], eps=1e-5)),
("conv1/relu", nn.ReLU(inplace=True)),
(
"conv2/pad",
TFSamepaddingLayer(
ksize=unit_ksizes[1], stride=stride if idx == 0 else 1
),
),
(
"conv2",
nn.Conv2d(
unit_chs[0],
unit_chs[1],
unit_ksizes[1],
stride=stride if idx == 0 else 1,
padding=0,
bias=False,
),
),
("conv2/bn", nn.BatchNorm2d(unit_chs[1], eps=1e-5)),
("conv2/relu", nn.ReLU(inplace=True)),
(
"conv3",
nn.Conv2d(
unit_chs[1],
unit_chs[2],
unit_ksizes[2],
stride=1,
padding=0,
bias=False,
),
),
]
            # Each preceding block ends with BatchNorm-Activation layers,
            # so the first unit of this block must skip its pre-activation
            # layers.
            unit_layer = unit_layer if idx != 0 else unit_layer[2:]
self.units.append(nn.Sequential(OrderedDict(unit_layer)))
unit_in_ch = unit_chs[-1]
if in_ch != unit_chs[-1] or stride != 1:
self.shortcut = nn.Conv2d(in_ch, unit_chs[-1], 1, stride=stride, bias=False)
else:
self.shortcut = None
self.blk_bna = nn.Sequential(
OrderedDict(
[
("bn", nn.BatchNorm2d(unit_in_ch, eps=1e-5)),
("relu", nn.ReLU(inplace=True)),
]
)
)
def forward(self, prev_feat: torch.Tensor):
"""Logic for using layers defined in init."""
if self.shortcut is None:
shortcut = prev_feat
else:
shortcut = self.shortcut(prev_feat)
for _, unit in enumerate(self.units):
new_feat = prev_feat
new_feat = unit(new_feat)
prev_feat = new_feat + shortcut
shortcut = prev_feat
return self.blk_bna(prev_feat)
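# Editor's sketch: with stride=1 the same-padded 3x3 convs keep the
# spatial size, while the 1x1 shortcut projects the channels:
#
#     blk = ResidualBlock(64, [1, 3, 1], [64, 64, 256], 3, stride=1)
#     blk(torch.rand(1, 64, 32, 32)).shape  # torch.Size([1, 256, 32, 32])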
class HoVerNet(ModelABC):
"""Initialise HoVerNet [1].
The tiatoolbox models should produce the following results:
.. list-table:: HoVerNet segmentation performance on the CoNSeP dataset [1]
:widths: 15 15 15 15 15 15 15
:header-rows: 1
* - Model name
- Data set
- DICE
- AJI
- DQ
- SQ
- PQ
* - hovernet-original-consep
- CoNSeP
- 0.85
- 0.57
- 0.70
- 0.78
- 0.55
.. list-table:: HoVerNet segmentation performance on the Kumar dataset [2]
:widths: 15 15 15 15 15 15 15
:header-rows: 1
* - Model name
- Data set
- DICE
- AJI
- DQ
- SQ
- PQ
* - hovernet-original-kumar
- Kumar
- 0.83
- 0.62
- 0.77
- 0.77
- 0.60
Args:
num_input_channels (int):
Number of channels in input.
num_types (int):
Number of nuclei types within the predictions. Once defined,
a branch dedicated for typing is created. By default, no
typing (`num_types=None`) is used.
mode (str):
To use architecture defined in as in original paper
(`original`) or the one used in PanNuke paper (`fast`).
References:
[1] Graham, Simon, et al. "HoVerNet: Simultaneous segmentation and
classification of nuclei in multi-tissue histology images."
Medical Image Analysis 58 (2019): 101563.
[2] Kumar, Neeraj, et al. "A dataset and a technique for generalized
nuclear segmentation for computational pathology."
IEEE transactions on medical imaging 36.7 (2017): 1550-1560.
"""
def __init__(
self, num_input_channels: int = 3, num_types: int = None, mode: str = "original"
):
super().__init__()
self.mode = mode
self.num_types = num_types
if mode not in ["original", "fast"]:
raise ValueError(
f"Invalid mode {mode} for HoVerNet. "
"Only support `original` or `fast`."
)
modules = [
(
"/",
nn.Conv2d(num_input_channels, 64, 7, stride=1, padding=0, bias=False),
),
("bn", nn.BatchNorm2d(64, eps=1e-5)),
("relu", nn.ReLU(inplace=True)),
]
# pre-pend the padding for `fast` mode
if mode == "fast":
modules = [("pad", TFSamepaddingLayer(ksize=7, stride=1)), *modules]
self.conv0 = nn.Sequential(OrderedDict(modules))
self.d0 = ResidualBlock(64, [1, 3, 1], [64, 64, 256], 3, stride=1)
self.d1 = ResidualBlock(256, [1, 3, 1], [128, 128, 512], 4, stride=2)
self.d2 = ResidualBlock(512, [1, 3, 1], [256, 256, 1024], 6, stride=2)
self.d3 = ResidualBlock(1024, [1, 3, 1], [512, 512, 2048], 3, stride=2)
self.conv_bot = nn.Conv2d(2048, 1024, 1, stride=1, padding=0, bias=False)
ksize = 5 if mode == "original" else 3
if num_types is None:
self.decoder = nn.ModuleDict(
OrderedDict(
[
("np", HoVerNet._create_decoder_branch(ksize=ksize, out_ch=2)),
("hv", HoVerNet._create_decoder_branch(ksize=ksize, out_ch=2)),
]
)
)
else:
self.decoder = nn.ModuleDict(
OrderedDict(
[
(
"tp",
HoVerNet._create_decoder_branch(
ksize=ksize, out_ch=num_types
),
),
("np", HoVerNet._create_decoder_branch(ksize=ksize, out_ch=2)),
("hv", HoVerNet._create_decoder_branch(ksize=ksize, out_ch=2)),
]
)
)
self.upsample2x = UpSample2x()
def forward(self, input_tensor: torch.Tensor): # skipcq: PYL-W0221
"""Logic for using layers defined in init.
This method defines how layers are used in forward operation.
Args:
input_tensor (torch.Tensor):
Input images, the tensor is in the shape of NCHW.
Returns:
dict:
A dictionary containing the inference output.
The expected format is {decoder_name: prediction}.
"""
input_tensor = input_tensor / 255.0 # to 0-1 range to match XY
d0 = self.conv0(input_tensor)
d0 = self.d0(d0)
d1 = self.d1(d0)
d2 = self.d2(d1)
d3 = self.d3(d2)
d3 = self.conv_bot(d3)
d = [d0, d1, d2, d3]
if self.mode == "original":
d[0] = centre_crop(d[0], [184, 184])
d[1] = centre_crop(d[1], [72, 72])
else:
d[0] = centre_crop(d[0], [92, 92])
d[1] = centre_crop(d[1], [36, 36])
out_dict = OrderedDict()
for branch_name, branch_desc in self.decoder.items():
u3 = self.upsample2x(d[-1]) + d[-2]
u3 = branch_desc[0](u3)
u2 = self.upsample2x(u3) + d[-3]
u2 = branch_desc[1](u2)
u1 = self.upsample2x(u2) + d[-4]
u1 = branch_desc[2](u1)
u0 = branch_desc[3](u1)
out_dict[branch_name] = u0
return out_dict
@staticmethod
def _create_decoder_branch(out_ch=2, ksize=5):
"""Helper to create a decoder branch."""
modules = [
("conva", nn.Conv2d(1024, 256, ksize, stride=1, padding=0, bias=False)),
("dense", DenseBlock(256, [1, ksize], [128, 32], 8, split=4)),
(
"convf",
nn.Conv2d(512, 512, 1, stride=1, padding=0, bias=False),
),
]
u3 = nn.Sequential(OrderedDict(modules))
modules = [
("conva", nn.Conv2d(512, 128, ksize, stride=1, padding=0, bias=False)),
("dense", DenseBlock(128, [1, ksize], [128, 32], 4, split=4)),
(
"convf",
nn.Conv2d(256, 256, 1, stride=1, padding=0, bias=False),
),
]
u2 = nn.Sequential(OrderedDict(modules))
modules = [
("conva/pad", TFSamepaddingLayer(ksize=ksize, stride=1)),
(
"conva",
nn.Conv2d(256, 64, ksize, stride=1, padding=0, bias=False),
),
]
u1 = nn.Sequential(OrderedDict(modules))
modules = [
("bn", nn.BatchNorm2d(64, eps=1e-5)),
("relu", nn.ReLU(inplace=True)),
(
"conv",
nn.Conv2d(64, out_ch, 1, stride=1, padding=0, bias=True),
),
]
u0 = nn.Sequential(OrderedDict(modules))
return nn.Sequential(
OrderedDict([("u3", u3), ("u2", u2), ("u1", u1), ("u0", u0)])
)
@staticmethod
def _proc_np_hv(np_map: np.ndarray, hv_map: np.ndarray, scale_factor: float = 1):
"""Extract Nuclei Instance with NP and HV Map.
Sobel will be applied on the horizontal and vertical channels in
`hv_map` to derive an energy landscape which highlights possible
nuclei instance boundaries. Afterwards, marker-controlled watershed
is applied on the above energy map, using `np_map` as a filter to
remove background regions.
Args:
np_map (np.ndarray):
An image of shape (height, width, 1) which contains the
probabilities of a pixel being a nucleus.
hv_map (np.ndarray):
An array of shape (height, width, 2) which contains the
horizontal (channel 0) and vertical (channel 1) maps of
possible instances within the image.
scale_factor (float):
The scale factor for processing nuclei. The scale
assumes an image of resolution 0.25 microns per pixel.
Default is therefore 1 for HoVer-Net.
Returns:
:class:`numpy.ndarray`:
An np.ndarray of shape (height, width) where each
non-zero values within the array correspond to one
detected nuclei instances.
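Example:
A hedged sketch of direct use, assuming `np_map` and `hv_map` are
taken from the network output for one patch (names and shapes are
illustrative):
>>> import numpy as np
>>> np_map = np.random.rand(164, 164, 1)
>>> hv_map = np.random.rand(164, 164, 2) * 2 - 1
>>> inst_map = HoVerNet._proc_np_hv(np_map, hv_map)
>>> num_nuclei = len(np.unique(inst_map)) - 1 # minus background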
"""
blb_raw = np_map[..., 0]
h_dir_raw = hv_map[..., 0]
v_dir_raw = hv_map[..., 1]
# processing
blb = np.array(blb_raw >= 0.5, dtype=np.int32)
blb = ndimage.label(blb)[0]
blb = remove_small_objects(blb, min_size=10)
blb[blb > 0] = 1 # background is 0 already
h_dir = cv2.normalize(
h_dir_raw,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
v_dir = cv2.normalize(
v_dir_raw,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
ksize = int((20 * scale_factor) + 1)
obj_size = math.ceil(10 * (scale_factor**2))
# Get resolution specific filters etc.
sobel_h = cv2.Sobel(h_dir, cv2.CV_64F, 1, 0, ksize=ksize)
sobel_v = cv2.Sobel(v_dir, cv2.CV_64F, 0, 1, ksize=ksize)
sobel_h = 1 - (
cv2.normalize(
sobel_h,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
)
sobel_v = 1 - (
cv2.normalize(
sobel_v,
None,
alpha=0,
beta=1,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
)
overall = np.maximum(sobel_h, sobel_v)
overall = overall - (1 - blb)
overall[overall < 0] = 0
dist = (1.0 - overall) * blb
# * nuclei values form mountains so inverse to get basins
dist = -cv2.GaussianBlur(dist, (3, 3), 0)
overall = np.array(overall >= 0.4, dtype=np.int32)
marker = blb - overall
marker[marker < 0] = 0
marker = ndimage.binary_fill_holes(marker).astype("uint8")
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel)
marker = ndimage.label(marker)[0]
marker = remove_small_objects(marker, min_size=obj_size)
return watershed(dist, markers=marker, mask=blb)
@staticmethod
def get_instance_info(pred_inst, pred_type=None):
"""To collect instance information and store it within a dictionary.
Args:
pred_inst (:class:`numpy.ndarray`):
An image of shape (height, width) where each pixel
holds the ID of the nucleus instance it belongs to
(zero for background).
pred_type (:class:`numpy.ndarray`):
An image of shape (height, width, 1) which contains
the predicted type of each pixel.
Returns:
dict:
A dictionary containing a mapping of each instance
within `pred_inst` instance information. It has
the following form::
{
0: { # Instance ID
"box": [
x_min,
y_min,
x_max,
y_max,
],
"centroid": [x, y],
"contour": [
[x, y],
...
],
"type": integer,
"prob": float,
},
...
}
where the instance ID is an integer corresponding to the
instance at the same pixel value within `pred_inst`.
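Example:
A hedged sketch of consuming the returned dictionary, assuming
`pred_inst` is the instance map produced by `_proc_np_hv` (names
are illustrative):
>>> info = HoVerNet.get_instance_info(pred_inst)
>>> centroids = [v["centroid"] for v in info.values()]
>>> contours = [v["contour"] for v in info.values()]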
"""
inst_id_list = np.unique(pred_inst)[1:] # exclude background
inst_info_dict = {}
for inst_id in inst_id_list:
inst_map = pred_inst == inst_id
inst_box = get_bounding_box(inst_map)
inst_box_tl = inst_box[:2]
inst_map = inst_map[inst_box[1] : inst_box[3], inst_box[0] : inst_box[2]]
inst_map = inst_map.astype(np.uint8)
inst_moment = cv2.moments(inst_map)
inst_contour = cv2.findContours(
inst_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
# * opencv protocol format may break
inst_contour = inst_contour[0][0].astype(np.int32)
inst_contour = np.squeeze(inst_contour)
# < 3 points does not make a contour, so skip, likely artifact too
# as the contours obtained via approximation => too small
if inst_contour.shape[0] < 3: # pragma: no cover
continue
# ! check for trickery shape
if len(inst_contour.shape) != 2: # pragma: no cover
continue
inst_centroid = [
(inst_moment["m10"] / inst_moment["m00"]),
(inst_moment["m01"] / inst_moment["m00"]),
]
inst_centroid = np.array(inst_centroid)
inst_contour += inst_box_tl[None]
inst_centroid += inst_box_tl  # shift centroid back to input coordinates
inst_info_dict[inst_id] = { # inst_id should start at 1
"box": inst_box,
"centroid": inst_centroid,
"contour": inst_contour,
"prob": None,
"type": None,
}
if pred_type is not None:
# * Get class of each instance id, stored at index id-1
for inst_id in list(inst_info_dict.keys()):
c_min, r_min, c_max, r_max = inst_info_dict[inst_id]["box"]
inst_map_crop = pred_inst[r_min:r_max, c_min:c_max]
inst_type_crop = pred_type[r_min:r_max, c_min:c_max]
inst_map_crop = inst_map_crop == inst_id
inst_type = inst_type_crop[inst_map_crop]
(type_list, type_pixels) = np.unique(inst_type, return_counts=True)
type_list = list(zip(type_list, type_pixels))
type_list = sorted(type_list, key=lambda x: x[1], reverse=True)
inst_type = type_list[0][0]
# ! pick the 2nd most dominant if it exists
if inst_type == 0 and len(type_list) > 1: # pragma: no cover
inst_type = type_list[1][0]
type_dict = {v[0]: v[1] for v in type_list}
type_prob = type_dict[inst_type] / (np.sum(inst_map_crop) + 1.0e-6)
inst_info_dict[inst_id]["type"] = int(inst_type)
inst_info_dict[inst_id]["prob"] = float(type_prob)
return inst_info_dict
@staticmethod
# skipcq: PYL-W0221 # noqa: E800
def postproc(raw_maps: List[np.ndarray]):
"""Post-processing script for image tiles.
Args:
raw_maps (list(:class:`numpy.ndarray`)):
A list of prediction outputs of each head and assumed to
be in the order of [np, hv, tp] (match with the output
of `infer_batch`).
Returns:
tuple:
- :class:`numpy.ndarray` - Instance map:
Pixel-wise nuclear instance segmentation prediction.
- :py:obj:`dict` - Instance dictionary:
A dictionary containing a mapping of each instance
within `inst_map` instance information. It has
the following form::
{
0: { # Instance ID
"box": [
x_min,
y_min,
x_max,
y_max,
],
"centroid": [x, y],
"contour": [
[x, y],
...
],
"type": 1,
"prob": 0.95,
},
...
}
where the instance ID is an integer corresponding to
the instance at the same pixel location within
the returned instance map.
Examples:
>>> from tiatoolbox.models.architecture.hovernet import HoVerNet
>>> import torch
>>> import numpy as np
>>> batch = torch.from_numpy(image_patch)[None]
>>> # image_patch is a 256x256x3 numpy array
>>> weights_path = "A/weights.pth"
>>> pretrained = torch.load(weights_path)
>>> model = HoVerNet(num_types=6, mode="fast")
>>> model.load_state_dict(pretrained)
>>> output = model.infer_batch(model, batch, on_gpu=False)
>>> output = [v[0] for v in output]
>>> output = model.postproc(output)
"""
if len(raw_maps) == 3:
np_map, hv_map, tp_map = raw_maps
else:
tp_map = None
np_map, hv_map = raw_maps
pred_type = tp_map
pred_inst = HoVerNet._proc_np_hv(np_map, hv_map)
nuc_inst_info_dict = HoVerNet.get_instance_info(pred_inst, pred_type)
return pred_inst, nuc_inst_info_dict
@staticmethod
def infer_batch(model, batch_data, on_gpu):
"""Run inference on an input batch.
This contains logic for forward operation as well as batch i/o
aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (ndarray):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
Returns:
tuple:
Output from each head. Each head is expected to contain
N predictions for N input patches. There are two cases:
one with 2 heads (Nuclei Pixels `np` and HoVer `hv`) and
one with 3 heads (`np`, `hv`, and Nuclei Types `tp`).
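Example:
A minimal sketch, assuming `model` is a HoVerNet without a typing
branch (`num_types=None`) with weights loaded and `batch` is an
NHWC image tensor (names are illustrative):
>>> np_map, hv_map = HoVerNet.infer_batch(model, batch, on_gpu=False)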
"""
patch_imgs = batch_data
device = misc.select_device(on_gpu)
patch_imgs_gpu = patch_imgs.to(device).type(torch.float32) # to NCHW
patch_imgs_gpu = patch_imgs_gpu.permute(0, 3, 1, 2).contiguous()
model.eval() # infer mode
# --------------------------------------------------------------
with torch.inference_mode():
pred_dict = model(patch_imgs_gpu)
pred_dict = OrderedDict(
[[k, v.permute(0, 2, 3, 1).contiguous()] for k, v in pred_dict.items()]
)
pred_dict["np"] = F.softmax(pred_dict["np"], dim=-1)[..., 1:]
if "tp" in pred_dict:
type_map = F.softmax(pred_dict["tp"], dim=-1)
type_map = torch.argmax(type_map, dim=-1, keepdim=True)
type_map = type_map.type(torch.float32)
pred_dict["tp"] = type_map
pred_dict = {k: v.cpu().numpy() for k, v in pred_dict.items()}
if "tp" in pred_dict:
return pred_dict["np"], pred_dict["hv"], pred_dict["tp"]
return pred_dict["np"], pred_dict["hv"]
| 28,016 | 33.588889 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/hovernetplus.py | from collections import OrderedDict
from typing import List
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F # noqa: N812
from skimage import morphology
from tiatoolbox.models.architecture.hovernet import HoVerNet
from tiatoolbox.models.architecture.utils import UpSample2x
from tiatoolbox.utils import misc
class HoVerNetPlus(HoVerNet):
"""Initialise HoVerNet+ [1].
HoVerNet+ takes an RGB input image, and provides the option to
simultaneously segment and classify the nuclei present, as well as
semantically segment different regions or layers in the images. Note
the HoVerNet+ architecture assumes an image resolution of 0.5 mpp,
in contrast to HoVerNet at 0.25 mpp.
The tiatoolbox model should produce the following results on the
datasets on which it was trained.
.. list-table:: HoVerNet+ Performance for Nuclear Instance Segmentation
:widths: 15 15 15 15 15 15 15
:header-rows: 1
* - Model name
- Data set
- DICE
- AJI
- DQ
- SQ
- PQ
* - hovernetplus-oed
- OED
- 0.84
- 0.69
- 0.86
- 0.80
- 0.69
.. list-table:: HoVerNet+ Mean Performance for Semantic Segmentation
:widths: 15 15 15 15 15 15
:header-rows: 1
* - Model name
- Data set
- F1
- Precision
- Recall
- Accuracy
* - hovernetplus-oed
- OED
- 0.82
- 0.82
- 0.82
- 0.84
Args:
num_input_channels (int):
The number of input channels, default = 3 for RGB.
num_types (int):
The number of types of nuclei present in the images.
num_layers (int):
The number of layers/different regions types present.
References:
[1] Shephard, Adam J., et al. "Simultaneous Nuclear Instance and
Layer Segmentation in Oral Epithelial Dysplasia." Proceedings of
the IEEE/CVF International Conference on Computer Vision. 2021.
"""
def __init__(
self, num_input_channels: int = 3, num_types: int = None, num_layers: int = None
):
super().__init__(mode="fast")
self.num_input_channels = num_input_channels
self.num_types = num_types
self.num_layers = num_layers
ksize = 3
self.decoder = nn.ModuleDict(
OrderedDict(
[
(
"tp",
HoVerNet._create_decoder_branch(ksize=ksize, out_ch=num_types),
),
(
"np",
HoVerNet._create_decoder_branch(ksize=ksize, out_ch=2),
),
(
"hv",
HoVerNet._create_decoder_branch(ksize=ksize, out_ch=2),
),
(
"ls",
HoVerNet._create_decoder_branch(ksize=ksize, out_ch=num_layers),
),
]
)
)
self.upsample2x = UpSample2x()
@staticmethod
def _proc_ls(ls_map: np.ndarray):
"""Extract Layer Segmentation map with LS Map.
This function takes the layer segmentation map and applies various
morphological operations to remove spurious segmentations. Note that
this processing is specific to oral epithelium, where priority is
given to certain tissue layers.
Args:
ls_map:
The input predicted segmentation map.
Returns:
:class:`numpy.ndarray`:
The processed segmentation map.
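Example:
A minimal sketch, assuming `ls_map` is the (H, W, 1) argmax layer
map produced by `infer_batch` (shapes are illustrative):
>>> import numpy as np
>>> ls_map = np.zeros((512, 512, 1), dtype=np.float32)
>>> processed = HoVerNetPlus._proc_ls(ls_map)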
"""
ls_map = np.squeeze(ls_map)
ls_map = np.around(ls_map).astype("uint8") # ensure all numbers are integers
min_size = 20000
kernel_size = 20
epith_all = np.where(ls_map >= 2, 1, 0).astype("uint8")
mask = np.where(ls_map >= 1, 1, 0).astype("uint8")
epith_all = epith_all > 0
epith_mask = morphology.remove_small_objects(
epith_all, min_size=min_size
).astype("uint8")
epith_edited = epith_mask * ls_map
epith_edited = epith_edited.astype("uint8")
epith_edited_open = np.zeros_like(epith_edited).astype("uint8")
for i in [3, 2, 4]:
tmp = np.where(epith_edited == i, 1, 0).astype("uint8")
ep_open = cv2.morphologyEx(
tmp, cv2.MORPH_CLOSE, np.ones((kernel_size, kernel_size))
)
ep_open = cv2.morphologyEx(
ep_open, cv2.MORPH_OPEN, np.ones((kernel_size, kernel_size))
)
epith_edited_open[ep_open == 1] = i
mask_open = cv2.morphologyEx(
mask, cv2.MORPH_CLOSE, np.ones((kernel_size, kernel_size))
)
mask_open = cv2.morphologyEx(
mask_open, cv2.MORPH_OPEN, np.ones((kernel_size, kernel_size))
).astype("uint8")
ls_map = mask_open.copy()
for i in range(2, 5):
ls_map[epith_edited_open == i] = i
return ls_map.astype("uint8")
@staticmethod
def _get_layer_info(pred_layer):
"""Transforms image layers/regions into contours to store in dictionary.
Args:
pred_layer (:class:`numpy.ndarray`):
Semantic segmentation map of different layers/regions
following processing.
Returns:
dict:
A dictionary of layer contours. It has the
following form:
.. code-block:: json
{
1: { # Instance ID
"contour": [
[x, y],
...
],
"type": integer,
},
...
}
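Example:
A hedged sketch, assuming `pred_layer` is the processed layer map
returned by `_proc_ls` (names are illustrative):
>>> layer_info = HoVerNetPlus._get_layer_info(pred_layer)
>>> types = [v["type"] for v in layer_info.values()]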
"""
layer_list = np.unique(pred_layer)
layer_list = np.delete(layer_list, np.where(layer_list == 0))
layer_info_dict = {}
count = 1
for type_class in layer_list:
layer = np.where(pred_layer == type_class, 1, 0).astype("uint8")
contours, _ = cv2.findContours(
layer.astype("uint8"), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
)
for contour in contours:
coords = contour[:, 0, :]
layer_info_dict[count] = {
"contours": coords,
"type": type_class,
}
count += 1
return layer_info_dict
@staticmethod
# skipcq: PYL-W0221 # noqa: E800
def postproc(raw_maps: List[np.ndarray]):
"""Post-processing script for image tiles.
Args:
raw_maps (list(ndarray)):
A list of prediction outputs of each head and assumed to
be in the order of [np, hv, tp, ls] (match with the
output of `infer_batch`).
Returns:
tuple:
- inst_map (ndarray):
Pixel-wise nuclear instance segmentation prediction.
- inst_dict (dict):
A dictionary containing a mapping of each instance
within `inst_map` instance information. It has the
following form:
.. code-block:: json
{
0: { # Instance ID
"box": [
x_min,
y_min,
x_max,
y_max,
],
"centroid": [x, y],
"contour": [
[x, y],
...
],
"type": integer,
"prob": float,
},
...
}
where the instance ID is an integer corresponding to the
instance at the same pixel value within `inst_map`.
- layer_map (ndarray):
Pixel-wise layer segmentation prediction.
- layer_dict (dict):
A dictionary containing a mapping of each segmented
layer within `layer_map`. It has the following form
.. code-block:: json
{
1: { # Instance ID
"contour": [
[x, y],
...
],
"type": integer,
},
...
}
Examples:
>>> from tiatoolbox.models.architecture.hovernetplus import HoVerNetPlus
>>> import torch
>>> import numpy as np
>>> batch = torch.from_numpy(image_patch)[None]
>>> # image_patch is a 256x256x3 numpy array
>>> weights_path = "A/weights.pth"
>>> pretrained = torch.load(weights_path)
>>> model = HoVerNetPlus(num_types=3, num_layers=5)
>>> model.load_state_dict(pretrained)
>>> output = model.infer_batch(model, batch, on_gpu=False)
>>> output = [v[0] for v in output]
>>> output = model.postproc(output)
"""
np_map, hv_map, tp_map, ls_map = raw_maps
pred_inst = HoVerNet._proc_np_hv(np_map, hv_map, scale_factor=0.5)
# fx=0.5 as nuclear processing is at 0.5 mpp instead of 0.25 mpp
pred_layer = HoVerNetPlus._proc_ls(ls_map)
pred_type = np.around(tp_map).astype("uint8")
nuc_inst_info_dict = HoVerNet.get_instance_info(pred_inst, pred_type)
layer_info_dict = HoVerNetPlus._get_layer_info(pred_layer)
return pred_inst, nuc_inst_info_dict, pred_layer, layer_info_dict
@staticmethod
def infer_batch(model, batch_data, on_gpu):
"""Run inference on an input batch.
This contains logic for forward operation as well as batch i/o
aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (ndarray):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
"""
patch_imgs = batch_data
device = misc.select_device(on_gpu)
patch_imgs_gpu = patch_imgs.to(device).type(torch.float32) # to NCHW
patch_imgs_gpu = patch_imgs_gpu.permute(0, 3, 1, 2).contiguous()
model.eval() # infer mode
# --------------------------------------------------------------
with torch.inference_mode():
pred_dict = model(patch_imgs_gpu)
pred_dict = OrderedDict(
[[k, v.permute(0, 2, 3, 1).contiguous()] for k, v in pred_dict.items()]
)
pred_dict["np"] = F.softmax(pred_dict["np"], dim=-1)[..., 1:]
type_map = F.softmax(pred_dict["tp"], dim=-1)
type_map = torch.argmax(type_map, dim=-1, keepdim=True)
type_map = type_map.type(torch.float32)
pred_dict["tp"] = type_map
layer_map = F.softmax(pred_dict["ls"], dim=-1)
layer_map = torch.argmax(layer_map, dim=-1, keepdim=True)
layer_map = layer_map.type(torch.float32)
pred_dict["ls"] = layer_map
pred_dict = {k: v.cpu().numpy() for k, v in pred_dict.items()}
return pred_dict["np"], pred_dict["hv"], pred_dict["tp"], pred_dict["ls"]
| 12,044 | 33.414286 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/nuclick.py | """Defines the original NuClick architecture.
Koohbanani, N. A., Jahanifar, M., Tajadin, N. Z., & Rajpoot, N. (2020).
NuClick: a deep learning framework for interactive segmentation of microscopic images.
Medical Image Analysis, 65, 101771.
"""
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from skimage.morphology import (
disk,
reconstruction,
remove_small_holes,
remove_small_objects,
)
from tiatoolbox import logger
from tiatoolbox.models.models_abc import ModelABC
from tiatoolbox.utils import misc
bn_axis = 1
class ConvBnRelu(nn.Module):
"""Performs Convolution, Batch Normalization and activation.
Args:
num_input_channels (int):
Number of channels in input.
num_output_channels (int):
Number of channels in output.
kernel_size (int or tuple(int, int)):
Size of the kernel in the convolution layer.
strides (int or tuple(int, int)):
Size of the stride in the convolution layer.
use_bias (bool):
Whether to use bias in the convolution layer.
dilation_rate (int or tuple(int, int)):
Dilation rate in the convolution layer.
activation (str):
Name of the activation function to use.
do_batchnorm (bool):
Whether to do batch normalization after the convolution layer.
Returns:
model (torch.nn.Module): a pytorch model.
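Example:
A minimal sketch (shapes are illustrative):
>>> import torch
>>> block = ConvBnRelu(3, 32, kernel_size=3)
>>> out = block(torch.rand(1, 3, 64, 64)) # -> (1, 32, 64, 64)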
"""
def __init__(
self,
num_input_channels: int,
num_output_channels: int,
kernel_size: Union[Tuple[int, int], np.ndarray] = (3, 3),
strides: Union[Tuple[int, int], np.ndarray] = (1, 1),
use_bias: bool = False,
dilation_rate: Union[Tuple[int, int], np.ndarray] = (1, 1),
activation: str = "relu",
do_batchnorm: bool = True,
):
super().__init__()
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(strides, int):
strides = (strides, strides)
self.conv_bn_relu = self.get_block(
num_input_channels,
num_output_channels,
kernel_size,
strides,
use_bias,
dilation_rate,
activation,
do_batchnorm,
)
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
"""Logic for using layers defined in init.
This method defines how layers are used in forward operation.
Args:
input_tensor (torch.Tensor):
Input, the tensor is of the shape NCHW.
Returns:
output (torch.Tensor):
The inference output.
"""
return self.conv_bn_relu(input_tensor)
@staticmethod
def get_block(
in_channels,
out_channels,
kernel_size,
strides,
use_bias,
dilation_rate,
activation,
do_batchnorm,
):
"""Function to acquire a convolutional block.
Args:
in_channels (int):
Number of channels in input.
out_channels (int):
Number of channels in output.
kernel_size (list):
Size of the kernel in the acquired convolution block.
strides (int):
Size of stride in the convolution layer.
use_bias (bool):
Whether to use bias in the convolution layer.
dilation_rate (tuple):
Dilation rate for the convolution layer.
activation (str):
Name of the activation function to use.
do_batchnorm (bool):
Whether to do batch normalization after the convolution layer.
Returns:
torch.nn.Sequential: a pytorch layer
"""
conv1 = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=strides,
dilation=dilation_rate,
bias=use_bias,
padding="same",
padding_mode="zeros",
)
torch.nn.init.xavier_uniform_(conv1.weight)
layers = [conv1]
if do_batchnorm:
layers.append(nn.BatchNorm2d(num_features=out_channels, eps=1.001e-5))
if activation == "relu":
layers.append(nn.ReLU())
return nn.Sequential(*layers)
class MultiscaleConvBlock(nn.Module):
"""Defines Multiscale convolution block.
Args:
num_input_channels (int):
Number of channels in input.
num_output_channels (int):
Number of channels in output.
kernel_sizes (list):
Size of the kernel in each convolution layer.
strides (int):
Size of stride in the convolution layer.
use_bias (bool):
Whether to use bias in the convolution layer.
dilation_rates (list):
Dilation rate for each convolution layer.
activation (str):
Name of the activation function to use.
Returns:
torch.nn.Module:
A PyTorch model.
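Example:
A minimal sketch mirroring the first multiscale block used in
NuClick below (shapes are illustrative):
>>> import torch
>>> block = MultiscaleConvBlock(
... num_input_channels=128, num_output_channels=32,
... kernel_sizes=[3, 3, 5, 5], dilation_rates=[1, 3, 3, 6])
>>> out = block(torch.rand(1, 128, 32, 32)) # -> (1, 128, 32, 32)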
"""
def __init__(
self,
num_input_channels: int,
kernel_sizes: Union[Tuple[int, int], np.ndarray],
dilation_rates: Union[Tuple[int, int], np.ndarray],
num_output_channels: int = 32,
strides: Union[Tuple[int, int], np.ndarray] = (1, 1),
activation: str = "relu",
use_bias: bool = False,
):
super().__init__()
self.conv_block_1 = ConvBnRelu(
num_input_channels=num_input_channels,
num_output_channels=num_output_channels,
kernel_size=kernel_sizes[0],
strides=strides,
activation=activation,
use_bias=use_bias,
dilation_rate=(dilation_rates[0], dilation_rates[0]),
)
self.conv_block_2 = ConvBnRelu(
num_input_channels=num_input_channels,
num_output_channels=num_output_channels,
kernel_size=kernel_sizes[1],
strides=strides,
activation=activation,
use_bias=use_bias,
dilation_rate=(dilation_rates[1], dilation_rates[1]),
)
self.conv_block_3 = ConvBnRelu(
num_input_channels=num_input_channels,
num_output_channels=num_output_channels,
kernel_size=kernel_sizes[2],
strides=strides,
activation=activation,
use_bias=use_bias,
dilation_rate=(dilation_rates[2], dilation_rates[2]),
)
self.conv_block_4 = ConvBnRelu(
num_input_channels=num_input_channels,
num_output_channels=num_output_channels,
kernel_size=kernel_sizes[3],
strides=strides,
activation=activation,
use_bias=use_bias,
dilation_rate=(dilation_rates[3], dilation_rates[3]),
)
def forward(self, input_map):
"""Logic for using layers defined in MultiscaleConvBlock init.
This method defines how layers are used in forward operation.
Args:
input_map (torch.Tensor):
Input, the tensor is of the shape NCHW.
Returns:
output (torch.Tensor):
The inference output.
"""
conv0 = input_map
conv1 = self.conv_block_1(conv0)
conv2 = self.conv_block_2(conv0)
conv3 = self.conv_block_3(conv0)
conv4 = self.conv_block_4(conv0)
return torch.cat([conv1, conv2, conv3, conv4], dim=bn_axis)
class ResidualConv(nn.Module):
"""Residual Convolution block.
Args:
num_input_channels (int):
Number of channels in input.
num_output_channels (int):
Number of channels in output.
kernel_size (int):
Size of the kernel in all convolution layers.
strides (int):
Size of the stride in all convolution layers.
use_bias (bool):
Whether to use bias in the convolution layers.
dilation_rate (int):
Dilation rate in all convolution layers.
Returns:
model (torch.nn.Module):
A pytorch model.
"""
def __init__(
self,
num_input_channels: int,
num_output_channels: int = 32,
kernel_size: Union[Tuple[int, int], np.ndarray] = (3, 3),
strides: Union[Tuple[int, int], np.ndarray] = (1, 1),
use_bias: bool = False,
dilation_rate: Union[Tuple[int, int], np.ndarray] = (1, 1),
):
super().__init__()
self.conv_block_1 = ConvBnRelu(
num_input_channels,
num_output_channels,
kernel_size=kernel_size,
strides=strides,
activation="None",
use_bias=use_bias,
dilation_rate=dilation_rate,
do_batchnorm=True,
)
self.conv_block_2 = ConvBnRelu(
num_output_channels,
num_output_channels,
kernel_size=kernel_size,
strides=strides,
activation="None",
use_bias=use_bias,
dilation_rate=dilation_rate,
do_batchnorm=True,
)
self.activation = nn.ReLU()
def forward(self, input_tensor):
"""Logic for using layers defined in ResidualConv init.
This method defines how layers are used in forward operation.
Args:
input_tensor (torch.Tensor):
Input, the tensor is of the shape NCHW.
Returns:
output (torch.Tensor):
The inference output.
"""
conv1 = self.conv_block_1(input_tensor)
conv2 = self.conv_block_2(conv1)
out = torch.add(conv1, conv2)
return self.activation(out)
class NuClick(ModelABC):
"""NuClick Architecture.
NuClick is used for interactive nuclei segmentation.
NuClick takes an RGB image patch along with an inclusion and an exclusion map.
Args:
num_input_channels (int):
Number of channels in input.
num_output_channels (int):
Number of channels in output.
Returns:
model (torch.nn.Module): a pytorch model.
Examples:
>>> # instantiate a NuClick model for interactive nucleus segmentation.
>>> NuClick(num_input_channels = 5, num_output_channels = 1)
"""
def __init__(self, num_input_channels: int, num_output_channels: int):
super().__init__()
self.net_name = "NuClick"
self.n_channels = num_input_channels
self.n_classes = num_output_channels
# -------------Convolution + Batch Normalization + ReLu blocks------------
self.conv_block_1 = nn.Sequential(
ConvBnRelu(
num_input_channels=self.n_channels,
num_output_channels=64,
kernel_size=7,
),
ConvBnRelu(num_input_channels=64, num_output_channels=32, kernel_size=5),
ConvBnRelu(num_input_channels=32, num_output_channels=32, kernel_size=3),
)
self.conv_block_2 = nn.Sequential(
ConvBnRelu(num_input_channels=64, num_output_channels=64),
ConvBnRelu(num_input_channels=64, num_output_channels=32),
ConvBnRelu(num_input_channels=32, num_output_channels=32),
)
self.conv_block_3 = ConvBnRelu(
num_input_channels=32,
num_output_channels=self.n_classes,
kernel_size=(1, 1),
strides=1,
activation=None,
use_bias=True,
do_batchnorm=False,
)
# -------------Residual Convolution blocks------------
self.residual_block_1 = nn.Sequential(
ResidualConv(num_input_channels=32, num_output_channels=64),
ResidualConv(num_input_channels=64, num_output_channels=64),
)
self.residual_block_2 = ResidualConv(
num_input_channels=64, num_output_channels=128
)
self.residual_block_3 = ResidualConv(
num_input_channels=128, num_output_channels=128
)
self.residual_block_4 = nn.Sequential(
ResidualConv(num_input_channels=128, num_output_channels=256),
ResidualConv(num_input_channels=256, num_output_channels=256),
ResidualConv(num_input_channels=256, num_output_channels=256),
)
self.residual_block_5 = nn.Sequential(
ResidualConv(num_input_channels=256, num_output_channels=512),
ResidualConv(num_input_channels=512, num_output_channels=512),
ResidualConv(num_input_channels=512, num_output_channels=512),
)
self.residual_block_6 = nn.Sequential(
ResidualConv(num_input_channels=512, num_output_channels=1024),
ResidualConv(num_input_channels=1024, num_output_channels=1024),
)
self.residual_block_7 = nn.Sequential(
ResidualConv(num_input_channels=1024, num_output_channels=512),
ResidualConv(num_input_channels=512, num_output_channels=256),
)
self.residual_block_8 = ResidualConv(
num_input_channels=512, num_output_channels=256
)
self.residual_block_9 = ResidualConv(
num_input_channels=256, num_output_channels=256
)
self.residual_block_10 = nn.Sequential(
ResidualConv(num_input_channels=256, num_output_channels=128),
ResidualConv(num_input_channels=128, num_output_channels=128),
)
self.residual_block_11 = ResidualConv(
num_input_channels=128, num_output_channels=64
)
self.residual_block_12 = ResidualConv(
num_input_channels=64, num_output_channels=64
)
# -------------Multi-scale Convolution blocks------------
self.multiscale_block_1 = MultiscaleConvBlock(
num_input_channels=128,
num_output_channels=32,
kernel_sizes=[3, 3, 5, 5],
dilation_rates=[1, 3, 3, 6],
)
self.multiscale_block_2 = MultiscaleConvBlock(
num_input_channels=256,
num_output_channels=64,
kernel_sizes=[3, 3, 5, 5],
dilation_rates=[1, 3, 2, 3],
)
self.multiscale_block_3 = MultiscaleConvBlock(
num_input_channels=64,
num_output_channels=16,
kernel_sizes=[3, 3, 5, 7],
dilation_rates=[1, 3, 2, 6],
)
# -------------Max Pooling blocks------------
self.pool_block_1 = nn.MaxPool2d(kernel_size=(2, 2))
self.pool_block_2 = nn.MaxPool2d(kernel_size=(2, 2))
self.pool_block_3 = nn.MaxPool2d(kernel_size=(2, 2))
self.pool_block_4 = nn.MaxPool2d(kernel_size=(2, 2))
self.pool_block_5 = nn.MaxPool2d(kernel_size=(2, 2))
# -------------Transposed Convolution blocks------------
self.conv_transpose_1 = nn.ConvTranspose2d(
in_channels=1024,
out_channels=512,
kernel_size=2,
stride=(2, 2),
)
self.conv_transpose_2 = nn.ConvTranspose2d(
in_channels=256,
out_channels=256,
kernel_size=2,
stride=(2, 2),
)
self.conv_transpose_3 = nn.ConvTranspose2d(
in_channels=256,
out_channels=128,
kernel_size=2,
stride=(2, 2),
)
self.conv_transpose_4 = nn.ConvTranspose2d(
in_channels=128,
out_channels=64,
kernel_size=2,
stride=(2, 2),
)
self.conv_transpose_5 = nn.ConvTranspose2d(
in_channels=64,
out_channels=32,
kernel_size=2,
stride=(2, 2),
)
# pylint: disable=W0221
def forward(self, imgs: torch.Tensor):
"""Logic for using layers defined in NuClick init.
This method defines how layers are used in forward operation.
Args:
imgs (torch.Tensor): Input images, the tensor is of the shape NCHW.
Returns:
output (torch.Tensor): The inference output.
"""
conv1 = self.conv_block_1(imgs)
pool1 = self.pool_block_1(conv1)
conv2 = self.residual_block_1(pool1)
pool2 = self.pool_block_2(conv2)
conv3 = self.residual_block_2(pool2)
conv3 = self.multiscale_block_1(conv3)
conv3 = self.residual_block_3(conv3)
pool3 = self.pool_block_3(conv3)
conv4 = self.residual_block_4(pool3)
pool4 = self.pool_block_4(conv4)
conv5 = self.residual_block_5(pool4)
pool5 = self.pool_block_5(conv5)
conv51 = self.residual_block_6(pool5)
up61 = torch.cat([self.conv_transpose_1(conv51), conv5], dim=1)
conv61 = self.residual_block_7(up61)
up6 = torch.cat([self.conv_transpose_2(conv61), conv4], dim=1)
conv6 = self.residual_block_8(up6)
conv6 = self.multiscale_block_2(conv6)
conv6 = self.residual_block_9(conv6)
up7 = torch.cat([self.conv_transpose_3(conv6), conv3], dim=1)
conv7 = self.residual_block_10(up7)
up8 = torch.cat([self.conv_transpose_4(conv7), conv2], dim=1)
conv8 = self.residual_block_11(up8)
conv8 = self.multiscale_block_3(conv8)
conv8 = self.residual_block_12(conv8)
up9 = torch.cat([self.conv_transpose_5(conv8), conv1], dim=1)
conv9 = self.conv_block_2(up9)
return self.conv_block_3(conv9)
@staticmethod
def postproc(
preds,
thresh=0.33,
min_size=10,
min_hole_size=30,
do_reconstruction=False,
nuc_points=None,
):
"""Post processing.
Args:
preds (ndarray): Prediction outputs for each patch, assumed
to be in the shape (no. patch, h, w) (matching the output
of `infer_batch`).
thresh (float): Threshold value. If a pixel has a predicted value larger
than the threshold, it will be classified as nuclei.
min_size (int): The smallest allowable object size.
min_hole_size (int): The maximum area, in pixels, of a contiguous hole
that will be filled.
do_reconstruction (bool): Whether to perform a morphological reconstruction
of an image.
nuc_points (ndarray): Array in the shape (no. patch, h, w).
In each patch, the pixel that has been 'clicked' is set to
1 and all remaining pixels are set to 0.
Returns:
masks (ndarray): pixel-wise nuclei instance segmentation
prediction, shape:(no.patch, h, w).
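Example:
A hedged sketch, assuming `preds` is the (no. patch, h, w) sigmoid
output of `infer_batch` and `clicks` marks the clicked pixel of
each patch (names are illustrative):
>>> masks = NuClick.postproc(
... preds, do_reconstruction=True, nuc_points=clicks)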
"""
masks = preds > thresh
masks = remove_small_objects(masks, min_size=min_size)
masks = remove_small_holes(masks, area_threshold=min_hole_size)
if do_reconstruction:
for i in range(len(masks)):
this_mask = masks[i, :, :]
this_marker = nuc_points[i, :, :] > 0
if np.any(this_mask[this_marker > 0]):
this_mask = reconstruction(
this_marker, this_mask, footprint=disk(1)
)
masks[i] = np.array([this_mask])
else:
logger.warning(
"Nuclei reconstruction was not done for nucleus #%d",
i,
stacklevel=2,
)
return masks
@staticmethod
def infer_batch(model, batch_data, on_gpu):
"""Run inference on an input batch.
This contains logic for forward operation as well as batch i/o
aggregation.
Args:
model (nn.Module): PyTorch defined model.
batch_data (ndarray): a batch of data generated by
torch.utils.data.DataLoader.
on_gpu (bool): Whether to run inference on a GPU.
Returns:
Pixel-wise nuclei prediction for each patch, shape: (no.patch, h, w).
"""
model.eval()
device = misc.select_device(on_gpu)
# Assume batch_data is NCHW
batch_data = batch_data.to(device).type(torch.float32)
with torch.inference_mode():
output = model(batch_data)
output = torch.sigmoid(output)
output = torch.squeeze(output, 1)
return output.cpu().numpy()
| 20,609 | 30.904025 | 87 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/architecture/sccnn.py | """Defines SCCNN architecture.
Sirinukunwattana, Korsuk, et al.
"Locality sensitive deep learning for detection and classification
of nuclei in routine colon cancer histology images."
IEEE transactions on medical imaging 35.5 (2016): 1196-1206.
"""
from __future__ import annotations
from collections import OrderedDict
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
from skimage.feature import peak_local_max
from tiatoolbox.models.models_abc import ModelABC
from tiatoolbox.utils import misc
class SCCNN(ModelABC):
"""Initialize SCCNN [1].
The following models have been included in tiatoolbox:
1. `sccnn-crchisto`:
This model is trained on `CRCHisto dataset
<https://warwick.ac.uk/fac/cross_fac/tia/data/crchistolabelednucleihe/>`_
2. `sccnn-conic`:
This model is trained on `CoNIC dataset
<https://conic-challenge.grand-challenge.org/evaluation/challenge/leaderboard//>`_
Centroids of ground truth masks were used to train this model.
The results are reported on the whole test data set including preliminary
and final set.
The original model was implemented in Matlab. The model has been reimplemented
in PyTorch for Python compatibility. The original model uses HRGB as input,
where 'H' represents hematoxylin. The model has been modified to use
RGB images as input.
The tiatoolbox model should produce the following results on the listed
datasets, using a radius of 8 pixels for a true detection:
.. list-table:: SCCNN performance
:widths: 15 15 15 15 15
:header-rows: 1
* - Model name
- Data set
- Precision
- Recall
- F1Score
* - sccnn-crchisto
- CRCHisto
- 0.82
- 0.80
- 0.81
* - sccnn-conic
- CoNIC
- 0.79
- 0.79
- 0.79
Args:
num_input_channels (int):
Number of channels in input. default=3.
patch_output_shape (tuple(int, int)):
Height and width of the network output, default=(13, 13).
radius (int):
Radius for nucleus detection, default = 12.
min_distance (int):
The minimal allowed distance separating peaks.
To find the maximum number of peaks, use `min_distance=1`, default=6.
threshold_abs (float):
Minimum intensity of peaks, default=0.20.
References:
[1] Sirinukunwattana, Korsuk, et al.
"Locality sensitive deep learning for detection and classification
of nuclei in routine colon cancer histology images."
IEEE transactions on medical imaging 35.5 (2016): 1196-1206.
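Example:
A minimal sketch with random weights, assuming a 31x31 input patch
so that the encoder reduces to a 1x1 feature map (illustrative):
>>> import torch
>>> model = SCCNN()
>>> prob_map = model(torch.rand(1, 3, 31, 31) * 255)
>>> # prob_map has shape (1, 1, 13, 13)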
"""
def __init__(
self,
num_input_channels: int = 3,
patch_output_shape: Tuple[int, int] = (13, 13),
radius: int = 12,
min_distance: int = 6,
threshold_abs: float = 0.20,
) -> None:
super().__init__()
out_height = patch_output_shape[0]
out_width = patch_output_shape[1]
self.in_ch = num_input_channels
self.out_height = out_height
self.out_width = out_width
# Create mesh grid and convert to 3D vector
x, y = torch.meshgrid(
torch.arange(start=0, end=out_height),
torch.arange(start=0, end=out_width),
indexing="ij",
)
self.register_buffer("xv", torch.unsqueeze(x, dim=0).type(torch.float32))
self.register_buffer("yv", torch.unsqueeze(y, dim=0).type(torch.float32))
self.radius = radius
self.min_distance = min_distance
self.threshold_abs = threshold_abs
def conv_act_block(
in_channels: int, out_channels: int, kernel_size: int
) -> torch.nn.ModuleDict:
"""Convolution and activation branch for SCCNN.
This module combines the convolution and activation blocks in a single
function.
Args:
in_channels (int):
Number of channels in input.
out_channels (int):
Number of required channels in output.
kernel_size (int):
Kernel size of convolution filter.
Returns:
torch.nn.ModuleDict:
Module dictionary.
"""
module_dict = OrderedDict()
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
in_channels,
out_channels,
kernel_size=(kernel_size, kernel_size),
stride=(1, 1),
padding=0,
bias=True,
),
nn.ReLU(),
)
return nn.ModuleDict(module_dict)
def spatially_constrained_layer1(
in_channels: int, out_channels: int
) -> torch.nn.ModuleDict:
"""Spatially constrained layer.
Takes the fully connected layer and returns outputs for creating the
probability map. The output tensor is 3-dimensional, defining the row
and column of the centre of the nucleus and its confidence value.
Args:
in_channels (int):
Number of channels in input.
out_channels (int):
Number of required channels in output.
Returns:
torch.nn.ModuleDict:
Module dictionary.
"""
module_dict = OrderedDict()
module_dict["conv1"] = nn.Sequential(
nn.Conv2d(
in_channels,
out_channels,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
bias=True,
),
nn.Sigmoid(),
)
return nn.ModuleDict(module_dict)
module_dict = OrderedDict()
module_dict["l1"] = conv_act_block(num_input_channels, 30, 2)
module_dict["pool1"] = nn.MaxPool2d(2, padding=0)
module_dict["l2"] = conv_act_block(30, 60, 2)
module_dict["pool2"] = nn.MaxPool2d(2, padding=0)
module_dict["l3"] = conv_act_block(60, 90, 3)
module_dict["l4"] = conv_act_block(90, 1024, 5)
module_dict["dropout1"] = nn.Dropout2d(p=0.5)
module_dict["l5"] = conv_act_block(1024, 512, 1)
module_dict["dropout2"] = nn.Dropout2d(p=0.5)
module_dict["sc"] = spatially_constrained_layer1(512, 3)
self.layer = nn.ModuleDict(module_dict)
def spatially_constrained_layer2(
self, sc1_0: torch.Tensor, sc1_1: torch.Tensor, sc1_2: torch.Tensor
) -> torch.Tensor:
"""Spatially constrained layer 2.
Maps the row, column and height (confidence) estimates from
`spatially_constrained_layer1` to the output probability map.
Args:
sc1_0 (torch.Tensor):
Output of spatially_constrained_layer1 estimating
the x position of the nucleus.
sc1_1 (torch.Tensor):
Output of spatially_constrained_layer1 estimating
the y position of the nucleus.
sc1_2 (torch.Tensor):
Output of spatially_constrained_layer1 estimating
the confidence in nucleus detection.
Returns:
:class:`torch.Tensor`:
Probability map using the estimates from
spatially_constrained_layer1.
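In effect, for each output pixel (x, y) the layer evaluates the
following (pseudo-code using the argument names above)::
d2 = (x - sc1_0) ** 2 + (y - sc1_1) ** 2
sc2 = sc1_2 / (1 + d2 / 2) if d2 < radius else 0
i.e. a peak of height `sc1_2` at the predicted nucleus centre that
decays with the squared distance and is zeroed where the squared
distance is not below `radius`.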
"""
x = torch.tile(self.xv, dims=[sc1_0.size(0), 1, 1, 1]) # Tile for batch size
y = torch.tile(self.yv, dims=[sc1_0.size(0), 1, 1, 1])
xvr = (x - sc1_0) ** 2
yvc = (y - sc1_1) ** 2
out_map = xvr + yvc
out_map_threshold = torch.lt(out_map, self.radius).type(torch.float32)
denominator = 1 + (out_map / 2)
sc2 = sc1_2 / denominator
return sc2 * out_map_threshold
@staticmethod
def preproc(image: torch.Tensor) -> torch.Tensor:
"""Transforming network input to desired format.
This method is model and dataset specific, meaning that it can be
replaced by the user's desired transform function before
training/inference.
Args:
image (torch.Tensor): Input images, the tensor is of the shape NCHW.
Returns:
output (torch.Tensor): The transformed input.
"""
return image / 255.0
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor: # skipcq: PYL-W0221
"""Logic for using layers defined in init.
This method defines how layers are used in forward operation.
Args:
input_tensor (torch.Tensor):
Input images, the tensor is in the shape of NCHW.
Returns:
torch.Tensor:
Output map for cell detection. Peak detection should be applied
to this output for cell detection.
"""
def spatially_constrained_layer1(
layer: torch.nn.Module,
in_tensor: torch.Tensor,
out_height: int = 13,
out_width: int = 13,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Spatially constrained layer 1.
Estimates row, column and height for
`spatially_constrained_layer2` layer mapping.
Args:
layer (torch.nn.Module):
Torch layer as ModuleDict.
in_tensor (torch.Tensor):
Input Tensor.
out_height (int):
Output height.
out_width (int):
Output width.
Returns:
tuple:
Parameters for the requested nucleus location:
- torch.Tensor - Row location for the centre of the nucleus.
- torch.Tensor - Column location for the centre of the nucleus.
- torch.Tensor - Peak value for the probability function indicating
confidence value for the estimate.
"""
sigmoid = layer["conv1"](in_tensor)
sigmoid0 = sigmoid[:, 0:1, :, :] * (out_height - 1)
sigmoid1 = sigmoid[:, 1:2, :, :] * (out_width - 1)
sigmoid2 = sigmoid[:, 2:3, :, :]
return sigmoid0, sigmoid1, sigmoid2
input_tensor = self.preproc(input_tensor)
l1 = self.layer["l1"]["conv1"](input_tensor)
p1 = self.layer["pool1"](l1)
l2 = self.layer["l2"]["conv1"](p1)
p2 = self.layer["pool1"](l2)
l3 = self.layer["l3"]["conv1"](p2)
l4 = self.layer["l4"]["conv1"](l3)
drop1 = self.layer["dropout1"](l4)
l5 = self.layer["l5"]["conv1"](drop1)
drop2 = self.layer["dropout2"](l5)
s1_sigmoid0, s1_sigmoid1, s1_sigmoid2 = spatially_constrained_layer1(
self.layer["sc"], drop2
)
return self.spatially_constrained_layer2(s1_sigmoid0, s1_sigmoid1, s1_sigmoid2)
# skipcq: PYL-W0221 # noqa: E800
def postproc(self, prediction_map: np.ndarray) -> np.ndarray:
"""Post-processing script for MicroNet.
Performs peak detection and extracts coordinates in x, y format.
Args:
prediction_map (ndarray):
Input image of type numpy array.
Returns:
:class:`numpy.ndarray`:
Coordinates of detected nuclei in (x, y) format.
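Example:
A hedged sketch, assuming `model` is an SCCNN with weights loaded
and `batch` is an NHWC patch tensor (names are illustrative):
>>> out = SCCNN.infer_batch(model, batch, on_gpu=False)
>>> coords = model.postproc([out[0][0]]) # (x, y) per detection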
"""
coordinates = peak_local_max(
np.squeeze(prediction_map[0], axis=2),
min_distance=self.min_distance,
threshold_abs=self.threshold_abs,
exclude_border=False,
)
return np.fliplr(coordinates)
@staticmethod
def infer_batch(
model: nn.Module, batch_data: np.ndarray | torch.Tensor, on_gpu: bool
) -> List[np.ndarray]:
"""Run inference on an input batch.
This contains logic for forward operation as well as batch I/O
aggregation.
Args:
model (nn.Module):
PyTorch defined model.
batch_data (:class:`numpy.ndarray` or :class:`torch.Tensor`):
A batch of data generated by
`torch.utils.data.DataLoader`.
on_gpu (bool):
Whether to run inference on a GPU.
Returns:
list of :class:`numpy.ndarray`:
Output probability map.
"""
patch_imgs = batch_data
device = misc.select_device(on_gpu)
patch_imgs_gpu = patch_imgs.to(device).type(torch.float32)
# to NCHW
patch_imgs_gpu = patch_imgs_gpu.permute(0, 3, 1, 2).contiguous()
model.eval() # infer mode
# --------------------------------------------------------------
with torch.inference_mode():
pred = model(patch_imgs_gpu)
pred = pred.permute(0, 2, 3, 1).contiguous()
pred = pred.cpu().numpy()
return [
pred,
]
| 13,123 | 33 | 90 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/engine/multi_task_segmentor.py | # ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# The Original Code is Copyright (C) 2021, TIA Centre, University of Warwick
# All rights reserved.
# ***** END GPL LICENSE BLOCK *****
"""This module enables multi-task segmentors."""
import shutil
from typing import Callable, List
# replace with the sql database once the PR is in place
import joblib
import numpy as np
import torch
from shapely.geometry import box as shapely_box
from shapely.strtree import STRtree
from tiatoolbox.models.engine.nucleus_instance_segmentor import (
NucleusInstanceSegmentor,
_process_instance_predictions,
)
from tiatoolbox.models.engine.semantic_segmentor import (
IOSegmentorConfig,
WSIStreamDataset,
)
# Python cannot yet natively pickle an object method or static method.
# Only top-level functions can be passed to multiprocessing as callers.
# Third-party libraries may be needed to use methods/static methods otherwise.
def _process_tile_predictions(
ioconfig,
tile_bounds,
tile_flag,
tile_mode,
tile_output,
# this would be replaced by annotation store
# in the future
ref_inst_dict,
postproc,
merge_predictions,
model_name,
):
"""Function to merge new tile prediction with existing prediction,
using the output from each task.
Args:
ioconfig (:class:`IOSegmentorConfig`): Object defines information
about input and output placement of patches.
tile_bounds (:class:`numpy.array`): Boundary of the current tile, defined as
(top_left_x, top_left_y, bottom_x, bottom_y).
tile_flag (list): A list of flags indicating whether instances within
an area extended from each side (by `ioconfig.margin`) of
the tile should be replaced by those within the same spatial
region in the accumulated output of this run. The format is
[top, bottom, left, right]; 1 indicates removal while 0 does not.
For example, [1, 1, 0, 0] denotes replacing top and bottom instances
within `ref_inst_dict` with new ones after this processing.
tile_mode (int): A flag to indicate the type of this tile. There
are 4 flags:
- 0: A tile from the tile grid without any overlap; it is not
an overlapping tile from tile generation. The predicted
instances are immediately added to the accumulated output.
- 1: Vertical tile strip that stands between two normal tiles
(flag 0). It has the same height as a normal tile but
less width (hence vertical strip).
- 2: Horizontal tile strip that stands between two normal tiles
(flag 0). It has the same width as a normal tile but
less height (hence horizontal strip).
- 3: Tile strip that stands at the cross-section of four normal
tiles (flag 0).
tile_output (list): A list of patch predictions that lie within this
tile, to be merged and processed.
ref_inst_dict (dict): Dictionary containing the accumulated output. The
expected format is {instance_id: {type: int,
contour: List[List[int]], centroid:List[float], box:List[int]}.
postproc (callable): Function to post-process the raw assembled tile.
merge_predictions (callable): Function to merge the `tile_output` into
raw tile prediction.
model_name (string): Name of an existing model supported by
tiatoolbox for processing the data. Refer to [URL] for details.
Returns:
new_inst_dict (dict): A dictionary containing new instances to be
accumulated. The expected format is {instance_id: {type: int,
contour: List[List[int]], centroid:List[float], box:List[int]}.
remove_insts_in_orig (list): List of instance ids within `ref_inst_dict`
to be removed to prevent overlapping predictions. These instances
are those that get cut off at the boundary due to the tiling process.
sem_maps (list): List of semantic segmentation maps.
tile_bounds (:class:`numpy.array`): Boundary of the current tile, defined as
(top_left_x, top_left_y, bottom_x, bottom_y).
"""
locations, predictions = list(zip(*tile_output))
# convert from WSI space to tile space
tile_tl = tile_bounds[:2]
tile_br = tile_bounds[2:]
locations = [np.reshape(loc, (2, -1)) for loc in locations]
locations_in_tile = [loc - tile_tl[None] for loc in locations]
locations_in_tile = [loc.flatten() for loc in locations_in_tile]
locations_in_tile = np.array(locations_in_tile)
tile_shape = tile_br - tile_tl # in width height
# as the placement output is calculated wrt highest possible resolution
# within input, the output will need to re-calibrate if it is at different
# resolution than the input
ioconfig = ioconfig.to_baseline()
fx_list = [v["resolution"] for v in ioconfig.output_resolutions]
head_raws = []
for idx, fx in enumerate(fx_list):
head_tile_shape = np.ceil(tile_shape * fx).astype(np.int32)
head_locations = np.ceil(locations_in_tile * fx).astype(np.int32)
head_predictions = [v[idx][0] for v in predictions]
head_raw = merge_predictions(
head_tile_shape[::-1],
head_predictions,
head_locations,
)
head_raws.append(head_raw)
if "hovernetplus" in model_name:
_, inst_dict, layer_map, _ = postproc(head_raws)
out_dicts = [inst_dict, layer_map]
elif "hovernet" in model_name:
_, inst_dict = postproc(head_raws)
out_dicts = [inst_dict]
else:
out_dicts = postproc(head_raws)
inst_dicts = [out for out in out_dicts if type(out) is dict]
sem_maps = [out for out in out_dicts if type(out) is np.ndarray]
# Some output maps may not be aggregated into a single map - combine these
sem_maps = [np.argmax(s, axis=-1) if s.ndim == 3 else s for s in sem_maps]
new_inst_dicts, remove_insts_in_origs = [], []
for inst_id, inst_dict in enumerate(inst_dicts):
new_inst_dict, remove_insts_in_orig = _process_instance_predictions(
inst_dict,
ioconfig,
tile_shape,
tile_flag,
tile_mode,
tile_tl,
ref_inst_dict[inst_id],
)
new_inst_dicts.append(new_inst_dict)
remove_insts_in_origs.append(remove_insts_in_orig)
return new_inst_dicts, remove_insts_in_origs, sem_maps, tile_bounds
class MultiTaskSegmentor(NucleusInstanceSegmentor):
"""An engine specifically designed to handle tiles or WSIs inference.
Note, if `model` is supplied in the arguments, it will ignore the
`pretrained_model` and `pretrained_weights` arguments. Each WSI's instance
predictions (e.g. nuclear instances) will be stored in a `.dat` file and
the semantic segmentation predictions will be stored in a `.npy` file. The
`.dat` file contains a dictionary of the form:
.. code-block:: yaml
inst_uid:
# top left and bottom right of bounding box
box: (start_x, start_y, end_x, end_y)
# centroid coordinates
centroid: (x, y)
# array/list of points
contour: [(x1, y1), (x2, y2), ...]
# the type of nuclei
type: int
# the probabilities of being this nuclei type
prob: float
Args:
model (nn.Module): Use an externally defined PyTorch model for
prediction, with weights already loaded. Default is `None`. If
provided, the `pretrained_model` argument is ignored.
pretrained_model (str): Name of an existing model supported by
tiatoolbox for processing the data. Refer to [URL] for details.
By default, the corresponding pretrained weights will also be
downloaded. However, you can override with your own set of weights
via the `pretrained_weights` argument. Argument is case insensitive.
pretrained_weights (str): Path to the weight of the corresponding
`pretrained_model`.
batch_size (int) : Number of images fed into the model each time.
num_loader_workers (int) : Number of workers to load the data.
Take note that they will also perform preprocessing.
num_postproc_workers (int) : Number of workers to post-process
predictions.
verbose (bool): Whether to output logging information.
dataset_class (obj): Dataset class to be used instead of default.
auto_generate_mask (bool): To automatically generate tile/WSI tissue mask
if it is not provided.
output_types (list): Ordered list describing what sort of segmentation
the output from the model postproc gives. For a two-task model this
may be: ['instance', 'semantic'].
Examples:
>>> # Sample output of a network
>>> wsis = ['A/wsi.svs', 'B/wsi.svs']
>>> predictor = MultiTaskSegmentor(
... pretrained_model='hovernetplus-oed',
... output_types=['instance', 'semantic'],
... )
>>> output = predictor.predict(wsis, mode='wsi')
>>> list(output.keys())
[('A/wsi.svs', 'output/0') , ('B/wsi.svs', 'output/1')]
>>> # Each output of 'A/wsi.svs'
>>> # will be respectively stored in 'output/0.0.dat', 'output/0.1.npy'
>>> # Here, the second integer represents the task number
>>> # e.g. between 0 or 1 for a two task model
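>>> # A hedged sketch of loading the saved outputs afterwards,
>>> # following the naming convention above (illustrative):
>>> import joblib
>>> import numpy as np
>>> inst_dict = joblib.load('output/0.0.dat')
>>> layer_map = np.load('output/0.1.npy')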
"""
def __init__(
self,
batch_size: int = 8,
num_loader_workers: int = 0,
num_postproc_workers: int = 0,
model: torch.nn.Module = None,
pretrained_model: str = None,
pretrained_weights: str = None,
verbose: bool = True,
auto_generate_mask: bool = False,
dataset_class: Callable = WSIStreamDataset,
output_types: List = None,
):
super().__init__(
batch_size=batch_size,
num_loader_workers=num_loader_workers,
num_postproc_workers=num_postproc_workers,
model=model,
pretrained_model=pretrained_model,
pretrained_weights=pretrained_weights,
verbose=verbose,
auto_generate_mask=auto_generate_mask,
dataset_class=dataset_class,
)
self.output_types = output_types
self._futures = None
if "hovernetplus" in str(pretrained_model):
self.output_types = ["instance", "semantic"]
elif "hovernet" in str(pretrained_model):
self.output_types = ["instance"]
# adding more runtime placeholder
if self.output_types is not None:
if "semantic" in self.output_types:
self.wsi_layers = []
if "instance" in self.output_types:
self._wsi_inst_info = []
else:
raise ValueError(
"Output type must be specified for instance or semantic segmentation."
)
def _predict_one_wsi(
self,
wsi_idx: int,
ioconfig: IOSegmentorConfig,
save_path: str,
mode: str,
):
"""Make a prediction on tile/wsi.
Args:
wsi_idx (int): Index of the tile/wsi to be processed within `self`.
ioconfig (IOSegmentorConfig): Object which defines I/O placement during
inference and when assembling back to full tile/wsi.
save_path (str): Location to save output prediction as well as possible
intermediate results.
mode (str): `tile` or `wsi` to indicate run mode.
"""
        cache_dir = self._cache_dir
wsi_path = self.imgs[wsi_idx]
mask_path = None if self.masks is None else self.masks[wsi_idx]
wsi_reader, mask_reader = self.get_reader(
wsi_path, mask_path, mode, self.auto_generate_mask
)
# assume ioconfig has already been converted to `baseline` for `tile` mode
resolution = ioconfig.highest_input_resolution
wsi_proc_shape = wsi_reader.slide_dimensions(**resolution)
# * retrieve patch placement
# this is in XY
(patch_inputs, patch_outputs) = self.get_coordinates(wsi_proc_shape, ioconfig)
if mask_reader is not None:
sel = self.filter_coordinates(mask_reader, patch_outputs, **resolution)
patch_outputs = patch_outputs[sel]
patch_inputs = patch_inputs[sel]
# assume to be in [top_left_x, top_left_y, bot_right_x, bot_right_y]
geometries = [shapely_box(*bounds) for bounds in patch_outputs]
spatial_indexer = STRtree(geometries)
# * retrieve tile placement and tile info flag
# tile shape will always be corrected to be multiple of output
tile_info_sets = self._get_tile_info(wsi_proc_shape, ioconfig)
# ! running order of each set matters !
self._futures = []
indices_sem = [i for i, x in enumerate(self.output_types) if x == "semantic"]
for s_id in range(len(indices_sem)):
self.wsi_layers.append(
np.lib.format.open_memmap(
f"{cache_dir}/{s_id}.npy",
mode="w+",
shape=tuple(np.fliplr([wsi_proc_shape])[0]),
dtype=np.uint8,
)
)
self.wsi_layers[s_id][:] = 0
indices_inst = [i for i, x in enumerate(self.output_types) if x == "instance"]
for _ in indices_inst:
self._wsi_inst_info.append({})
for set_idx, (set_bounds, set_flags) in enumerate(tile_info_sets):
for tile_idx, tile_bounds in enumerate(set_bounds):
tile_flag = set_flags[tile_idx]
# select any patches that have their output
# within the current tile
sel_box = shapely_box(*tile_bounds)
sel_indices = list(spatial_indexer.query(sel_box))
tile_patch_inputs = patch_inputs[sel_indices]
tile_patch_outputs = patch_outputs[sel_indices]
self._to_shared_space(wsi_idx, tile_patch_inputs, tile_patch_outputs)
tile_infer_output = self._infer_once()
self._process_tile_predictions(
ioconfig, tile_bounds, tile_flag, set_idx, tile_infer_output
)
self._merge_post_process_results()
# Maybe change to store semantic annotations as contours in .dat file...
for i_id, inst_idx in enumerate(indices_inst):
joblib.dump(self._wsi_inst_info[i_id], f"{save_path}.{inst_idx}.dat")
self._wsi_inst_info = [] # clean up
for s_id, sem_idx in enumerate(indices_sem):
shutil.copyfile(f"{cache_dir}/{s_id}.npy", f"{save_path}.{sem_idx}.npy")
# may need to chain it with parents
def _process_tile_predictions(
self, ioconfig, tile_bounds, tile_flag, tile_mode, tile_output
):
"""Function to dispatch parallel post processing."""
args = [
ioconfig,
tile_bounds,
tile_flag,
tile_mode,
tile_output,
self._wsi_inst_info,
self.model.postproc_func,
self.merge_prediction,
self.pretrained_model,
]
if self._postproc_workers is not None:
future = self._postproc_workers.submit(_process_tile_predictions, *args)
else:
future = _process_tile_predictions(*args)
self._futures.append(future)
def _merge_post_process_results(self):
"""Helper to aggregate results from parallel workers."""
def callback(new_inst_dicts, remove_uuid_lists, tiles, bounds):
"""Helper to aggregate worker's results."""
# ! DEPRECATION:
# ! will be deprecated upon finalization of SQL annotation store
for inst_id, new_inst_dict in enumerate(new_inst_dicts):
self._wsi_inst_info[inst_id].update(new_inst_dict)
for inst_uuid in remove_uuid_lists[inst_id]:
self._wsi_inst_info[inst_id].pop(inst_uuid, None)
x_start, y_start, x_end, y_end = bounds
for sem_id, tile in enumerate(tiles):
max_h, max_w = self.wsi_layers[sem_id].shape
x_end, y_end = min(x_end, max_w), min(y_end, max_h)
tile = tile[0 : y_end - y_start, 0 : x_end - x_start]
self.wsi_layers[sem_id][y_start:y_end, x_start:x_end] = tile
# !
for future in self._futures:
            # not actually a future but the results themselves
if self._postproc_workers is None:
callback(*future)
continue
            # if any error occurred, log it and propagate the exception;
            # ! this will lead to discarding a whole bunch of
            # ! inferred tiles within this current WSI
if future.exception() is not None:
raise future.exception()
# aggregate the result via callback
# manually call the callback rather than
# attaching it when receiving/creating the future
callback(*future.result())
| 18,210 | 41.155093 | 86 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/engine/nucleus_instance_segmentor.py | """This module enables nucleus instance segmentation."""
import uuid
from collections import deque
from typing import Callable, List, Union
# replace with the sql database once the PR in place
import joblib
import numpy as np
import torch
import tqdm
from shapely.geometry import box as shapely_box
from shapely.strtree import STRtree
from tiatoolbox.models.engine.semantic_segmentor import (
IOSegmentorConfig,
SemanticSegmentor,
WSIStreamDataset,
)
from tiatoolbox.tools.patchextraction import PatchExtractor
def _process_instance_predictions(
inst_dict,
ioconfig,
tile_shape,
tile_flag,
tile_mode,
tile_tl,
ref_inst_dict,
):
"""Function to merge new tile prediction with existing prediction.
Args:
inst_dict (dict): Dictionary containing instance information.
ioconfig (:class:`IOSegmentorConfig`): Object defines information
about input and output placement of patches.
tile_shape (list): A list of the tile shape.
        tile_flag (list): A list of flags to indicate if instances within
            an area extended from each side (by `ioconfig.margin`) of
            the tile should be replaced by those within the same spatial
            region in the accumulated output this run. The format is
            [top, bottom, left, right]; 1 indicates removal while 0 does not.
            For example, [1, 1, 0, 0] denotes replacing top and bottom instances
            within `ref_inst_dict` with new ones after this processing.
tile_mode (int): A flag to indicate the type of this tile. There
are 4 flags:
- 0: A tile from tile grid without any overlapping, it is not
an overlapping tile from tile generation. The predicted
instances are immediately added to accumulated output.
            - 1: Vertical tile strip that stands between two normal tiles
              (flag 0). It has the same height as a normal tile but
              less width (hence vertical strip).
            - 2: Horizontal tile strip that stands between two normal tiles
              (flag 0). It has the same width as a normal tile but
              less height (hence horizontal strip).
            - 3: Tile strip standing at the cross-section of four normal
              tiles (flag 0).
tile_tl (tuple): Top left coordinates of the current tile.
        ref_inst_dict (dict): Dictionary containing accumulated output. The
            expected format is {instance_id: {type: int,
            contour: List[List[int]], centroid: List[float], box: List[int]}}.
Returns:
        new_inst_dict (dict): A dictionary containing new instances to be
            accumulated. The expected format is {instance_id: {type: int,
            contour: List[List[int]], centroid: List[float], box: List[int]}}.
        remove_insts_in_orig (list): List of instance IDs within `ref_inst_dict`
            to be removed to prevent overlapping predictions. These instances
            are those that get cut off at the boundary due to the tiling process.
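    Example:
        >>> # A sketch of intended use within this module (the
        >>> # `tile_shape` and `tile_tl` values are illustrative):
        >>> # new_dict, stale_uids = _process_instance_predictions(
        >>> #     inst_dict, ioconfig, tile_shape=[4096, 4096],
        >>> #     tile_flag=[1, 1, 0, 0], tile_mode=0,
        >>> #     tile_tl=np.array([0, 0]), ref_inst_dict={})
        >>> # `new_dict` is then merged into the accumulated output and
        >>> # the UUIDs in `stale_uids` are removed from it.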
"""
# should be rare, no nuclei detected in input images
if len(inst_dict) == 0:
return {}, []
# !
m = ioconfig.margin
w, h = tile_shape
inst_boxes = [v["box"] for v in inst_dict.values()]
inst_boxes = np.array(inst_boxes)
geometries = [shapely_box(*bounds) for bounds in inst_boxes]
tile_rtree = STRtree(geometries)
# !
# create margin bounding box, ordering should match with
# created tile info flag (top, bottom, left, right)
boundary_lines = [
        shapely_box(0, 0, w, 1),  # noqa top edge
shapely_box(0, h - 1, w, h), # noqa bottom edge
shapely_box(0, 0, 1, h), # noqa left
shapely_box(w - 1, 0, w, h), # noqa right
]
margin_boxes = [
        shapely_box(0, 0, w, m),  # noqa top edge
shapely_box(0, h - m, w, h), # noqa bottom edge
shapely_box(0, 0, m, h), # noqa left
shapely_box(w - m, 0, w, h), # noqa right
]
# ! this is wrt to WSI coord space, not tile
margin_lines = [
        [[m, m], [w - m, m]],  # noqa top edge
[[m, h - m], [w - m, h - m]], # noqa bottom edge
[[m, m], [m, h - m]], # noqa left
[[w - m, m], [w - m, h - m]], # noqa right
]
margin_lines = np.array(margin_lines) + tile_tl[None, None]
margin_lines = [shapely_box(*v.flatten().tolist()) for v in margin_lines]
# the ids within this match with those within `inst_map`, not UUID
sel_indices = []
if tile_mode in [0, 3]:
        # for `full grid` tiles and `cross section` tiles
# -- extend from the boundary by the margin size, remove
# nuclei whose entire contours lie within the margin area
sel_boxes = [
box
for idx, box in enumerate(margin_boxes)
if tile_flag[idx] or tile_mode == 3
]
sel_indices = [
geo
for bounds in sel_boxes
for geo in tile_rtree.query(bounds)
if bounds.contains(geometries[geo])
]
elif tile_mode in [1, 2]:
# for `horizontal/vertical strip` tiles
# -- extend from the marked edges (top/bot or left/right) by
        # the margin size, remove all nuclei that lie within the margin
# area (including on the margin line)
# -- remove all nuclei on the boundary also
sel_boxes = [
margin_boxes[idx] if flag else boundary_lines[idx]
for idx, flag in enumerate(tile_flag)
]
sel_indices = [geo for bounds in sel_boxes for geo in tile_rtree.query(bounds)]
else:
raise ValueError(f"Unknown tile mode {tile_mode}.")
    def retrieve_sel_uids(sel_indices, inst_dict):
        """Helper to retrieve selected instance UIDs."""
        if len(sel_indices) == 0:
            # return an empty list so downstream membership checks work
            return []
        # not sure how costly this is in large dict
        inst_uids = list(inst_dict.keys())
        return [inst_uids[idx] for idx in sel_indices]
remove_insts_in_tile = retrieve_sel_uids(sel_indices, inst_dict)
# external removal only for tile at cross sections
# this one should contain UUID with the reference database
remove_insts_in_orig = []
if tile_mode == 3:
inst_boxes = [v["box"] for v in ref_inst_dict.values()]
inst_boxes = np.array(inst_boxes)
geometries = [shapely_box(*bounds) for bounds in inst_boxes]
ref_inst_rtree = STRtree(geometries)
sel_indices = [
geo for bounds in margin_lines for geo in ref_inst_rtree.query(bounds)
]
remove_insts_in_orig = retrieve_sel_uids(sel_indices, ref_inst_dict)
    # move inst position from tile space back to WSI space
    # and also generate a universal UID as replacement for storage
new_inst_dict = {}
for inst_uid, inst_info in inst_dict.items():
if inst_uid not in remove_insts_in_tile:
inst_info["box"] += np.concatenate([tile_tl] * 2)
inst_info["centroid"] += tile_tl
inst_info["contour"] += tile_tl
inst_uuid = uuid.uuid4().hex
new_inst_dict[inst_uuid] = inst_info
return new_inst_dict, remove_insts_in_orig
# Python is not yet able to natively pickle object methods or static
# methods. Only top-level functions can be passed to multiprocessing
# as the caller. Third-party libraries may be needed to use
# method/static method otherwise.
def _process_tile_predictions(
ioconfig,
tile_bounds,
tile_flag,
tile_mode,
tile_output,
# this would be replaced by annotation store
# in the future
ref_inst_dict,
postproc,
merge_predictions,
):
"""Function to merge new tile prediction with existing prediction.
Args:
ioconfig (:class:`IOSegmentorConfig`):
Object defines information about input and output placement
of patches.
tile_bounds (:class:`numpy.array`):
Boundary of the current tile, defined as `(top_left_x,
top_left_y, bottom_x, bottom_y)`.
tile_flag (list):
            A list of flags to indicate if instances within an area
            extended from each side (by `ioconfig.margin`) of the tile
            should be replaced by those within the same spatial region
            in the accumulated output this run. The format is `[top,
            bottom, left, right]`; 1 indicates removal while 0 does not.
For example, `[1, 1, 0, 0]` denotes replacing top and bottom
instances within `ref_inst_dict` with new ones after this
processing.
tile_mode (int):
A flag to indicate the type of this tile. There are 4 flags:
- 0: A tile from tile grid without any overlapping, it is
not an overlapping tile from tile generation. The
predicted instances are immediately added to
accumulated output.
- 1: Vertical tile strip that stands between two normal
tiles (flag 0). It has the same height as normal tile
but less width (hence vertical strip).
- 2: Horizontal tile strip that stands between two normal
tiles (flag 0). It has the same width as normal tile
but less height (hence horizontal strip).
- 3: Tile strip stands at the cross-section of four normal
tiles (flag 0).
tile_output (list):
A list of patch predictions, that lie within this tile, to
be merged and processed.
ref_inst_dict (dict):
            Dictionary containing accumulated output. The expected format
            is `{instance_id: {type: int, contour: List[List[int]],
            centroid: List[float], box: List[int]}}`.
postproc (callable):
Function to post-process the raw assembled tile.
merge_predictions (callable):
Function to merge the `tile_output` into raw tile
prediction.
Returns:
tuple:
            - :py:obj:`dict` - New instances dictionary:
                A dictionary containing new instances to be accumulated.
                The expected format is `{instance_id: {type: int,
                contour: List[List[int]], centroid: List[float],
                box: List[int]}}`.
            - :py:obj:`list` - Instance IDs to remove:
                List of instance IDs within `ref_inst_dict` to be
                removed to prevent overlapping predictions. These
                instances are those that get cut off at the boundary due
                to the tiling process.
"""
locations, predictions = list(zip(*tile_output))
# convert from WSI space to tile space
tile_tl = tile_bounds[:2]
tile_br = tile_bounds[2:]
locations = [np.reshape(loc, (2, -1)) for loc in locations]
locations_in_tile = [loc - tile_tl[None] for loc in locations]
locations_in_tile = [loc.flatten() for loc in locations_in_tile]
locations_in_tile = np.array(locations_in_tile)
tile_shape = tile_br - tile_tl # in width height
    # As the placement output is calculated wrt the highest possible
    # resolution within the input, the output will need to be
    # re-calibrated if it is at a different resolution than the input.
ioconfig = ioconfig.to_baseline()
fx_list = [v["resolution"] for v in ioconfig.output_resolutions]
head_raws = []
for idx, fx in enumerate(fx_list):
head_tile_shape = np.ceil(tile_shape * fx).astype(np.int32)
head_locations = np.ceil(locations_in_tile * fx).astype(np.int32)
head_predictions = [v[idx][0] for v in predictions]
head_raw = merge_predictions(
head_tile_shape[::-1],
head_predictions,
head_locations,
)
head_raws.append(head_raw)
_, inst_dict = postproc(head_raws)
new_inst_dict, remove_insts_in_orig = _process_instance_predictions(
inst_dict,
ioconfig,
tile_shape,
tile_flag,
tile_mode,
tile_tl,
ref_inst_dict,
)
return new_inst_dict, remove_insts_in_orig
class NucleusInstanceSegmentor(SemanticSegmentor):
"""An engine specifically designed to handle tiles or WSIs inference.
Note, if `model` is supplied in the arguments, it will ignore the
`pretrained_model` and `pretrained_weights` arguments. Additionally,
unlike `SemanticSegmentor`, this engine assumes each input model
    will ultimately predict one single target: the nucleus instances
    within the tiles/WSIs. Each WSI prediction will be stored in a
    `.dat` file which contains a dictionary of the form:
.. code-block:: yaml
inst_uid:
# top left and bottom right of bounding box
box: (start_x, start_y, end_x, end_y)
# centroid coordinates
centroid: (x, y)
# array/list of points
contour: [(x1, y1), (x2, y2), ...]
            # the type of nucleus
            type: int
            # the probability of being this nucleus type
            prob: float
Args:
model (nn.Module):
            Use externally defined PyTorch model for prediction, with
            weights already loaded. Default is `None`. If provided,
`pretrained_model` argument is ignored.
pretrained_model (str):
            Name of the existing models supported by tiatoolbox for
processing the data. For a full list of pretrained models,
refer to the `docs
<https://tia-toolbox.readthedocs.io/en/latest/pretrained.html>`_.
By default, the corresponding pretrained weights will also
be downloaded. However, you can override with your own set
of weights via the `pretrained_weights` argument. Argument
is case insensitive.
pretrained_weights (str):
Path to the weight of the corresponding `pretrained_model`.
        batch_size (int):
Number of images fed into the model each time.
num_loader_workers (int):
Number of workers to load the data.
Take note that they will also perform preprocessing.
num_postproc_workers (int):
Number of workers to post-process predictions.
verbose (bool):
Whether to output logging information.
dataset_class (obj):
Dataset class to be used instead of default.
auto_generate_mask (bool):
To automatically generate tile/WSI tissue mask
            if it is not provided.
Examples:
>>> # Sample output of a network
>>> wsis = ['A/wsi.svs', 'B/wsi.svs']
        >>> predictor = NucleusInstanceSegmentor(
        ...     pretrained_model='hovernet_fast-pannuke')
>>> output = predictor.predict(wsis, mode='wsi')
>>> list(output.keys())
        [('A/wsi.svs', 'output/0'), ('B/wsi.svs', 'output/1')]
        >>> # The outputs of 'A/wsi.svs' and 'B/wsi.svs' will be stored
        >>> # in 'output/0.dat' and 'output/1.dat', respectively
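        >>> # Reading a saved result back (a sketch; `joblib` is what
        >>> # this engine uses internally when saving the `.dat` file):
        >>> # import joblib
        >>> # inst_dict = joblib.load('output/0.dat')
        >>> # next(iter(inst_dict.values()))['box']  # one bounding box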
"""
def __init__(
self,
batch_size: int = 8,
num_loader_workers: int = 0,
num_postproc_workers: int = 0,
model: torch.nn.Module = None,
pretrained_model: str = None,
pretrained_weights: str = None,
verbose: bool = True,
auto_generate_mask: bool = False,
dataset_class: Callable = WSIStreamDataset,
):
super().__init__(
batch_size=batch_size,
num_loader_workers=num_loader_workers,
num_postproc_workers=num_postproc_workers,
model=model,
pretrained_model=pretrained_model,
pretrained_weights=pretrained_weights,
verbose=verbose,
auto_generate_mask=auto_generate_mask,
dataset_class=dataset_class,
)
# default is None in base class and is un-settable
# hence we redefine the namespace here
self.num_postproc_workers = (
num_postproc_workers if num_postproc_workers > 0 else None
)
# adding more runtime placeholder
self._wsi_inst_info = None
self._futures = []
@staticmethod
def _get_tile_info(
image_shape: Union[List[int], np.ndarray],
ioconfig: IOSegmentorConfig,
):
"""Generating tile information.
        To avoid out-of-memory problems when processing at WSI scale,
        the predictor performs inference and assembly on large image
        tiles (each may have a size of 4000x4000, compared to a patch
        output of 256x256) before stitching all tiles together at the
        end to complete the WSI output. For nuclei instance
        segmentation, the stitching process requires the removal of
        predictions within certain bounding areas. This function
        generates both the tile placement and the flags indicating how
        the removal should be done to achieve the above goal.
Args:
image_shape (:class:`numpy.ndarray`, list(int)):
The shape of WSI to extract the tile from, assumed to be
in `[width, height]`.
ioconfig (:obj:IOSegmentorConfig):
The input and output configuration objects.
Returns:
list:
- :py:obj:`list` - Tiles and flags
- :class:`numpy.ndarray` - Grid tiles
- :class:`numpy.ndarray` - Removal flags
- :py:obj:`list` - Tiles and flags
- :class:`numpy.ndarray` - Vertical strip tiles
- :class:`numpy.ndarray` - Removal flags
- :py:obj:`list` - Tiles and flags
- :class:`numpy.ndarray` - Horizontal strip tiles
- :class:`numpy.ndarray` - Removal flags
- :py:obj:`list` - Tiles and flags
- :class:`numpy.ndarray` - Cross section tiles
- :class:`numpy.ndarray` - Removal flags
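        Example:
            >>> # A sketch of internal use (assumes an `ioconfig` whose
            >>> # `tile_shape`, `patch_output_shape` and `margin` are set):
            >>> # sets = NucleusInstanceSegmentor._get_tile_info(
            >>> #     [9000, 9000], ioconfig)
            >>> # grid_boxes, grid_flags = sets[0]  # grid tiles run first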
"""
margin = np.array(ioconfig.margin)
tile_shape = np.array(ioconfig.tile_shape)
tile_shape = (
np.floor(tile_shape / ioconfig.patch_output_shape)
* ioconfig.patch_output_shape
).astype(np.int32)
image_shape = np.array(image_shape)
(_, tile_outputs) = PatchExtractor.get_coordinates(
image_shape=image_shape,
patch_input_shape=tile_shape,
patch_output_shape=tile_shape,
stride_shape=tile_shape,
)
# * === Now generating the flags to indicate which side should
# * === be removed in postproc callback
boxes = tile_outputs
# This saves computation time if the image is smaller than the expected tile
if np.all(image_shape <= tile_shape):
flag = np.zeros([boxes.shape[0], 4], dtype=np.int32)
return [[boxes, flag]]
# * remove all sides for boxes
        # unset for those that lie within the selection
def unset_removal_flag(boxes, removal_flag):
"""Unset removal flags for tiles intersecting image boundaries."""
sel_boxes = [
shapely_box(0, 0, w, 0), # top edge
shapely_box(0, h, w, h), # bottom edge
shapely_box(0, 0, 0, h), # left
shapely_box(w, 0, w, h), # right
]
geometries = [shapely_box(*bounds) for bounds in boxes]
spatial_indexer = STRtree(geometries)
for idx, sel_box in enumerate(sel_boxes):
sel_indices = list(spatial_indexer.query(sel_box))
removal_flag[sel_indices, idx] = 0
return removal_flag
w, h = image_shape
boxes = tile_outputs
# expand to full four corners
boxes_br = boxes[:, 2:]
boxes_tr = np.dstack([boxes[:, 2], boxes[:, 1]])[0]
boxes_bl = np.dstack([boxes[:, 0], boxes[:, 3]])[0]
        # * remove edges on all sides, excluding edges on the WSI boundary
flag = np.ones([boxes.shape[0], 4], dtype=np.int32)
flag = unset_removal_flag(boxes, flag)
info = deque([[boxes, flag]])
# * create vertical boxes at tile boundary and
# * flag top and bottom removal, excluding those
# * on the WSI boundary
# -------------------
# | =|= =|= |
# | =|= =|= |
# | >=|= >=|= |
# -------------------
# | >=|= >=|= |
# | =|= =|= |
# | >=|= >=|= |
# -------------------
# | >=|= >=|= |
# | =|= =|= |
# | =|= =|= |
# -------------------
# only select boxes having right edges removed
sel_indices = np.nonzero(flag[..., 3])
_boxes = np.concatenate(
[
boxes_tr[sel_indices] - np.array([margin, 0])[None],
boxes_br[sel_indices] + np.array([margin, 0])[None],
],
axis=-1,
)
_flag = np.full([_boxes.shape[0], 4], 0, dtype=np.int32)
_flag[:, [0, 1]] = 1
_flag = unset_removal_flag(_boxes, _flag)
info.append([_boxes, _flag])
# * create horizontal boxes at tile boundary and
# * flag left and right removal, excluding those
# * on the WSI boundary
# -------------
# | | | |
# | v|v v|v |
# |===|===|===|
# -------------
# |===|===|===|
# | | | |
# | | | |
# -------------
# only select boxes having bottom edges removed
sel_indices = np.nonzero(flag[..., 1])
# top bottom left right
_boxes = np.concatenate(
[
boxes_bl[sel_indices] - np.array([0, margin])[None],
boxes_br[sel_indices] + np.array([0, margin])[None],
],
axis=-1,
)
_flag = np.full([_boxes.shape[0], 4], 0, dtype=np.int32)
_flag[:, [2, 3]] = 1
_flag = unset_removal_flag(_boxes, _flag)
info.append([_boxes, _flag])
# * create boxes at tile cross-section and all sides
# ------------------------
# | | | | |
# | v| | | |
# | > =|= =|= =|= |
# -----=-=---=-=---=-=----
# | =|= =|= =|= |
# | | | | |
# | =|= =|= =|= |
# -----=-=---=-=---=-=----
# | =|= =|= =|= |
# | | | | |
# | | | | |
# ------------------------
# only select boxes having both right and bottom edges removed
sel_indices = np.nonzero(np.prod(flag[:, [1, 3]], axis=-1))
_boxes = np.concatenate(
[
boxes_br[sel_indices] - np.array([2 * margin, 2 * margin])[None],
boxes_br[sel_indices] + np.array([2 * margin, 2 * margin])[None],
],
axis=-1,
)
flag = np.full([_boxes.shape[0], 4], 1, dtype=np.int32)
info.append([_boxes, flag])
return info
def _to_shared_space(self, wsi_idx, patch_inputs, patch_outputs):
"""Helper functions to transfer variable to shared space.
We modify the shared space so that we can update worker info
without needing to re-create the worker. There should be no
race-condition because only by looping `self._loader` in main
thread will trigger querying new data from each worker, and this
portion should still be in sequential execution order in the
main thread.
Args:
wsi_idx (int):
The index of the WSI to be processed. This is used to
retrieve the file path.
patch_inputs (list):
A list of coordinates in `[start_x, start_y, end_x,
end_y]` format indicating the read location of the patch
in the WSI image. The coordinates are in the highest
resolution defined in `self.ioconfig`.
patch_outputs (list):
A list of coordinates in `[start_x, start_y, end_x,
end_y]` format indicating the write location of the
patch in the WSI image. The coordinates are in the
highest resolution defined in `self.ioconfig`.
"""
patch_inputs = torch.from_numpy(patch_inputs).share_memory_()
patch_outputs = torch.from_numpy(patch_outputs).share_memory_()
self._mp_shared_space.patch_inputs = patch_inputs
self._mp_shared_space.patch_outputs = patch_outputs
self._mp_shared_space.wsi_idx = torch.Tensor([wsi_idx]).share_memory_()
def _infer_once(self):
"""Running the inference only once for the currently active dataloader."""
num_steps = len(self._loader)
pbar_desc = "Process Batch: "
pbar = tqdm.tqdm(
desc=pbar_desc,
leave=True,
total=int(num_steps),
ncols=80,
ascii=True,
position=0,
)
cum_output = []
for _, batch_data in enumerate(self._loader):
sample_datas, sample_infos = batch_data
batch_size = sample_infos.shape[0]
# ! depending on the protocol of the output within infer_batch
# ! this may change, how to enforce/document/expose this in a
# ! sensible way?
# assume to return a list of L output,
# each of shape N x etc. (N=batch size)
sample_outputs = self.model.infer_batch(
self._model,
sample_datas,
self._on_gpu,
)
            # repackage so that it becomes a list of N items, each of
            # which contains L x etc. outputs
sample_outputs = [np.split(v, batch_size, axis=0) for v in sample_outputs]
sample_outputs = list(zip(*sample_outputs))
# tensor to numpy, costly?
sample_infos = sample_infos.numpy()
sample_infos = np.split(sample_infos, batch_size, axis=0)
sample_outputs = list(zip(sample_infos, sample_outputs))
cum_output.extend(sample_outputs)
pbar.update()
pbar.close()
return cum_output
def _predict_one_wsi(
self,
wsi_idx: int,
ioconfig: IOSegmentorConfig,
save_path: str,
mode: str,
):
"""Make a prediction on tile/wsi.
Args:
wsi_idx (int):
Index of the tile/wsi to be processed within `self`.
ioconfig (IOSegmentorConfig):
Object which defines I/O placement during inference and
when assembling back to full tile/wsi.
save_path (str):
Location to save output prediction as well as possible
intermediate results.
mode (str):
`tile` or `wsi` to indicate run mode.
"""
wsi_path = self.imgs[wsi_idx]
mask_path = None if self.masks is None else self.masks[wsi_idx]
wsi_reader, mask_reader = self.get_reader(
wsi_path, mask_path, mode, self.auto_generate_mask
)
# assume ioconfig has already been converted to `baseline` for `tile` mode
resolution = ioconfig.highest_input_resolution
wsi_proc_shape = wsi_reader.slide_dimensions(**resolution)
# * retrieve patch placement
# this is in XY
(patch_inputs, patch_outputs) = self.get_coordinates(wsi_proc_shape, ioconfig)
if mask_reader is not None:
sel = self.filter_coordinates(mask_reader, patch_outputs, **resolution)
patch_outputs = patch_outputs[sel]
patch_inputs = patch_inputs[sel]
# assume to be in [top_left_x, top_left_y, bot_right_x, bot_right_y]
geometries = [shapely_box(*bounds) for bounds in patch_outputs]
spatial_indexer = STRtree(geometries)
# * retrieve tile placement and tile info flag
# tile shape will always be corrected to be multiple of output
tile_info_sets = self._get_tile_info(wsi_proc_shape, ioconfig)
# ! running order of each set matters !
self._futures = []
# ! DEPRECATION:
# ! will be deprecated upon finalization of SQL annotation store
self._wsi_inst_info = {}
# !
for set_idx, (set_bounds, set_flags) in enumerate(tile_info_sets):
for tile_idx, tile_bounds in enumerate(set_bounds):
tile_flag = set_flags[tile_idx]
# select any patches that have their output
# within the current tile
sel_box = shapely_box(*tile_bounds)
sel_indices = list(spatial_indexer.query(sel_box))
# there is nothing in the tile
# Ignore coverage as the condition is difficult
# to reproduce on travis.
if len(sel_indices) == 0: # pragma: no cover
continue
tile_patch_inputs = patch_inputs[sel_indices]
tile_patch_outputs = patch_outputs[sel_indices]
self._to_shared_space(wsi_idx, tile_patch_inputs, tile_patch_outputs)
tile_infer_output = self._infer_once()
self._process_tile_predictions(
ioconfig, tile_bounds, tile_flag, set_idx, tile_infer_output
)
self._merge_post_process_results()
joblib.dump(self._wsi_inst_info, f"{save_path}.dat")
# may need to chain it with parents
self._wsi_inst_info = None # clean up
def _process_tile_predictions(
self, ioconfig, tile_bounds, tile_flag, tile_mode, tile_output
):
"""Function to dispatch parallel post processing."""
args = [
ioconfig,
tile_bounds,
tile_flag,
tile_mode,
tile_output,
self._wsi_inst_info,
self.model.postproc_func,
self.merge_prediction,
]
if self._postproc_workers is not None:
future = self._postproc_workers.submit(_process_tile_predictions, *args)
else:
future = _process_tile_predictions(*args)
self._futures.append(future)
def _merge_post_process_results(self):
"""Helper to aggregate results from parallel workers."""
def callback(new_inst_dict, remove_uuid_list):
"""Helper to aggregate worker's results."""
# ! DEPRECATION:
# ! will be deprecated upon finalization of SQL annotation store
self._wsi_inst_info.update(new_inst_dict)
for inst_uuid in remove_uuid_list:
self._wsi_inst_info.pop(inst_uuid, None)
# !
for future in self._futures:
            # not actually a future but the results themselves
if self._postproc_workers is None:
callback(*future)
continue
            # if any error occurred, log it and propagate the exception;
            # ! this will lead to discarding a bunch of
            # ! inferred tiles within this current WSI
if future.exception() is not None:
raise future.exception()
# aggregate the result via callback
result = future.result()
# manually call the callback rather than
# attaching it when receiving/creating the future
callback(*result)
| 31,587 | 38.93426 | 87 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/engine/patch_predictor.py | """This module implements patch level prediction."""
import copy
import os
import pathlib
from collections import OrderedDict
from typing import Callable, Tuple, Union
import numpy as np
import torch
import tqdm
from tiatoolbox import logger
from tiatoolbox.models.architecture import get_pretrained_model
from tiatoolbox.models.dataset.classification import PatchDataset, WSIPatchDataset
from tiatoolbox.models.engine.semantic_segmentor import IOSegmentorConfig
from tiatoolbox.utils import misc
from tiatoolbox.utils.misc import save_as_json
from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIReader
class IOPatchPredictorConfig(IOSegmentorConfig):
"""Contains patch predictor input and output information."""
def __init__(
self,
patch_input_shape=None,
input_resolutions=None,
stride_shape=None,
**kwargs,
):
stride_shape = patch_input_shape if stride_shape is None else stride_shape
super().__init__(
input_resolutions=input_resolutions,
output_resolutions=[],
stride_shape=stride_shape,
patch_input_shape=patch_input_shape,
patch_output_shape=patch_input_shape,
save_resolution=None,
**kwargs,
)
class PatchPredictor:
r"""Patch level predictor.
The models provided by tiatoolbox should give the following results:
.. list-table:: PatchPredictor performance on the Kather100K dataset [1]
:widths: 15 15
:header-rows: 1
* - Model name
- F\ :sub:`1`\ score
* - alexnet-kather100k
- 0.965
* - resnet18-kather100k
- 0.990
* - resnet34-kather100k
- 0.991
* - resnet50-kather100k
- 0.989
* - resnet101-kather100k
- 0.989
* - resnext50_32x4d-kather100k
- 0.992
* - resnext101_32x8d-kather100k
- 0.991
* - wide_resnet50_2-kather100k
- 0.989
* - wide_resnet101_2-kather100k
- 0.990
* - densenet121-kather100k
- 0.993
* - densenet161-kather100k
- 0.992
* - densenet169-kather100k
- 0.992
* - densenet201-kather100k
- 0.991
* - mobilenet_v2-kather100k
- 0.990
* - mobilenet_v3_large-kather100k
- 0.991
* - mobilenet_v3_small-kather100k
- 0.992
* - googlenet-kather100k
- 0.992
.. list-table:: PatchPredictor performance on the PCam dataset [2]
:widths: 15 15
:header-rows: 1
* - Model name
- F\ :sub:`1`\ score
* - alexnet-pcam
- 0.840
* - resnet18-pcam
- 0.888
* - resnet34-pcam
- 0.889
* - resnet50-pcam
- 0.892
* - resnet101-pcam
- 0.888
* - resnext50_32x4d-pcam
- 0.900
* - resnext101_32x8d-pcam
- 0.892
* - wide_resnet50_2-pcam
- 0.901
* - wide_resnet101_2-pcam
- 0.898
* - densenet121-pcam
- 0.897
* - densenet161-pcam
- 0.893
* - densenet169-pcam
- 0.895
* - densenet201-pcam
- 0.891
* - mobilenet_v2-pcam
- 0.899
* - mobilenet_v3_large-pcam
- 0.895
* - mobilenet_v3_small-pcam
- 0.890
* - googlenet-pcam
- 0.867
Args:
model (nn.Module):
            Use externally defined PyTorch model for prediction, with
            weights already loaded. Default is `None`. If provided,
`pretrained_model` argument is ignored.
pretrained_model (str):
            Name of the existing models supported by tiatoolbox for
processing the data. For a full list of pretrained models,
refer to the `docs
<https://tia-toolbox.readthedocs.io/en/latest/pretrained.html>`_
By default, the corresponding pretrained weights will also
be downloaded. However, you can override with your own set
of weights via the `pretrained_weights` argument. Argument
is case-insensitive.
pretrained_weights (str):
Path to the weight of the corresponding `pretrained_model`.
>>> predictor = PatchPredictor(
... pretrained_model="resnet18-kather100k",
... pretrained_weights="resnet18_local_weight")
batch_size (int):
Number of images fed into the model each time.
num_loader_workers (int):
Number of workers to load the data. Take note that they will
also perform preprocessing.
verbose (bool):
Whether to output logging information.
Attributes:
img (:obj:`str` or :obj:`pathlib.Path` or :obj:`numpy.ndarray`):
A HWC image or a path to WSI.
mode (str):
Type of input to process. Choose from either `patch`, `tile`
or `wsi`.
model (nn.Module):
Defined PyTorch model.
pretrained_model (str):
            Name of the existing models supported by tiatoolbox for
processing the data. For a full list of pretrained models,
refer to the `docs
<https://tia-toolbox.readthedocs.io/en/latest/pretrained.html>`_
By default, the corresponding pretrained weights will also
be downloaded. However, you can override with your own set
of weights via the `pretrained_weights` argument. Argument
is case insensitive.
batch_size (int):
Number of images fed into the model each time.
num_loader_workers (int):
Number of workers used in torch.utils.data.DataLoader.
verbose (bool):
Whether to output logging information.
Examples:
>>> # list of 2 image patches as input
>>> data = [img1, img2]
>>> predictor = PatchPredictor(pretrained_model="resnet18-kather100k")
>>> output = predictor.predict(data, mode='patch')
>>> # array of list of 2 image patches as input
>>> data = np.array([img1, img2])
>>> predictor = PatchPredictor(pretrained_model="resnet18-kather100k")
>>> output = predictor.predict(data, mode='patch')
>>> # list of 2 image patch files as input
>>> data = ['path/img.png', 'path/img.png']
>>> predictor = PatchPredictor(pretrained_model="resnet18-kather100k")
>>> output = predictor.predict(data, mode='patch')
>>> # list of 2 image tile files as input
>>> tile_file = ['path/tile1.png', 'path/tile2.png']
        >>> predictor = PatchPredictor(pretrained_model="resnet18-kather100k")
>>> output = predictor.predict(tile_file, mode='tile')
>>> # list of 2 wsi files as input
>>> wsi_file = ['path/wsi1.svs', 'path/wsi2.svs']
        >>> predictor = PatchPredictor(pretrained_model="resnet18-kather100k")
>>> output = predictor.predict(wsi_file, mode='wsi')
References:
[1] Kather, Jakob Nikolas, et al. "Predicting survival from colorectal cancer
histology slides using deep learning: A retrospective multicenter study."
PLoS medicine 16.1 (2019): e1002730.
[2] Veeling, Bastiaan S., et al. "Rotation equivariant CNNs for digital
pathology." International Conference on Medical image computing and
computer-assisted intervention. Springer, Cham, 2018.
""" # noqa: W605
def __init__(
self,
batch_size=8,
num_loader_workers=0,
model=None,
pretrained_model=None,
pretrained_weights=None,
verbose=True,
):
super().__init__()
self.imgs = None
self.mode = None
if model is None and pretrained_model is None:
raise ValueError("Must provide either `model` or `pretrained_model`.")
if model is not None:
self.model = model
ioconfig = None # retrieve iostate from provided model ?
else:
model, ioconfig = get_pretrained_model(pretrained_model, pretrained_weights)
self.ioconfig = ioconfig # for storing original
self._ioconfig = None # for storing runtime
self.model = model # for runtime, such as after wrapping with nn.DataParallel
self.pretrained_model = pretrained_model
self.batch_size = batch_size
self.num_loader_worker = num_loader_workers
self.verbose = verbose
@staticmethod
def merge_predictions(
img: Union[str, pathlib.Path, np.ndarray],
output: dict,
resolution: float = None,
units: str = None,
postproc_func: Callable = None,
return_raw: bool = False,
):
"""Merge patch level predictions to form a 2-dimensional prediction map.
        The prediction map contains values from 0 to N, where N is the
        number of classes. Here, 0 denotes background regions that have
        not been processed by the model, while values 1 to N correspond
        to the N classes predicted by the model.
Args:
img (:obj:`str` or :obj:`pathlib.Path` or :class:`numpy.ndarray`):
A HWC image or a path to WSI.
output (dict):
Output generated by the model.
resolution (float):
Resolution of merged predictions.
units (str):
Units of resolution used when merging predictions. This
must be the same `units` used when processing the data.
postproc_func (callable):
A function to post-process raw prediction from model. By
default, internal code uses the `np.argmax` function.
return_raw (bool):
Return raw result without applying the `postproc_func`
on the assembled image.
Returns:
:class:`numpy.ndarray`:
Merged predictions as a 2D array.
Examples:
>>> # pseudo output dict from model with 2 patches
>>> output = {
... 'resolution': 1.0,
... 'units': 'baseline',
... 'probabilities': [[0.45, 0.55], [0.90, 0.10]],
... 'predictions': [1, 0],
... 'coordinates': [[0, 0, 2, 2], [2, 2, 4, 4]],
... }
>>> merged = PatchPredictor.merge_predictions(
... np.zeros([4, 4]),
... output,
... resolution=1.0,
... units='baseline'
... )
>>> merged
... array([[2, 2, 0, 0],
... [2, 2, 0, 0],
... [0, 0, 1, 1],
... [0, 0, 1, 1]])
"""
reader = WSIReader.open(img)
if isinstance(reader, VirtualWSIReader):
logger.warning(
"Image is not pyramidal hence read is forced to be "
"at `units='baseline'` and `resolution=1.0`.",
stacklevel=2,
)
resolution = 1.0
units = "baseline"
canvas_shape = reader.slide_dimensions(resolution=resolution, units=units)
canvas_shape = canvas_shape[::-1] # XY to YX
# may crash here, do we need to deal with this ?
output_shape = reader.slide_dimensions(
resolution=output["resolution"], units=output["units"]
)
output_shape = output_shape[::-1] # XY to YX
fx = np.array(canvas_shape) / np.array(output_shape)
if "probabilities" not in output.keys():
coordinates = output["coordinates"]
predictions = output["predictions"]
denominator = None
output = np.zeros(list(canvas_shape), dtype=np.float32)
else:
coordinates = output["coordinates"]
predictions = output["probabilities"]
num_class = np.array(predictions[0]).shape[0]
denominator = np.zeros(canvas_shape)
output = np.zeros(list(canvas_shape) + [num_class], dtype=np.float32)
for idx, bound in enumerate(coordinates):
prediction = predictions[idx]
# assumed to be in XY
# top-left for output placement
tl = np.ceil(np.array(bound[:2]) * fx).astype(np.int32)
# bot-right for output placement
br = np.ceil(np.array(bound[2:]) * fx).astype(np.int32)
output[tl[1] : br[1], tl[0] : br[0]] += prediction
if denominator is not None:
denominator[tl[1] : br[1], tl[0] : br[0]] += 1
# deal with overlapping regions
if denominator is not None:
output = output / (np.expand_dims(denominator, -1) + 1.0e-8)
if not return_raw:
# convert raw probabilities to predictions
if postproc_func is not None:
output = postproc_func(output)
else:
output = np.argmax(output, axis=-1)
# to make sure background is 0 while class will be 1...N
output[denominator > 0] += 1
return output
def _predict_engine(
self,
dataset,
return_probabilities=False,
return_labels=False,
return_coordinates=False,
on_gpu=True,
):
"""Make a prediction on a dataset. The dataset may be mutated.
Args:
dataset (torch.utils.data.Dataset):
PyTorch dataset object created using
                `tiatoolbox.models.dataset.classification.PatchDataset`.
return_probabilities (bool):
Whether to return per-class probabilities.
return_labels (bool):
Whether to return labels.
return_coordinates (bool):
Whether to return patch coordinates.
on_gpu (bool):
Whether to run model on the GPU.
Returns:
:class:`numpy.ndarray`:
Model predictions of the input dataset
"""
dataset.preproc_func = self.model.preproc_func
# preprocessing must be defined with the dataset
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=self.num_loader_worker,
batch_size=self.batch_size,
drop_last=False,
shuffle=False,
)
if self.verbose:
pbar = tqdm.tqdm(
total=int(len(dataloader)), leave=True, ncols=80, ascii=True, position=0
)
# use external for testing
model = misc.model_to(on_gpu, self.model)
cum_output = {
"probabilities": [],
"predictions": [],
"coordinates": [],
"labels": [],
}
for _, batch_data in enumerate(dataloader):
batch_output_probabilities = self.model.infer_batch(
model, batch_data["image"], on_gpu
)
# We get the index of the class with the maximum probability
batch_output_predictions = self.model.postproc_func(
batch_output_probabilities
)
# tolist might be very expensive
cum_output["probabilities"].extend(batch_output_probabilities.tolist())
cum_output["predictions"].extend(batch_output_predictions.tolist())
if return_coordinates:
cum_output["coordinates"].extend(batch_data["coords"].tolist())
if return_labels: # be careful of `s`
# We do not use tolist here because label may be of mixed types
# and hence collated as list by torch
cum_output["labels"].extend(list(batch_data["label"]))
if self.verbose:
pbar.update()
if self.verbose:
pbar.close()
if not return_probabilities:
cum_output.pop("probabilities")
if not return_labels:
cum_output.pop("labels")
if not return_coordinates:
cum_output.pop("coordinates")
return cum_output
def _update_ioconfig(
self,
ioconfig,
patch_input_shape,
stride_shape,
resolution,
units,
):
"""
Args:
ioconfig (IOPatchPredictorConfig):
patch_input_shape (tuple):
Size of patches input to the model. Patches are at
requested read resolution, not with respect to level 0,
and must be positive.
stride_shape (tuple):
Stride using during tile and WSI processing. Stride is
at requested read resolution, not with respect to
level 0, and must be positive. If not provided,
`stride_shape=patch_input_shape`.
resolution (float):
Resolution used for reading the image. Please see
:obj:`WSIReader` for details.
units (str):
Units of resolution used for reading the image. Choose
from either `level`, `power` or `mpp`. Please see
:obj:`WSIReader` for details.
Returns:
Updated Patch Predictor IO configuration.
"""
config_flag = (
patch_input_shape is None,
resolution is None,
units is None,
)
if ioconfig:
return ioconfig
if self.ioconfig is None and any(config_flag):
raise ValueError(
"Must provide either `ioconfig` or "
"`patch_input_shape`, `resolution`, and `units`."
)
if stride_shape is None:
stride_shape = patch_input_shape
if self.ioconfig:
ioconfig = copy.deepcopy(self.ioconfig)
# ! not sure if there is a nicer way to set this
if patch_input_shape is not None:
ioconfig.patch_input_shape = patch_input_shape
if stride_shape is not None:
ioconfig.stride_shape = stride_shape
if resolution is not None:
ioconfig.input_resolutions[0]["resolution"] = resolution
if units is not None:
ioconfig.input_resolutions[0]["units"] = units
return ioconfig
return IOPatchPredictorConfig(
input_resolutions=[{"resolution": resolution, "units": units}],
patch_input_shape=patch_input_shape,
stride_shape=stride_shape,
)
@staticmethod
def _prepare_save_dir(save_dir, imgs):
"""Create directory if not defined and number of images is more than 1.
Args:
save_dir (str or pathlib.Path):
Path to output directory.
imgs (list, ndarray):
List of inputs to process.
Returns:
:class:`pathlib.Path`:
Path to output directory.
"""
if save_dir is None and len(imgs) > 1:
logger.warning(
"More than 1 WSIs detected but there is no save directory set."
"All subsequent output will be saved to current runtime"
"location under folder 'output'. Overwriting may happen!",
stacklevel=2,
)
save_dir = pathlib.Path(os.getcwd()).joinpath("output")
elif save_dir is not None and len(imgs) > 1:
logger.warning(
"When providing multiple whole-slide images / tiles, "
"we save the outputs and return the locations "
"to the corresponding files.",
stacklevel=2,
)
if save_dir is not None:
save_dir = pathlib.Path(save_dir)
save_dir.mkdir(parents=True, exist_ok=False)
return save_dir
def _predict_patch(self, imgs, labels, return_probabilities, return_labels, on_gpu):
"""Process patch mode.
Args:
imgs (list, ndarray):
                List of inputs to process. When using `patch` mode, the
input must be either a list of images, a list of image
file paths or a numpy array of an image list. When using
`tile` or `wsi` mode, the input must be a list of file
paths.
labels:
List of labels. If using `tile` or `wsi` mode, then only
a single label per image tile or whole-slide image is
supported.
return_probabilities (bool):
Whether to return per-class probabilities.
return_labels (bool):
Whether to return the labels with the predictions.
on_gpu (bool):
Whether to run model on the GPU.
Returns:
:class:`numpy.ndarray`:
Model predictions of the input dataset
"""
if labels:
            # if labels are provided, return them with the predictions
return_labels = bool(labels)
if labels and len(labels) != len(imgs):
raise ValueError(
f"len(labels) != len(imgs) : " f"{len(labels)} != {len(imgs)}"
)
# don't return coordinates if patches are already extracted
return_coordinates = False
dataset = PatchDataset(imgs, labels)
return self._predict_engine(
dataset, return_probabilities, return_labels, return_coordinates, on_gpu
)
def _predict_tile_wsi(
self,
imgs,
masks,
labels,
mode,
return_probabilities,
on_gpu,
ioconfig,
merge_predictions,
save_dir,
save_output,
highest_input_resolution,
):
"""Predict on Tile and WSIs.
Args:
imgs (list, ndarray):
                List of inputs to process. When using `patch` mode, the
input must be either a list of images, a list of image
file paths or a numpy array of an image list. When using
`tile` or `wsi` mode, the input must be a list of file
paths.
masks (list):
List of masks. Only utilised when processing image tiles
and whole-slide images. Patches are only processed if
they are within a masked area. If not provided, then a
tissue mask will be automatically generated for
whole-slide images or the entire image is processed for
image tiles.
labels:
List of labels. If using `tile` or `wsi` mode, then only
a single label per image tile or whole-slide image is
supported.
mode (str):
Type of input to process. Choose from either `patch`,
`tile` or `wsi`.
return_probabilities (bool):
Whether to return per-class probabilities.
on_gpu (bool):
Whether to run model on the GPU.
ioconfig (IOPatchPredictorConfig):
                Patch Predictor IO configuration.
merge_predictions (bool):
Whether to merge the predictions to form a 2-dimensional
map. This is only applicable for `mode='wsi'` or
`mode='tile'`.
save_dir (str or pathlib.Path):
Output directory when processing multiple tiles and
whole-slide images. By default, it is folder `output`
where the running script is invoked.
            save_output (bool):
                Whether to save output for a single file. Default is `False`.
            highest_input_resolution (dict):
                The highest input resolution from
                `ioconfig.input_resolutions`, as a
                `{'units': ..., 'resolution': ...}` dictionary, used to
                annotate the output.
Returns:
dict:
Results are saved to `save_dir` and a dictionary indicating save
location for each input is returned. The dict is in the following
format:
- img_path: path of the input image.
                - raw: path to the save location of the raw prediction,
                  saved as .json.
                - merged: path to the .npy file containing the merged
                  predictions, if `merge_predictions` is `True`.
"""
# return coordinates of patches processed within a tile / whole-slide image
return_coordinates = True
# None if no output
outputs = None
self._ioconfig = ioconfig
# generate a list of output file paths if number of input images > 1
file_dict = OrderedDict()
if len(imgs) > 1:
save_output = True
for idx, img_path in enumerate(imgs):
img_path = pathlib.Path(img_path)
img_label = None if labels is None else labels[idx]
img_mask = None if masks is None else masks[idx]
dataset = WSIPatchDataset(
img_path,
mode=mode,
mask_path=img_mask,
patch_input_shape=ioconfig.patch_input_shape,
stride_shape=ioconfig.stride_shape,
resolution=ioconfig.input_resolutions[0]["resolution"],
units=ioconfig.input_resolutions[0]["units"],
)
output_model = self._predict_engine(
dataset,
return_labels=False,
return_probabilities=return_probabilities,
return_coordinates=return_coordinates,
on_gpu=on_gpu,
)
output_model["label"] = img_label
# add extra information useful for downstream analysis
output_model["pretrained_model"] = self.pretrained_model
output_model["resolution"] = highest_input_resolution["resolution"]
output_model["units"] = highest_input_resolution["units"]
outputs = [output_model] # assign to a list
merged_prediction = None
if merge_predictions:
merged_prediction = self.merge_predictions(
img_path,
output_model,
resolution=output_model["resolution"],
units=output_model["units"],
postproc_func=self.model.postproc,
)
outputs.append(merged_prediction)
if save_output:
# dynamic 0 padding
img_code = f"{idx:0{len(str(len(imgs)))}d}"
save_info = {}
save_path = os.path.join(str(save_dir), img_code)
raw_save_path = f"{save_path}.raw.json"
save_info["raw"] = raw_save_path
save_as_json(output_model, raw_save_path)
if merge_predictions:
merged_file_path = f"{save_path}.merged.npy"
np.save(merged_file_path, merged_prediction)
save_info["merged"] = merged_file_path
file_dict[str(img_path)] = save_info
return file_dict if save_output else outputs
def predict(
self,
imgs,
masks=None,
labels=None,
mode="patch",
return_probabilities=False,
return_labels=False,
on_gpu=True,
ioconfig: IOPatchPredictorConfig = None,
patch_input_shape: Tuple[int, int] = None,
stride_shape: Tuple[int, int] = None,
resolution=None,
units=None,
merge_predictions=False,
save_dir=None,
save_output=False,
):
"""Make a prediction for a list of input data.
Args:
imgs (list, ndarray):
                List of inputs to process. When using `patch` mode, the
input must be either a list of images, a list of image
file paths or a numpy array of an image list. When using
`tile` or `wsi` mode, the input must be a list of file
paths.
masks (list):
List of masks. Only utilised when processing image tiles
and whole-slide images. Patches are only processed if
they are within a masked area. If not provided, then a
tissue mask will be automatically generated for
whole-slide images or the entire image is processed for
image tiles.
labels:
List of labels. If using `tile` or `wsi` mode, then only
a single label per image tile or whole-slide image is
supported.
mode (str):
Type of input to process. Choose from either `patch`,
`tile` or `wsi`.
return_probabilities (bool):
Whether to return per-class probabilities.
return_labels (bool):
Whether to return the labels with the predictions.
on_gpu (bool):
Whether to run model on the GPU.
ioconfig (IOPatchPredictorConfig):
Patch Predictor IO configuration.
patch_input_shape (tuple):
Size of patches input to the model. Patches are at
requested read resolution, not with respect to level 0,
and must be positive.
stride_shape (tuple):
Stride using during tile and WSI processing. Stride is
at requested read resolution, not with respect to
level 0, and must be positive. If not provided,
`stride_shape=patch_input_shape`.
resolution (float):
Resolution used for reading the image. Please see
:obj:`WSIReader` for details.
units (str):
Units of resolution used for reading the image. Choose
from either `level`, `power` or `mpp`. Please see
:obj:`WSIReader` for details.
merge_predictions (bool):
Whether to merge the predictions to form a 2-dimensional
map. This is only applicable for `mode='wsi'` or
`mode='tile'`.
save_dir (str or pathlib.Path):
Output directory when processing multiple tiles and
whole-slide images. By default, it is folder `output`
where the running script is invoked.
save_output (bool):
                Whether to save output for a single file. Default is `False`.
Returns:
(:class:`numpy.ndarray`, dict):
Model predictions of the input dataset. If multiple
image tiles or whole-slide images are provided as input,
or save_output is True, then results are saved to
`save_dir` and a dictionary indicating save location for
each input is returned.
The dict has the following format:
- img_path: path of the input image.
                - raw: path to the save location of the raw prediction,
                  saved as .json.
                - merged: path to the .npy file containing the merged
                  predictions, if `merge_predictions` is `True`.
Examples:
>>> wsis = ['wsi1.svs', 'wsi2.svs']
>>> predictor = PatchPredictor(
... pretrained_model="resnet18-kather100k")
>>> output = predictor.predict(wsis, mode="wsi")
>>> output.keys()
... ['wsi1.svs', 'wsi2.svs']
>>> output['wsi1.svs']
... {'raw': '0.raw.json', 'merged': '0.merged.npy'}
>>> output['wsi2.svs']
... {'raw': '1.raw.json', 'merged': '1.merged.npy'}
"""
if mode not in ["patch", "wsi", "tile"]:
raise ValueError(
f"{mode} is not a valid mode. Use either `patch`, `tile` or `wsi`"
)
if mode == "patch":
return self._predict_patch(
imgs, labels, return_probabilities, return_labels, on_gpu
)
if not isinstance(imgs, list):
raise ValueError(
"Input to `tile` and `wsi` mode must be a list of file paths."
)
if mode == "wsi" and masks is not None and len(masks) != len(imgs):
raise ValueError(
f"len(masks) != len(imgs) : " f"{len(masks)} != {len(imgs)}"
)
ioconfig = self._update_ioconfig(
ioconfig, patch_input_shape, stride_shape, resolution, units
)
if mode == "tile":
logger.warning(
"WSIPatchDataset only reads image tile at "
'`units="baseline"`. Resolutions will be converted '
"to baseline value.",
stacklevel=2,
)
ioconfig = ioconfig.to_baseline()
fx_list = ioconfig.scale_to_highest(
ioconfig.input_resolutions, ioconfig.input_resolutions[0]["units"]
)
fx_list = zip(fx_list, ioconfig.input_resolutions)
fx_list = sorted(fx_list, key=lambda x: x[0])
highest_input_resolution = fx_list[0][1]
save_dir = self._prepare_save_dir(save_dir, imgs)
return self._predict_tile_wsi(
imgs,
masks,
labels,
mode,
return_probabilities,
on_gpu,
ioconfig,
merge_predictions,
save_dir,
save_output,
highest_input_resolution,
)
| 33,563 | 36.376392 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/engine/semantic_segmentor.py | """This module implements semantic segmentation."""
import copy
import logging
import os
import pathlib
import shutil
from concurrent.futures import ProcessPoolExecutor
from multiprocessing.managers import Namespace
from typing import Callable, List, Tuple, Union
import cv2
import joblib
import numpy as np
import torch
import torch.multiprocessing as torch_mp
import torch.utils.data as torch_data
import tqdm
from tiatoolbox import logger
from tiatoolbox.models.architecture import get_pretrained_model
from tiatoolbox.models.models_abc import IOConfigABC
from tiatoolbox.tools.patchextraction import PatchExtractor
from tiatoolbox.utils import misc
from tiatoolbox.utils.misc import imread
from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIMeta, WSIReader
def _estimate_canvas_parameters(sample_prediction, canvas_shape):
"""Estimates canvas parameters.
Args:
sample_prediction (:class:`numpy.ndarray`):
            Patch prediction assumed to be of shape HWC.
canvas_shape (:class:`numpy.ndarray`):
HW of the supposed assembled image.
Returns:
(tuple, tuple, bool):
Canvas Shape, Canvas Count and whether to add singleton dimension.
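    Example:
        >>> # Illustrative shapes (assumed values): a 3-channel 256x256
        >>> # patch assembled onto a 1000x1000 canvas gives a cumulative
        >>> # canvas shape of (1000, 1000, 3), a count canvas shape of
        >>> # (1000, 1000, 1), and no singleton dimension to add.
        >>> cum_shape, count_shape, add_dim = _estimate_canvas_parameters(
        ...     np.zeros((256, 256, 3)), np.array([1000, 1000]))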
"""
if len(sample_prediction.shape) == 3:
num_output_ch = sample_prediction.shape[-1]
canvas_cum_shape_ = tuple(canvas_shape) + (num_output_ch,)
canvas_count_shape_ = tuple(canvas_shape) + (1,)
add_singleton_dim = num_output_ch == 1
else:
canvas_cum_shape_ = tuple(canvas_shape) + (1,)
canvas_count_shape_ = tuple(canvas_shape) + (1,)
add_singleton_dim = True
return canvas_cum_shape_, canvas_count_shape_, add_singleton_dim
def _prepare_save_output(
save_path, cache_count_path, canvas_cum_shape_, canvas_count_shape_
):
"""Prepares for saving the cached output."""
if save_path is not None:
if os.path.exists(save_path) and os.path.exists(cache_count_path):
cum_canvas = np.load(save_path, mmap_mode="r+")
count_canvas = np.load(cache_count_path, mmap_mode="r+")
if canvas_cum_shape_ != cum_canvas.shape:
raise ValueError("Existing image shape in `save_path` does not match.")
if canvas_count_shape_ != count_canvas.shape:
raise ValueError(
"Existing image shape in `cache_count_path` does not match."
)
else:
cum_canvas = np.lib.format.open_memmap(
save_path,
mode="w+",
shape=canvas_cum_shape_,
dtype=np.float32,
)
# assuming no more than 255 overlapping times
count_canvas = np.lib.format.open_memmap(
cache_count_path,
mode="w+",
shape=canvas_count_shape_,
dtype=np.uint8,
)
# flush fill
count_canvas[:] = 0
is_on_drive = True
else:
is_on_drive = False
cum_canvas = np.zeros(
shape=canvas_cum_shape_,
dtype=np.float32,
)
# for pixel occurrence counting
count_canvas = np.zeros(canvas_count_shape_, dtype=np.float32)
return is_on_drive, count_canvas, cum_canvas
class IOSegmentorConfig(IOConfigABC):
"""Contain semantic segmentor input and output information.
Args:
        input_resolutions (list):
            Resolution of each input head of model inference, must be in
            the same order as the target `model.forward()`.
        output_resolutions (list):
            Resolution of each output head from model inference, must be
            in the same order as the target `model.infer_batch()`.
patch_input_shape (:class:`numpy.ndarray`, list(int)):
Shape of the largest input in (height, width).
patch_output_shape (:class:`numpy.ndarray`, list(int)):
Shape of the largest output in (height, width).
save_resolution (dict):
Resolution to save all output.
Examples:
>>> # Defining io for a network having 1 input and 1 output at the
>>> # same resolution
>>> ioconfig = IOSegmentorConfig(
... input_resolutions=[{"units": "baseline", "resolution": 1.0}],
... output_resolutions=[{"units": "baseline", "resolution": 1.0}],
... patch_input_shape=[2048, 2048],
... patch_output_shape=[1024, 1024],
... stride_shape=[512, 512],
... )
Examples:
>>> # Defining io for a network having 3 input and 2 output
>>> # at the same resolution, the output is then merged at a
>>> # different resolution.
>>> ioconfig = IOSegmentorConfig(
... input_resolutions=[
... {"units": "mpp", "resolution": 0.25},
... {"units": "mpp", "resolution": 0.50},
... {"units": "mpp", "resolution": 0.75},
... ],
... output_resolutions=[
... {"units": "mpp", "resolution": 0.25},
... {"units": "mpp", "resolution": 0.50},
... ],
... patch_input_shape=[2048, 2048],
... patch_output_shape=[1024, 1024],
... stride_shape=[512, 512],
... save_resolution={"units": "mpp", "resolution": 4.0},
... )
"""
    # Pre-defined to satisfy the ABC enforcement; actual initialisation is in __init__
input_resolutions = None
output_resolutions = None
def __init__(
self,
input_resolutions: List[dict],
output_resolutions: List[dict],
patch_input_shape: Union[List[int], np.ndarray],
patch_output_shape: Union[List[int], np.ndarray],
save_resolution: dict = None,
**kwargs,
):
self._kwargs = kwargs
self.patch_input_shape = patch_input_shape
self.patch_output_shape = patch_output_shape
self.stride_shape = None
self.input_resolutions = input_resolutions
self.output_resolutions = output_resolutions
self.resolution_unit = input_resolutions[0]["units"]
self.save_resolution = save_resolution
for variable, value in kwargs.items():
self.__setattr__(variable, value)
self._validate()
if self.resolution_unit == "mpp":
self.highest_input_resolution = min(
self.input_resolutions, key=lambda x: x["resolution"]
)
else:
self.highest_input_resolution = max(
self.input_resolutions, key=lambda x: x["resolution"]
)
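    # Note on the selection above: for "mpp" units a *smaller* value means
    # a *higher* resolution, hence `min`; for "power" and "baseline" units
    # a larger value is the higher resolution, hence `max`. For example
    # (hypothetical values), inputs at 0.25 and 0.5 mpp would yield
    # `highest_input_resolution == {"units": "mpp", "resolution": 0.25}`.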
def _validate(self):
"""Validate the data format."""
resolutions = self.input_resolutions + self.output_resolutions
units = [v["units"] for v in resolutions]
units = np.unique(units)
if len(units) != 1 or units[0] not in [
"power",
"baseline",
"mpp",
]:
raise ValueError(f"Invalid resolution units `{units[0]}`.")
@staticmethod
def scale_to_highest(resolutions: List[dict], units: str):
"""Get the scaling factor from input resolutions.
This will convert resolutions to a scaling factor with respect to
the highest resolution found in the input resolutions list.
Args:
resolutions (list):
A list of resolutions where one is defined as
`{'resolution': value, 'unit': value}`
units (str):
Units that the resolutions are at.
Returns:
:class:`numpy.ndarray`:
A 1D array of scaling factors having the same length as
`resolutions`
"""
old_val = [v["resolution"] for v in resolutions]
if units not in ["baseline", "mpp", "power"]:
raise ValueError(
f"Unknown units `{units}`. "
"Units should be one of 'baseline', 'mpp' or 'power'."
)
if units == "baseline":
return old_val
if units == "mpp":
return np.min(old_val) / np.array(old_val)
return np.array(old_val) / np.max(old_val)
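    # Illustrative sketch of the scaling above (hypothetical values):
    # >>> IOSegmentorConfig.scale_to_highest(
    # ...     [{"units": "mpp", "resolution": 0.25},
    # ...      {"units": "mpp", "resolution": 0.5}],
    # ...     "mpp")
    # array([1. , 0.5])
    # For "power" units the values are divided by the maximum instead, so
    # the highest magnification maps to a factor of 1.0.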
def to_baseline(self):
"""Return a new config object converted to baseline form.
This will return a new :class:`IOSegmentorConfig` where
resolutions have been converted to baseline format with the
highest possible resolution found in both input and output as
reference.
"""
resolutions = self.input_resolutions + self.output_resolutions
if self.save_resolution is not None:
resolutions.append(self.save_resolution)
scale_factors = self.scale_to_highest(resolutions, self.resolution_unit)
num_input_resolutions = len(self.input_resolutions)
num_output_resolutions = len(self.output_resolutions)
end_idx = num_input_resolutions
input_resolutions = [
{"units": "baseline", "resolution": v} for v in scale_factors[:end_idx]
]
end_idx = num_input_resolutions + num_output_resolutions
output_resolutions = [
{"units": "baseline", "resolution": v}
for v in scale_factors[num_input_resolutions:end_idx]
]
save_resolution = None
if self.save_resolution is not None:
save_resolution = {"units": "baseline", "resolution": scale_factors[-1]}
return IOSegmentorConfig(
input_resolutions=input_resolutions,
output_resolutions=output_resolutions,
patch_input_shape=self.patch_input_shape,
patch_output_shape=self.patch_output_shape,
save_resolution=save_resolution,
**self._kwargs,
)
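    # Sketch of the conversion above (hypothetical values): with an input
    # at 0.25 mpp and an output at 0.5 mpp, the scale factors relative to
    # the highest resolution are 1.0 and 0.5 respectively.
    # >>> cfg = IOSegmentorConfig(
    # ...     input_resolutions=[{"units": "mpp", "resolution": 0.25}],
    # ...     output_resolutions=[{"units": "mpp", "resolution": 0.5}],
    # ...     patch_input_shape=[2048, 2048],
    # ...     patch_output_shape=[1024, 1024],
    # ... )
    # >>> cfg.to_baseline().output_resolutions
    # [{'units': 'baseline', 'resolution': 0.5}]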
class WSIStreamDataset(torch_data.Dataset):
"""Reading a wsi in parallel mode with persistent workers.
To speed up the inference process for multiple WSIs. The
`torch.utils.data.Dataloader` is set to run in persistent mode.
Normally, this will prevent workers from altering their initial
states (such as provided input etc.). To sidestep this, we use a
shared parallel workspace context manager to send data and signal
from the main thread, thus allowing each worker to load a new wsi as
well as corresponding patch information.
Args:
mp_shared_space (:class:`Namespace`):
A shared multiprocessing space, must be from
`torch.multiprocessing`.
ioconfig (:class:`IOSegmentorConfig`):
An object which contains I/O placement for patches.
wsi_paths (list): List of paths pointing to a WSI or tiles.
preproc (Callable):
Pre-processing function to be applied to a patch.
mode (str):
Either `"wsi"` or `"tile"` to indicate the format of images
in `wsi_paths`.
Examples:
>>> ioconfig = IOSegmentorConfig(
... input_resolutions=[{"units": "baseline", "resolution": 1.0}],
... output_resolutions=[{"units": "baseline", "resolution": 1.0}],
... patch_input_shape=[2048, 2048],
... patch_output_shape=[1024, 1024],
... stride_shape=[512, 512],
... )
>>> mp_manager = torch_mp.Manager()
>>> mp_shared_space = mp_manager.Namespace()
>>> mp_shared_space.signal = 1 # adding variable to the shared space
>>> wsi_paths = ['A.svs', 'B.svs']
>>> wsi_dataset = WSIStreamDataset(ioconfig, wsi_paths, mp_shared_space)
"""
def __init__(
self,
ioconfig: IOSegmentorConfig,
wsi_paths: List[Union[str, pathlib.Path]],
mp_shared_space: Namespace,
preproc: Callable[[np.ndarray], np.ndarray] = None,
mode="wsi",
):
super().__init__()
self.mode = mode
self.preproc = preproc
self.ioconfig = copy.deepcopy(ioconfig)
if mode == "tile":
logger.warning(
"WSIPatchDataset only reads image tile at "
'`units="baseline"`. Resolutions will be converted '
"to baseline value.",
stacklevel=2,
)
self.ioconfig = self.ioconfig.to_baseline()
self.mp_shared_space = mp_shared_space
self.wsi_paths = wsi_paths
self.wsi_idx = None # to be received externally via thread communication
self.reader = None
def _get_reader(self, img_path):
"""Get appropriate reader for input path."""
img_path = pathlib.Path(img_path)
if self.mode == "wsi":
return WSIReader.open(img_path)
img = imread(img_path)
# initialise metadata for VirtualWSIReader.
# here, we simulate a whole-slide image, but with a single level.
metadata = WSIMeta(
mpp=np.array([1.0, 1.0]),
objective_power=10,
axes="YXS",
slide_dimensions=np.array(img.shape[:2][::-1]),
level_downsamples=[1.0],
level_dimensions=[np.array(img.shape[:2][::-1])],
)
return VirtualWSIReader(
img,
info=metadata,
)
def __len__(self):
return len(self.mp_shared_space.patch_inputs)
@staticmethod
def collate_fn(batch):
"""Prototype to handle reading exception.
This will exclude any sample with `None` from the batch. As
such, wrapping `__getitem__` with try-catch and return `None`
upon exceptions will prevent crashing the entire program. But as
a side effect, the batch may not have the size as defined.
"""
batch = [v for v in batch if v is not None]
return torch.utils.data.dataloader.default_collate(batch)
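    # Minimal sketch of the behaviour described above (hypothetical data):
    # a failed read is collated away rather than crashing the loader.
    # >>> import torch
    # >>> WSIStreamDataset.collate_fn([torch.ones(2), None, torch.zeros(2)])
    # tensor([[1., 1.],
    #         [0., 0.]])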
def __getitem__(self, idx: int):
# ! no need to lock as we do not modify source value in shared space
if self.wsi_idx != self.mp_shared_space.wsi_idx:
self.wsi_idx = int(self.mp_shared_space.wsi_idx.item())
self.reader = self._get_reader(self.wsi_paths[self.wsi_idx])
# this is in XY and at requested resolution (not baseline)
bounds = self.mp_shared_space.patch_inputs[idx]
bounds = bounds.numpy() # expected to be a torch.Tensor
        # the read size should be the same as bounds br - tl, unless bounds are float
patch_data_ = []
scale_factors = self.ioconfig.scale_to_highest(
self.ioconfig.input_resolutions, self.ioconfig.resolution_unit
)
for idy, resolution in enumerate(self.ioconfig.input_resolutions):
resolution_bounds = np.round(bounds * scale_factors[idy])
patch_data = self.reader.read_bounds(
resolution_bounds.astype(np.int32),
coord_space="resolution",
pad_constant_values=0, # expose this ?
**resolution,
)
if self.preproc is not None:
patch_data = patch_data.copy()
patch_data = self.preproc(patch_data)
patch_data_.append(patch_data)
if len(patch_data_) == 1:
patch_data_ = patch_data_[0]
bound = self.mp_shared_space.patch_outputs[idx]
return patch_data_, bound
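    # Note on the read loop above: every input head reads the *same*
    # physical region, so the bounds (expressed at the highest input
    # resolution) are rescaled per head. With hypothetical heads at 0.25
    # and 0.5 mpp, a bound of [0, 0, 2048, 2048] becomes
    # [0, 0, 1024, 1024] for the 0.5 mpp head (scale factor 0.5).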
class SemanticSegmentor:
"""Pixel-wise segmentation predictor.
The tiatoolbox model should produce the following results on the BCSS dataset
using fcn_resnet50_unet-bcss.
.. list-table:: Semantic segmentation performance on the BCSS dataset
:widths: 15 15 15 15 15 15 15
:header-rows: 1
* -
- Tumour
- Stroma
- Inflammatory
- Necrosis
- Other
- All
* - Amgad et al.
- 0.851
- 0.800
- 0.712
- 0.723
- 0.666
- 0.750
* - TIAToolbox
- 0.885
- 0.825
- 0.761
- 0.765
- 0.581
- 0.763
Note, if `model` is supplied in the arguments, it will ignore the
`pretrained_model` and `pretrained_weights` arguments.
Args:
model (nn.Module):
Use externally defined PyTorch model for prediction with
weights already loaded. Default is `None`. If provided,
`pretrained_model` argument is ignored.
pretrained_model (str):
Name of the existing models support by tiatoolbox for
processing the data. For a full list of pretrained models,
refer to the `docs
<https://tia-toolbox.readthedocs.io/en/latest/pretrained.html>`_.
By default, the corresponding pretrained weights will also
be downloaded. However, you can override with your own set
of weights via the `pretrained_weights` argument. Argument
is case-insensitive.
pretrained_weights (str):
Path to the weight of the corresponding `pretrained_model`.
batch_size (int):
Number of images fed into the model each time.
num_loader_workers (int):
Number of workers to load the data. Take note that they will
also perform preprocessing.
num_postproc_workers (int):
This value is there to maintain input compatibility with
`tiatoolbox.models.classification` and is not used.
verbose (bool):
Whether to output logging information.
dataset_class (obj):
Dataset class to be used instead of default.
auto_generate_mask (bool):
To automatically generate tile/WSI tissue mask if is not
provided.
Attributes:
process_prediction_per_batch (bool):
A flag to denote whether post-processing for inference
output is applied after each batch or after finishing an entire
tile or WSI.
Examples:
>>> # Sample output of a network
>>> wsis = ['A/wsi.svs', 'B/wsi.svs']
>>> predictor = SemanticSegmentor(model='fcn-tissue_mask')
>>> output = predictor.predict(wsis, mode='wsi')
        >>> output
        [('A/wsi.svs', 'output/0.raw'), ('B/wsi.svs', 'output/1.raw')]
        >>> # if a network has 2 output heads, each head output of 'A/wsi.svs'
>>> # will be respectively stored in 'output/0.raw.0', 'output/0.raw.1'
"""
def __init__(
self,
batch_size: int = 8,
num_loader_workers: int = 0,
num_postproc_workers: int = 0, # skipcq: PYL-W0613
model: torch.nn.Module = None,
pretrained_model: str = None,
pretrained_weights: str = None,
verbose: bool = True,
auto_generate_mask: bool = False,
dataset_class: Callable = WSIStreamDataset,
):
super().__init__()
if model is None and pretrained_model is None:
raise ValueError("Must provide either of `model` or `pretrained_model`")
if model is not None:
self.model = model
# template ioconfig, usually coming from pretrained
self.ioconfig = None
else:
model, ioconfig = get_pretrained_model(pretrained_model, pretrained_weights)
self.ioconfig = ioconfig
self.model = model
# local variables for flagging mode within class,
# subclass should have overwritten to alter some specific behavior
self.process_prediction_per_batch = True
# for runtime, such as after wrapping with nn.DataParallel
self._cache_dir = None
self._loader = None
self._model = None
self._on_gpu = None
self._mp_shared_space = None
self._postproc_workers = None
self._futures = None
self._outputs = []
self.imgs = None
self.masks = None
self.dataset_class: WSIStreamDataset = dataset_class
self.model = model # original copy
self.pretrained_model = pretrained_model
self.batch_size = batch_size
self.num_loader_workers = num_loader_workers
self.num_postproc_workers = None
self.verbose = verbose
self.auto_generate_mask = auto_generate_mask
@staticmethod
def get_coordinates(
image_shape: Union[List[int], np.ndarray], ioconfig: IOSegmentorConfig
):
"""Calculate patch tiling coordinates.
By default, internally, it will call the
`PatchExtractor.get_coordinates`. To use your own approach,
either subclass to overwrite or directly assign your own
        function to this name. In either case, the function must obey
the API defined here.
Args:
            image_shape (tuple(int), :class:`numpy.ndarray`):
                This argument specifies the shape of the mother image
                (the image we want to extract patches from) at the
                requested `resolution` and `units`; it is expected to
                be in (width, height) format.
ioconfig (:class:`IOSegmentorConfig`):
Object that contains information about input and output
placement of patches. Check `IOSegmentorConfig` for
details about available attributes.
Returns:
tuple:
List of patch inputs and outputs
                - :py:obj:`list` - patch_inputs:
                    A list of coordinates in `[start_x, start_y, end_x,
                    end_y]` format indicating the read location of the
                    patch in the mother image.
                - :py:obj:`list` - patch_outputs:
                    A list of coordinates in `[start_x, start_y, end_x,
                    end_y]` format indicating the write location of the
                    patch in the mother image.
Examples:
>>> # API of function expected to overwrite `get_coordinates`
>>> def func(image_shape, ioconfig):
... patch_inputs = np.array([[0, 0, 256, 256]])
... patch_outputs = np.array([[0, 0, 256, 256]])
... return patch_inputs, patch_outputs
>>> segmentor = SemanticSegmentor(model='unet')
>>> segmentor.get_coordinates = func
"""
(patch_inputs, patch_outputs) = PatchExtractor.get_coordinates(
image_shape=image_shape,
patch_input_shape=ioconfig.patch_input_shape,
patch_output_shape=ioconfig.patch_output_shape,
stride_shape=ioconfig.stride_shape,
)
return patch_inputs, patch_outputs
@staticmethod
def filter_coordinates(
mask_reader: VirtualWSIReader,
bounds: np.ndarray,
resolution: Union[float, int] = None,
units: str = None,
):
"""
        Indicate which coordinates are valid based on the mask.
        To use your own approach, either subclass to overwrite or
        directly assign your own function to this name. In either case,
        the function must obey the API defined here.
Args:
mask_reader (:class:`.VirtualReader`):
A virtual pyramidal reader of the mask related to the
WSI from which we want to extract the patches.
            bounds (:class:`numpy.ndarray`):
                Coordinates to be checked. They must be at the same
                resolution as the requested `resolution` and `units`.
                The shape of `bounds` is (N, 4) where N is the number
                of coordinate sets, each a bounding box of integer type
                in `[start_x, start_y, end_x, end_y]` format.
Returns:
:class:`numpy.ndarray`:
List of flags to indicate which coordinate is valid.
Examples:
>>> # API of function expected to overwrite `filter_coordinates`
>>> def func(reader, bounds, resolution, units):
... # as example, only select first bound
... return np.array([1, 0])
>>> coords = [[0, 0, 256, 256], [128, 128, 384, 384]]
>>> segmentor = SemanticSegmentor(model='unet')
>>> segmentor.filter_coordinates = func
"""
if not isinstance(mask_reader, VirtualWSIReader):
raise ValueError("`mask_reader` should be VirtualWSIReader.")
if not isinstance(bounds, np.ndarray) or not np.issubdtype(
bounds.dtype, np.integer
):
raise ValueError("`coordinates` should be ndarray of integer type.")
mask_real_shape = mask_reader.img.shape[:2]
mask_resolution_shape = mask_reader.slide_dimensions(
resolution=resolution, units=units
)[::-1]
mask_real_shape = np.array(mask_real_shape)
mask_resolution_shape = np.array(mask_resolution_shape)
scale_factor = mask_real_shape / mask_resolution_shape
scale_factor = scale_factor[0] # what if ratio x != y
def sel_func(coord: np.ndarray):
"""Accept coord as long as its box contains part of mask."""
coord_in_real_mask = np.ceil(scale_factor * coord).astype(np.int32)
start_x, start_y, end_x, end_y = coord_in_real_mask
roi = mask_reader.img[start_y:end_y, start_x:end_x]
return np.sum(roi > 0) > 0
flags = [sel_func(bound) for bound in bounds]
return np.array(flags)
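    # A runnable sketch of the default behaviour (hypothetical mask): a
    # bound is kept when any pixel of the mask region under it is non-zero.
    # >>> import numpy as np
    # >>> from tiatoolbox.wsicore.wsireader import VirtualWSIReader
    # >>> mask = np.zeros((100, 100), dtype=np.uint8)
    # >>> mask[:50, :50] = 1
    # >>> reader = VirtualWSIReader(mask, mode="bool")
    # >>> bounds = np.array([[0, 0, 50, 50], [50, 50, 100, 100]])
    # >>> SemanticSegmentor.filter_coordinates(
    # ...     reader, bounds, resolution=1.0, units="baseline")
    # array([ True, False])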
@staticmethod
def get_reader(img_path: str, mask_path: str, mode: str, auto_get_mask: bool):
"""Define how to get reader for mask and source image."""
img_path = pathlib.Path(img_path)
reader = WSIReader.open(img_path)
mask_reader = None
if mask_path is not None:
if not os.path.isfile(mask_path):
raise ValueError("`mask_path` must be a valid file path.")
mask = imread(mask_path) # assume to be gray
mask = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY)
mask = np.array(mask > 0, dtype=np.uint8)
mask_reader = VirtualWSIReader(mask)
mask_reader.info = reader.info
elif auto_get_mask and mode == "wsi" and mask_path is None:
# if no mask provided and `wsi` mode, generate basic tissue
# mask on the fly
mask_reader = reader.tissue_mask(resolution=1.25, units="power")
mask_reader.info = reader.info
return reader, mask_reader
def _predict_one_wsi(
self,
wsi_idx: int,
ioconfig: IOSegmentorConfig,
save_path: str,
mode: str,
):
"""Make a prediction on tile/wsi.
Args:
wsi_idx (int):
Index of the tile/wsi to be processed within `self`.
ioconfig (:class:`IOSegmentorConfig`):
Object which defines I/O placement during inference and
when assembling back to full tile/wsi.
save_path (str):
Location to save output prediction as well as possible
intermediate results.
mode (str):
Either `"tile"` or `"wsi"` to indicate run mode.
"""
cache_dir = f"{self._cache_dir}/{wsi_idx}/"
os.makedirs(cache_dir)
wsi_path = self.imgs[wsi_idx]
mask_path = None if self.masks is None else self.masks[wsi_idx]
wsi_reader, mask_reader = self.get_reader(
wsi_path, mask_path, mode, self.auto_generate_mask
)
# assume ioconfig has already been converted to `baseline` for `tile` mode
resolution = ioconfig.highest_input_resolution
wsi_proc_shape = wsi_reader.slide_dimensions(**resolution)
# * retrieve patch and tile placement
# this is in XY
(patch_inputs, patch_outputs) = self.get_coordinates(wsi_proc_shape, ioconfig)
if mask_reader is not None:
sel = self.filter_coordinates(mask_reader, patch_outputs, **resolution)
patch_outputs = patch_outputs[sel]
patch_inputs = patch_inputs[sel]
# modify the shared space so that we can update worker info
# without needing to re-create the worker. There should be no
# race-condition because only the following enumerate loop
# triggers the parallelism, and this portion is still in
# sequential execution order
patch_inputs = torch.from_numpy(patch_inputs).share_memory_()
patch_outputs = torch.from_numpy(patch_outputs).share_memory_()
self._mp_shared_space.patch_inputs = patch_inputs
self._mp_shared_space.patch_outputs = patch_outputs
self._mp_shared_space.wsi_idx = torch.Tensor([wsi_idx]).share_memory_()
pbar_desc = "Process Batch: "
pbar = tqdm.tqdm(
desc=pbar_desc,
leave=True,
total=int(len(self._loader)),
ncols=80,
ascii=True,
position=0,
)
cum_output = []
for _, batch_data in enumerate(self._loader):
sample_datas, sample_infos = batch_data
batch_size = sample_infos.shape[0]
# ! depending on the protocol of the output within infer_batch
# ! this may change, how to enforce/document/expose this in a
# ! sensible way?
# assume to return a list of L output,
# each of shape N x etc. (N=batch size)
sample_outputs = self.model.infer_batch(
self._model,
sample_datas,
self._on_gpu,
)
# repackage so that it's an N list, each contains
# L x etc. output
sample_outputs = [np.split(v, batch_size, axis=0) for v in sample_outputs]
sample_outputs = list(zip(*sample_outputs))
# tensor to numpy, costly?
sample_infos = sample_infos.numpy()
sample_infos = np.split(sample_infos, batch_size, axis=0)
sample_outputs = list(zip(sample_infos, sample_outputs))
if self.process_prediction_per_batch:
self._process_predictions(
sample_outputs, wsi_reader, ioconfig, save_path, cache_dir
)
else:
cum_output.extend(sample_outputs)
pbar.update()
pbar.close()
self._process_predictions(
cum_output, wsi_reader, ioconfig, save_path, cache_dir
)
# clean up the cache directories
shutil.rmtree(cache_dir)
def _process_predictions(
self,
cum_batch_predictions: List,
wsi_reader: WSIReader,
ioconfig: IOSegmentorConfig,
save_path: str,
cache_dir: str,
):
"""Define how the aggregated predictions are processed.
This includes merging the prediction if necessary and also saving afterwards.
Note that items within `cum_batch_predictions` will be consumed during
the operation.
Args:
cum_batch_predictions (list):
List of batch predictions. Each item within the list
should be of (location, patch_predictions).
wsi_reader (:class:`WSIReader`):
A reader for the image where the predictions come from.
ioconfig (:class:`IOSegmentorConfig`):
                A configuration object containing input and output
information.
save_path (str):
Root path to save current WSI predictions.
cache_dir (str):
Root path to cache current WSI data.
"""
if len(cum_batch_predictions) == 0:
return
# assume predictions is N, each item has L output element
locations, predictions = list(zip(*cum_batch_predictions))
        # Nx4 (N x [tl_x, tl_y, br_x, br_y]) denotes the location of the
        # output patch; this can exceed the image bound at the requested
        # resolution. Remove the singleton due to split.
locations = np.array([v[0] for v in locations])
for index, output_resolution in enumerate(ioconfig.output_resolutions):
# assume resolution index to be in the same order as L
merged_resolution = ioconfig.highest_input_resolution
merged_locations = locations
# ! location is w.r.t the highest resolution, hence still need conversion
if ioconfig.save_resolution is not None:
merged_resolution = ioconfig.save_resolution
output_shape = wsi_reader.slide_dimensions(**output_resolution)
merged_shape = wsi_reader.slide_dimensions(**merged_resolution)
fx = merged_shape[0] / output_shape[0]
merged_locations = np.ceil(locations * fx).astype(np.int64)
merged_shape = wsi_reader.slide_dimensions(**merged_resolution)
# 0 idx is to remove singleton without removing other axes singleton
to_merge_predictions = [v[index][0] for v in predictions]
sub_save_path = f"{save_path}.raw.{index}.npy"
sub_count_path = f"{cache_dir}/count.{index}.npy"
self.merge_prediction(
merged_shape[::-1], # XY to YX
to_merge_predictions,
merged_locations,
save_path=sub_save_path,
cache_count_path=sub_count_path,
)
@staticmethod
def merge_prediction(
canvas_shape: Union[Tuple[int], List[int], np.ndarray],
predictions: List[np.ndarray],
locations: Union[List, np.ndarray],
save_path: Union[str, pathlib.Path] = None,
cache_count_path: Union[str, pathlib.Path] = None,
):
"""Merge patch-level predictions to form a 2-dimensional prediction map.
        When accumulating the raw prediction onto the same canvas (via
calling the function multiple times), `save_path` and
`cache_count_path` must be the same. If either of these two do
not exist, the function will create new files. However, if
`save_path` is `None`, the function will perform the
accumulation using CPU-RAM as storage.
Args:
canvas_shape (:class:`numpy.ndarray`):
HW of the supposed assembled image.
predictions (list):
List of :class:`np.ndarray`, each item is a patch prediction,
                assumed to be of shape HWC.
locations (list):
List of :class:`np.ndarray`, each item is the location of the patch
at the same index within `predictions`. The location is
                within the canvas to be assembled and is of the form
                `(top_left_x, top_left_y, bottom_right_x,
                bottom_right_y)`.
save_path (str):
Location to save the assembled image.
cache_count_path (str):
Location to store the canvas for counting how many times
                each pixel gets overlapped when assembling.
Returns:
:class:`numpy.ndarray`:
                An image containing the merged data.
Examples:
>>> SemanticSegmentor.merge_prediction(
... canvas_shape=[4, 4],
... predictions=[
... np.full((2, 2), 1),
... np.full((2, 2), 2)],
... locations=[
... [0, 0, 2, 2],
... [2, 2, 4, 4]],
... save_path=None,
... )
            array([[1, 1, 0, 0],
                   [1, 1, 0, 0],
                   [0, 0, 2, 2],
                   [0, 0, 2, 2]])
"""
canvas_shape = np.array(canvas_shape)
sample_prediction = predictions[0]
if len(sample_prediction.shape) not in (2, 3):
raise ValueError(f"Prediction is no HW or HWC: {sample_prediction.shape}.")
(
canvas_cum_shape_,
canvas_count_shape_,
add_singleton_dim,
) = _estimate_canvas_parameters(sample_prediction, canvas_shape)
is_on_drive, count_canvas, cum_canvas = _prepare_save_output(
save_path, cache_count_path, canvas_cum_shape_, canvas_count_shape_
)
def index(arr, tl, br):
"""Helper to shorten indexing."""
return arr[tl[0] : br[0], tl[1] : br[1]]
patch_infos = list(zip(locations, predictions))
for _, patch_info in enumerate(patch_infos):
# position is assumed to be in XY coordinate
(bound_in_wsi, prediction) = patch_info
# convert to XY to YX, and in tl, br
tl_in_wsi = np.array(bound_in_wsi[:2][::-1])
br_in_wsi = np.array(bound_in_wsi[2:][::-1])
old_tl_in_wsi = tl_in_wsi.copy()
# need to do conversion
patch_shape_in_wsi = tuple(br_in_wsi - tl_in_wsi)
# conversion to make cv2 happy
prediction = prediction.astype(np.float32)
prediction = cv2.resize(prediction, patch_shape_in_wsi[::-1])
# ! cv2 resize will remove singleton !
if add_singleton_dim:
prediction = prediction[..., None]
sel = tl_in_wsi < 0
tl_in_wsi[sel] = 0
if np.any(tl_in_wsi >= canvas_shape):
continue
sel = br_in_wsi > canvas_shape
br_in_wsi[sel] = canvas_shape[sel]
# re-calibrate the position in case patch passing the image bound
br_in_patch = br_in_wsi - old_tl_in_wsi
patch_actual_shape = br_in_wsi - tl_in_wsi
tl_in_patch = br_in_patch - patch_actual_shape
# now cropping the prediction region
patch_pred = prediction[
tl_in_patch[0] : br_in_patch[0], tl_in_patch[1] : br_in_patch[1]
]
patch_count = np.ones(patch_pred.shape[:2])[..., None]
if not is_on_drive:
index(cum_canvas, tl_in_wsi, br_in_wsi)[:] += patch_pred
index(count_canvas, tl_in_wsi, br_in_wsi)[:] += patch_count
else:
old_avg_pred = np.array(index(cum_canvas, tl_in_wsi, br_in_wsi))
old_count = np.array(index(count_canvas, tl_in_wsi, br_in_wsi))
# ! there will be precision error, but we have to live with this
new_count = old_count + patch_count
# retrieve old raw probabilities after summation
old_raw_pred = old_avg_pred * old_count
new_avg_pred = (old_raw_pred + patch_pred) / new_count
index(cum_canvas, tl_in_wsi, br_in_wsi)[:] = new_avg_pred
index(count_canvas, tl_in_wsi, br_in_wsi)[:] = new_count
if not is_on_drive:
cum_canvas /= count_canvas + 1.0e-6
return cum_canvas
@staticmethod
def _prepare_save_dir(save_dir):
"""Prepare save directory and cache."""
if save_dir is None:
logger.warning(
"Segmentor will only output to directory. "
"All subsequent output will be saved to current runtime "
"location under folder 'output'. Overwriting may happen! ",
stacklevel=2,
)
save_dir = os.path.join(os.getcwd(), "output")
save_dir = os.path.abspath(save_dir)
save_dir = pathlib.Path(save_dir)
if save_dir.is_dir():
raise ValueError(f"`save_dir` already exists! {save_dir}")
save_dir.mkdir(parents=True)
cache_dir = f"{save_dir}/cache"
os.makedirs(cache_dir)
return save_dir, cache_dir
def _update_ioconfig(
self,
ioconfig,
mode,
patch_input_shape,
patch_output_shape,
stride_shape,
resolution,
units,
):
"""Update ioconfig according to input parameters.
Args:
ioconfig (:class:`IOSegmentorConfig`):
                Object that defines information about input and output
placement of patches. When provided,
`patch_input_shape`, `patch_output_shape`,
`stride_shape`, `resolution`, and `units` arguments are
ignored. Otherwise, those arguments will be internally
converted to a :class:`IOSegmentorConfig` object.
mode (str):
Type of input to process. Choose from either `tile` or
`wsi`.
patch_input_shape (tuple):
Size of patches input to the model. The values
are at requested read resolution and must be positive.
patch_output_shape (tuple):
Size of patches output by the model. The values are at
the requested read resolution and must be positive.
stride_shape (tuple):
                Stride used during tile and WSI processing. The values
are at requested read resolution and must be positive.
If not provided, `stride_shape=patch_input_shape` is
used.
resolution (float):
Resolution used for reading the image.
units (str):
Units of resolution used for reading the image. Choose
from either `"level"`, `"power"` or `"mpp"`.
Returns:
:class:`IOSegmentorConfig`:
Updated ioconfig.
"""
if patch_output_shape is None:
patch_output_shape = patch_input_shape
if stride_shape is None:
stride_shape = patch_output_shape
if ioconfig is None and patch_input_shape is None:
if self.ioconfig is None:
raise ValueError(
"Must provide either `ioconfig` or "
"`patch_input_shape` and `patch_output_shape`"
)
ioconfig = copy.deepcopy(self.ioconfig)
elif ioconfig is None:
ioconfig = IOSegmentorConfig(
input_resolutions=[{"resolution": resolution, "units": units}],
output_resolutions=[{"resolution": resolution, "units": units}],
patch_input_shape=patch_input_shape,
patch_output_shape=patch_output_shape,
stride_shape=stride_shape,
)
if mode == "tile":
logger.warning(
"WSIPatchDataset only reads image tile at "
'`units="baseline"`. Resolutions will be converted '
"to baseline value.",
stacklevel=2,
)
return ioconfig.to_baseline()
return ioconfig
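    # Fallback chain used above (illustrative): if only a hypothetical
    # `patch_input_shape=(1024, 1024)` is given, `patch_output_shape`
    # defaults to the input shape and `stride_shape` defaults to the
    # output shape, i.e. patches tile the image without overlap.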
def _prepare_workers(self):
"""Prepare number of workers."""
self._postproc_workers = None
if self.num_postproc_workers is not None:
self._postproc_workers = ProcessPoolExecutor(
max_workers=self.num_postproc_workers
)
def _memory_cleanup(self):
"""Memory clean up."""
self.imgs = None
self.masks = None
self._cache_dir = None
self._model = None
self._loader = None
self._on_gpu = None
self._futures = None
self._mp_shared_space = None
if self._postproc_workers is not None:
self._postproc_workers.shutdown()
self._postproc_workers = None
def _predict_wsi_handle_exception(
self, imgs, wsi_idx, img_path, mode, ioconfig, save_dir, crash_on_exception
):
"""Predict on multiple WSIs.
Args:
imgs (list, ndarray):
List of inputs to process. When using `"patch"` mode,
the input must be either a list of images, a list of
image file paths or a numpy array of an image list. When
using `"tile"` or `"wsi"` mode, the input must be a list
of file paths.
wsi_idx (int):
                Index of the current WSI being processed.
            img_path (str):
Path to current image.
mode (str):
Type of input to process. Choose from either `tile` or
`wsi`.
ioconfig (:class:`IOSegmentorConfig`):
                Object that defines information about input and output
placement of patches. When provided,
`patch_input_shape`, `patch_output_shape`,
`stride_shape`, `resolution`, and `units` arguments are
ignored. Otherwise, those arguments will be internally
converted to a :class:`IOSegmentorConfig` object.
save_dir (str or pathlib.Path):
Output directory when processing multiple tiles and
whole-slide images. By default, it is folder `output`
where the running script is invoked.
crash_on_exception (bool):
If `True`, the running loop will crash if there is any
error during processing a WSI. Otherwise, the loop will
move on to the next wsi for processing.
Returns:
list:
A list of tuple(input_path, save_path) where
`input_path` is the path of the input wsi while
`save_path` corresponds to the output predictions.
"""
try:
wsi_save_path = save_dir.joinpath(f"{wsi_idx}")
self._predict_one_wsi(wsi_idx, ioconfig, str(wsi_save_path), mode)
            # Do not use dict with file name as key, because it can be
            # overwritten. It may be the user's intention to provide files
            # with the same name multiple times (maybe they have
            # different root paths).
self._outputs.append([str(img_path), str(wsi_save_path)])
# ? will this corrupt old version if control + c midway?
map_file_path = os.path.join(save_dir, "file_map.dat")
# backup old version first
if os.path.exists(map_file_path):
old_map_file_path = os.path.join(save_dir, "file_map_old.dat")
shutil.copy(map_file_path, old_map_file_path)
joblib.dump(self._outputs, map_file_path)
# verbose mode, error by passing ?
logging.info("Finish: %d", wsi_idx / len(imgs))
logging.info("--Input: %s", str(img_path))
logging.info("--Output: %s", str(wsi_save_path))
# prevent deep source check because this is bypass and
# delegating error message
except Exception as err: # noqa: PIE786 # skipcq: PYL-W0703
wsi_save_path = save_dir.joinpath(f"{wsi_idx}")
if crash_on_exception:
raise err
logging.error("Crashed on %s", wsi_save_path)
def predict(
self,
imgs,
masks=None,
mode="tile",
on_gpu=True,
ioconfig=None,
patch_input_shape=None,
patch_output_shape=None,
stride_shape=None,
resolution=1.0,
units="baseline",
save_dir=None,
crash_on_exception=False,
):
"""Make a prediction for a list of input data.
        By default, if the model provided at object instantiation time
        is a pretrained model from the toolbox and `patch_input_shape`,
        `patch_output_shape`, `stride_shape`, `resolution`, `units` and
        `ioconfig` are all `None`, the method will use the `ioconfig`
        retrieved together with the pretrained model. Otherwise, either
        `ioconfig` or `patch_input_shape`, `patch_output_shape`,
        `stride_shape`, `resolution` and `units` must be set, else a
        `ValueError` will be raised.
Args:
imgs (list, ndarray):
List of inputs to process. When using `"patch"` mode,
the input must be either a list of images, a list of
image file paths or a numpy array of an image list. When
using `"tile"` or `"wsi"` mode, the input must be a list
of file paths.
masks (list):
List of masks. Only utilised when processing image tiles
and whole-slide images. Patches are only processed if
they are within a masked area. If not provided, then a
tissue mask will be automatically generated for
whole-slide images or the entire image is processed for
image tiles.
mode (str):
Type of input to process. Choose from either `tile` or
`wsi`.
ioconfig (:class:`IOSegmentorConfig`):
                Object that defines information about input and output
placement of patches. When provided,
`patch_input_shape`, `patch_output_shape`,
`stride_shape`, `resolution`, and `units` arguments are
ignored. Otherwise, those arguments will be internally
converted to a :class:`IOSegmentorConfig` object.
on_gpu (bool):
Whether to run the model on the GPU.
patch_input_shape (tuple):
Size of patches input to the model. The values
are at requested read resolution and must be positive.
patch_output_shape (tuple):
Size of patches output by the model. The values are at
the requested read resolution and must be positive.
stride_shape (tuple):
                Stride used during tile and WSI processing. The values
are at requested read resolution and must be positive.
If not provided, `stride_shape=patch_input_shape` is
used.
resolution (float):
Resolution used for reading the image.
units (str):
Units of resolution used for reading the image. Choose
from either `"level"`, `"power"` or `"mpp"`.
save_dir (str or pathlib.Path):
Output directory when processing multiple tiles and
whole-slide images. By default, it is folder `output`
where the running script is invoked.
crash_on_exception (bool):
If `True`, the running loop will crash if there is any
error during processing a WSI. Otherwise, the loop will
move on to the next wsi for processing.
Returns:
list:
A list of tuple(input_path, save_path) where
`input_path` is the path of the input wsi while
`save_path` corresponds to the output predictions.
Examples:
>>> # Sample output of a network
>>> wsis = ['A/wsi.svs', 'B/wsi.svs']
>>> predictor = SemanticSegmentor(model='fcn-tissue_mask')
>>> output = predictor.predict(wsis, mode='wsi')
            >>> output
            [('A/wsi.svs', 'output/0.raw'), ('B/wsi.svs', 'output/1.raw')]
            >>> # if a network has 2 output heads, each head output of 'A/wsi.svs'
>>> # will be respectively stored in 'output/0.raw.0', 'output/0.raw.1'
"""
if mode not in ["wsi", "tile"]:
raise ValueError(f"{mode} is not a valid mode. Use either `tile` or `wsi`.")
save_dir, self._cache_dir = self._prepare_save_dir(save_dir)
ioconfig = self._update_ioconfig(
ioconfig,
mode,
patch_input_shape,
patch_output_shape,
stride_shape,
resolution,
units,
)
# use external for testing
self._on_gpu = on_gpu
self._model = misc.model_to(on_gpu, self.model)
        # workers should be > 0 else ValueError will be thrown
self._prepare_workers()
mp_manager = torch_mp.Manager()
mp_shared_space = mp_manager.Namespace()
self._mp_shared_space = mp_shared_space
ds = self.dataset_class(
ioconfig=ioconfig,
preproc=self.model.preproc_func,
wsi_paths=imgs,
mp_shared_space=mp_shared_space,
mode=mode,
)
loader = torch_data.DataLoader(
ds,
drop_last=False,
batch_size=self.batch_size,
num_workers=self.num_loader_workers,
persistent_workers=self.num_loader_workers > 0,
)
self._loader = loader
self.imgs = imgs
self.masks = masks
# contain input / output prediction mapping
self._outputs = []
# ? what will happen if this crash midway?
# => may not be able to retrieve the result dict
for wsi_idx, img_path in enumerate(imgs):
self._predict_wsi_handle_exception(
imgs, wsi_idx, img_path, mode, ioconfig, save_dir, crash_on_exception
)
# clean up the cache directories
try:
shutil.rmtree(self._cache_dir)
except PermissionError: # pragma: no cover
logger.warning("Unable to remove %s", self._cache_dir)
self._memory_cleanup()
return self._outputs
class DeepFeatureExtractor(SemanticSegmentor):
"""Generic CNN Feature Extractor.
    An engine for using any CNN model as a feature extractor. Note, if
`model` is supplied in the arguments, it will ignore the
`pretrained_model` and `pretrained_weights` arguments.
Args:
model (nn.Module):
Use externally defined PyTorch model for prediction with
weights already loaded. Default is `None`. If provided,
`pretrained_model` argument is ignored.
pretrained_model (str):
Name of the existing models support by tiatoolbox for
processing the data. By default, the corresponding
pretrained weights will also be downloaded. However, you can
override with your own set of weights via the
`pretrained_weights` argument. Argument is case-insensitive.
Refer to
:class:`tiatoolbox.models.architecture.vanilla.CNNBackbone`
for list of supported pretrained models.
pretrained_weights (str):
Path to the weight of the corresponding `pretrained_model`.
batch_size (int):
Number of images fed into the model each time.
num_loader_workers (int):
Number of workers to load the data. Take note that they will
also perform preprocessing.
num_postproc_workers (int):
This value is there to maintain input compatibility with
`tiatoolbox.models.classification` and is not used.
verbose (bool):
Whether to output logging information.
dataset_class (obj):
Dataset class to be used instead of default.
auto_generate_mask(bool):
To automatically generate tile/WSI tissue mask if is not
provided.
Examples:
>>> # Sample output of a network
>>> from tiatoolbox.models.architecture.vanilla import CNNBackbone
>>> wsis = ['A/wsi.svs', 'B/wsi.svs']
>>> # create resnet50 with pytorch pretrained weights
>>> model = CNNBackbone('resnet50')
>>> predictor = DeepFeatureExtractor(model=model)
>>> output = predictor.predict(wsis, mode='wsi')
        >>> output
        [('A/wsi.svs', 'output/0'), ('B/wsi.svs', 'output/1')]
        >>> # If a network has 2 output heads, for 'A/wsi.svs',
>>> # there will be 3 outputs, and they are respectively stored at
>>> # 'output/0.position.npy' # will always be output
>>> # 'output/0.features.0.npy' # output of head 0
>>> # 'output/0.features.1.npy' # output of head 1
        >>> # Each file will contain the same number of items, and the item at each
>>> # index corresponds to 1 patch. The item in `.*position.npy` will
>>> # be the corresponding patch bounding box. The box coordinates are at
>>> # the inference resolution defined within the provided `ioconfig`.
"""
def __init__(
self,
batch_size: int = 8,
num_loader_workers: int = 0,
num_postproc_workers: int = 0,
model: torch.nn.Module = None,
pretrained_model: str = None,
pretrained_weights: str = None,
verbose: bool = True,
auto_generate_mask: bool = False,
dataset_class: Callable = WSIStreamDataset,
):
super().__init__(
batch_size=batch_size,
num_loader_workers=num_loader_workers,
num_postproc_workers=num_postproc_workers,
model=model,
pretrained_model=pretrained_model,
pretrained_weights=pretrained_weights,
verbose=verbose,
auto_generate_mask=auto_generate_mask,
dataset_class=dataset_class,
)
self.process_prediction_per_batch = False
def _process_predictions(
self,
cum_batch_predictions: List,
wsi_reader: WSIReader,
ioconfig: IOSegmentorConfig,
save_path: str,
cache_dir: str,
):
"""Define how the aggregated predictions are processed.
This includes merging the prediction if necessary and also
saving afterwards.
Args:
cum_batch_predictions (list):
List of batch predictions. Each item within the list
should be of (location, patch_predictions).
wsi_reader (:class:`WSIReader`):
A reader for the image where the predictions come from.
ioconfig (:class:`IOSegmentorConfig`):
                A configuration object containing input and output
information.
save_path (str):
Root path to save current WSI predictions.
cache_dir (str):
Root path to cache current WSI data.
"""
# assume prediction_list is N, each item has L output elements
location_list, prediction_list = list(zip(*cum_batch_predictions))
        # Nx4 (N x [tl_x, tl_y, br_x, br_y]) denotes the location of the
        # output patch; this can exceed the image bound at the requested
        # resolution. Remove the singleton due to split.
location_list = np.array([v[0] for v in location_list])
np.save(f"{save_path}.position.npy", location_list)
        for idx, _ in enumerate(ioconfig.output_resolutions):
            # assume resolution idx to be in the same order as L
            # 0 idx is to remove singleton without removing other axes singleton
            # use a fresh name so that later heads can still index the full list
            head_predictions = [v[idx][0] for v in prediction_list]
            head_predictions = np.array(head_predictions)
            np.save(f"{save_path}.features.{idx}.npy", head_predictions)
def predict(
self,
imgs,
masks=None,
mode="tile",
on_gpu=True,
ioconfig=None,
patch_input_shape=None,
patch_output_shape=None,
stride_shape=None,
resolution=1.0,
units="baseline",
save_dir=None,
crash_on_exception=False,
):
"""Make a prediction for a list of input data.
        By default, if the model provided at the time of object
        instantiation is a pretrained model from the toolbox and
        `patch_input_shape`, `patch_output_shape`, `stride_shape`,
        `resolution`, `units` and `ioconfig` are all `None`, the method
        will use the `ioconfig` retrieved together with the pretrained
        model. Otherwise, either `ioconfig` or `patch_input_shape`,
        `patch_output_shape`, `stride_shape`, `resolution` and `units`
        must be set, else a `ValueError` will be raised.
Args:
imgs (list, ndarray):
List of inputs to process. When using `"patch"` mode,
the input must be either a list of images, a list of
image file paths or a numpy array of an image list. When
using `"tile"` or `"wsi"` mode, the input must be a list
of file paths.
masks (list):
List of masks. Only utilised when processing image tiles
and whole-slide images. Patches are only processed if
they are within a masked area. If not provided, then a
tissue mask will be automatically generated for each
whole-slide image or all image tiles in the entire image
are processed.
mode (str):
Type of input to process. Choose from either `tile` or
`wsi`.
ioconfig (:class:`IOSegmentorConfig`):
Object that defines information about input and output
placement of patches. When provided,
`patch_input_shape`, `patch_output_shape`,
`stride_shape`, `resolution`, and `units` arguments are
ignored. Otherwise, those arguments will be internally
converted to a :class:`IOSegmentorConfig` object.
on_gpu (bool):
Whether to run the model on the GPU.
patch_input_shape (tuple):
Size of patches input to the model. The values are at
requested read resolution and must be positive.
patch_output_shape (tuple):
Size of patches output by the model. The values are at
the requested read resolution and must be positive.
stride_shape (tuple):
                Stride used during tile and WSI processing. The values
are at requested read resolution and must be positive.
If not provided, `stride_shape=patch_input_shape` is
used.
resolution (float):
Resolution used for reading the image.
units (str):
Units of resolution used for reading the image. Choose
from either `"level"`, `"power"` or `"mpp"`.
save_dir (str):
Output directory when processing multiple tiles and
whole-slide images. By default, it is folder `output`
where the running script is invoked.
crash_on_exception (bool):
If `True`, the running loop will crash if there is any
error during processing a WSI. Otherwise, the loop will
move on to the next wsi for processing.
Returns:
list:
A list of tuple(input_path, save_path) where
`input_path` is the path of the input wsi while
`save_path` corresponds to the output predictions.
Examples:
>>> # Sample output of a network
>>> from tiatoolbox.models.architecture.vanilla import CNNBackbone
>>> wsis = ['A/wsi.svs', 'B/wsi.svs']
>>> # create resnet50 with pytorch pretrained weights
>>> model = CNNBackbone('resnet50')
>>> predictor = DeepFeatureExtractor(model=model)
>>> output = predictor.predict(wsis, mode='wsi')
            >>> output
            [('A/wsi.svs', 'output/0'), ('B/wsi.svs', 'output/1')]
            >>> # If a network has 2 output heads, for 'A/wsi.svs',
>>> # there will be 3 outputs, and they are respectively stored at
>>> # 'output/0.position.npy' # will always be output
>>> # 'output/0.features.0.npy' # output of head 0
>>> # 'output/0.features.1.npy' # output of head 1
            >>> # Each file will contain the same number of items, and the item at each
>>> # index corresponds to 1 patch. The item in `.*position.npy` will
>>> # be the corresponding patch bounding box. The box coordinates are at
>>> # the inference resolution defined within the provided `ioconfig`.
"""
return super().predict(
imgs=imgs,
masks=masks,
mode=mode,
on_gpu=on_gpu,
ioconfig=ioconfig,
patch_input_shape=patch_input_shape,
patch_output_shape=patch_output_shape,
stride_shape=stride_shape,
resolution=resolution,
units=units,
save_dir=save_dir,
crash_on_exception=crash_on_exception,
)
| 64,566 | 39.633732 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/models/engine/__init__.py | """Engines to run models implemented in tiatoolbox."""
from tiatoolbox.models.engine import (
nucleus_instance_segmentor,
patch_predictor,
semantic_segmentor,
)
| 173 | 23.857143 | 54 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/visualization/__init__.py | """Visualization package for tiatoolbox."""
from tiatoolbox.visualization import tileserver
| 92 | 30 | 47 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/visualization/tileserver.py | """Simple Flask WSGI apps to display tiles as slippery maps."""
from __future__ import annotations
import io
import json
from pathlib import Path
from typing import Dict, List, Union
import numpy as np
from flask import Flask, Response, send_file
from flask.templating import render_template
from PIL import Image
from tiatoolbox import data
from tiatoolbox.annotation.storage import SQLiteStore
from tiatoolbox.tools.pyramid import AnnotationTileGenerator, ZoomifyGenerator
from tiatoolbox.utils.visualization import AnnotationRenderer, colourise_image
from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIReader
class TileServer(Flask):
"""A Flask app to display Zoomify tiles as a slippery map.
Args:
title (str):
The title of the tile server, displayed in the browser as
the page title.
layers (Dict[str, WSIReader | str] | List[WSIReader | str]):
A dictionary mapping layer names to image paths or
:obj:`WSIReader` objects to display. May also be a list, in
which case generic names 'layer-1', 'layer-2' etc. will be
used. If layer is a single-channel low-res overlay, it will
            be colourised using the 'viridis' colourmap.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> from tiatoolbox.visualization.tileserver import TileServer
>>> wsi = WSIReader.open("CMU-1.svs")
>>> app = TileServer(
... title="Testing TileServer",
... layers={
... "My SVS": wsi,
... },
... )
>>> app.run()
"""
def __init__(
self,
title: str,
layers: Union[Dict[str, Union[WSIReader, str]], List[Union[WSIReader, str]]],
renderer: AnnotationRenderer = None,
) -> None:
super().__init__(
__name__,
template_folder=data._local_sample_path(
Path("visualization") / "templates"
),
static_url_path="",
static_folder=data._local_sample_path(Path("visualization") / "static"),
)
self.tia_title = title
self.tia_layers = {}
self.tia_pyramids = {}
        self.renderer = renderer  # only used if a layer is rendered from a store.
# Generic layer names if none provided.
if isinstance(layers, list):
layers = {f"layer-{i}": p for i, p in enumerate(layers)}
# Set up the layer dict.
meta = None
for i, (key, layer) in enumerate(layers.items()):
layer = self._get_layer_as_wsireader(layer, meta)
self.tia_layers[key] = layer
if isinstance(layer, WSIReader):
self.tia_pyramids[key] = ZoomifyGenerator(layer)
else:
self.tia_pyramids[key] = layer # it's an AnnotationTileGenerator
if i == 0:
meta = layer.info # base slide info
self.route(
"/layer/<layer>/zoomify/TileGroup<int:tile_group>/"
"<int:z>-<int:x>-<int:y>.jpg"
)(
self.zoomify,
)
self.route("/")(self.index)
def _get_layer_as_wsireader(self, layer, meta):
"""Gets appropriate image provider for layer.
Args:
layer (str | ndarray | WSIReader):
A reference to an image or annotations to be displayed.
meta (WSIMeta):
The metadata of the base slide.
Returns:
WSIReader or AnnotationTileGenerator:
The appropriate image source for the layer.
"""
if isinstance(layer, (str, Path)):
layer_path = Path(layer)
if layer_path.suffix in [".jpg", ".png"]:
# Assume it's a low-res heatmap.
layer = np.array(Image.open(layer_path))
elif layer_path.suffix == ".db":
# Assume it's an annotation store.
layer = AnnotationTileGenerator(
meta, SQLiteStore(layer_path), self.renderer
)
elif layer_path.suffix == ".geojson":
# Assume annotations in geojson format
layer = AnnotationTileGenerator(
meta,
SQLiteStore.from_geojson(layer_path),
self.renderer,
)
else:
# Assume it's a WSI.
return WSIReader.open(layer_path)
if isinstance(layer, np.ndarray):
# Make into rgb if single channel.
layer = colourise_image(layer)
return VirtualWSIReader(layer, info=meta)
return layer
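    # Dispatch summary for the method above (file names are hypothetical):
    # "overlay.png" / "overlay.jpg" -> low-res heatmap -> VirtualWSIReader
    # "annotations.db" or "annotations.geojson" -> AnnotationTileGenerator
    # any other path, e.g. "slide.svs" -> WSIReader.open
    # an ndarray -> colourised (if single channel) -> VirtualWSIReader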
def zoomify(
self, layer: str, tile_group: int, z: int, x: int, y: int # skipcq: PYL-w0613
) -> Response:
"""Serve a Zoomify tile for a particular layer.
Note that this should not be called directly, but will be called
automatically by the Flask framework when a client requests a
tile at the registered URL.
Args:
layer (str):
The layer name.
tile_group (int):
The tile group. Currently unused.
z (int):
The zoom level.
x (int):
The x coordinate.
y (int):
The y coordinate.
Returns:
flask.Response:
The tile image response.
"""
try:
pyramid = self.tia_pyramids[layer]
except KeyError:
return Response("Layer not found", status=404)
try:
tile_image = pyramid.get_tile(level=z, x=x, y=y)
except IndexError:
return Response("Tile not found", status=404)
image_io = io.BytesIO()
tile_image.save(image_io, format="webp")
image_io.seek(0)
return send_file(image_io, mimetype="image/webp")
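    # Example request resolved by the route registered in __init__
    # (layer name and coordinates are hypothetical):
    # GET /layer/My%20SVS/zoomify/TileGroup0/0-0-0.jpg
    # returns the lowest-zoom tile of layer "My SVS", encoded as WebP
    # despite the .jpg extension in the URL pattern.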
def index(self) -> Response:
"""Serve the index page.
Returns:
flask.Response:
The index page.
"""
layers = [
{
"name": name,
"url": f"/layer/{name}/zoomify/{{TileGroup}}/{{z}}-{{x}}-{{y}}.jpg",
"size": [int(x) for x in layer.info.slide_dimensions],
"mpp": float(np.mean(layer.info.mpp)),
}
for name, layer in self.tia_layers.items()
]
return render_template(
"index.html", title=self.tia_title, layers=json.dumps(layers)
)
| 6,580 | 32.576531 | 86 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/utils/visualization.py | """Visualisation and overlay functions used in tiatoolbox."""
import colorsys
import random
from typing import Dict, List, Tuple, Union
import cv2
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colormaps
from numpy.typing import ArrayLike
from PIL import Image, ImageFilter, ImageOps
from shapely import speedups
from shapely.geometry import Polygon
from tiatoolbox import logger
from tiatoolbox.annotation.storage import Annotation, AnnotationStore
if speedups.available: # pragma: no branch
speedups.enable()
def random_colors(num_colors, bright=True):
"""Generate a number of random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
Args:
num_colors(int):
Number of perceptively different colors to generate.
bright(bool):
            Whether to use bright colors or not.
Returns:
list:
List of (r, g, b) colors.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / num_colors, 1, brightness) for i in range(num_colors)]
colors = [colorsys.hsv_to_rgb(*c) for c in hsv]
random.shuffle(colors)
return colors
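# Quick usage sketch (output is random by design):
# >>> colors = random_colors(3)
# >>> len(colors), len(colors[0])
# (3, 3)
# Each entry is an (r, g, b) tuple with components in [0, 1].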
def colourise_image(img, cmap="viridis"):
"""If input img is single channel, colourise it.
Args:
img(ndarray):
Single channel or RGB image as ndarray.
cmap(str):
Colormap to use, must be a valid matplotlib cmap string.
Returns:
img(ndarray): An RGB image.
"""
if len(img.shape) == 2:
# Single channel, make into rgb with colormap.
c_map = colormaps[cmap]
im_rgb = (c_map(img) * 255).astype(np.uint8)
return im_rgb[:, :, :3]
# Already rgb, return unaltered
return img
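# Minimal sketch of both branches above (hypothetical arrays):
# >>> import numpy as np
# >>> colourise_image(np.zeros((4, 4), dtype=np.float32)).shape
# (4, 4, 3)
# >>> rgb = np.zeros((4, 4, 3), dtype=np.uint8)
# >>> colourise_image(rgb) is rgb  # RGB input is returned unaltered
# True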
def overlay_prediction_mask(
img: np.ndarray,
prediction: np.ndarray,
alpha: float = 0.35,
label_info: dict = None,
min_val: float = 0.0,
ax=None,
return_ax: bool = True,
):
"""Generate an overlay, given a 2D prediction map.
Args:
img (ndarray):
Input image to overlay the results on top of.
prediction (ndarray):
2D prediction map. Multi-class prediction should have values
ranging from 0 to N-1, where N is the number of classes.
label_info (dict):
A dictionary containing the mapping for each integer value
within `prediction` to its string and color. [int] : (str,
            (int, int, int)). By default, the integer will be taken as the
            label and the color will be random.
min_val (float):
Only consider predictions greater than or equal to
`min_val`. Otherwise, the original WSI in those regions will
be displayed.
alpha (float):
Opacity value used for the overlay.
ax (ax):
Matplotlib ax object.
return_ax (bool):
Whether to return the matplotlib ax object. If not, then the
overlay array will be returned.
Returns:
If return_ax is True, return the matplotlib ax object. Else,
return the overlay array.
"""
# Validate inputs
if img.shape[:2] != prediction.shape[:2]:
raise ValueError(
f"Mismatch shape "
f"`img` {img.shape[:2]} vs `prediction` {prediction.shape[:2]}."
)
if np.issubdtype(img.dtype, np.floating):
if not (img.max() <= 1.0 and img.min() >= 0):
raise ValueError("Not support float `img` outside [0, 1].")
img = np.array(img * 255, dtype=np.uint8)
    # If `min_val` is defined, only display the overlay for areas with pred >= min_val
if min_val > 0:
prediction_sel = prediction >= min_val
overlay = img.copy()
predicted_classes = sorted(np.unique(prediction).tolist())
# Generate random colours if None are given
rand_state = np.random.get_state()
np.random.seed(123)
label_info = label_info or { # Use label_info if provided OR generate
label_uid: (str(label_uid), np.random.randint(0, 255, 3))
for label_uid in predicted_classes
}
np.random.set_state(rand_state)
# Validate label_info
missing_label_uids = _validate_label_info(label_info, predicted_classes)
if len(missing_label_uids) != 0:
raise ValueError(f"Missing label for: {missing_label_uids}.")
rgb_prediction = np.zeros(
[prediction.shape[0], prediction.shape[1], 3], dtype=np.uint8
)
for label_uid, (_, overlay_rgb) in label_info.items():
sel = prediction == label_uid
rgb_prediction[sel] = overlay_rgb
# Add the overlay
cv2.addWeighted(rgb_prediction, alpha, overlay, 1 - alpha, 0, overlay)
overlay = overlay.astype(np.uint8)
if min_val > 0.0:
overlay[~prediction_sel] = img[~prediction_sel]
if ax is None and not return_ax:
return overlay
# Create colorbar parameters
name_list, color_list = zip(*label_info.values()) # Unzip values
color_list = np.array(color_list) / 255
uid_list = list(label_info.keys())
cmap = mpl.colors.ListedColormap(color_list)
colorbar_params = {
"mappable": mpl.cm.ScalarMappable(cmap=cmap),
"boundaries": uid_list + [uid_list[-1] + 1],
"values": uid_list,
"ticks": [b + 0.5 for b in uid_list],
"spacing": "proportional",
"orientation": "vertical",
}
# Generate another ax, else using the provided
if ax is None:
_, ax = plt.subplots()
ax.imshow(overlay)
ax.axis("off")
# Generate colour bar
cbar = plt.colorbar(**colorbar_params, ax=ax)
cbar.ax.set_yticklabels(name_list)
cbar.ax.tick_params(labelsize=12)
return ax
def _validate_label_info(
label_info: Dict[int, Tuple[str, ArrayLike]], predicted_classes
) -> List[int]:
"""Validate the label_info dictionary.
Args:
label_info (dict):
A dictionary containing the mapping for each integer value
within `prediction` to its string and color. [int] : (str,
(int, int, int)).
predicted_classes (list):
List of predicted classes.
Raises:
ValueError:
If the label_info dictionary is not valid.
Returns:
list:
List of missing label UIDs.
"""
# May need better error messages
check_uid_list = predicted_classes.copy()
for label_uid, (label_name, label_colour) in label_info.items():
if label_uid in check_uid_list:
check_uid_list.remove(label_uid)
if not isinstance(label_uid, int):
raise ValueError(
"Wrong `label_info` format: label_uid "
f"{[label_uid, (label_name, label_colour)]}"
)
if not isinstance(label_name, str):
raise ValueError(
"Wrong `label_info` format: label_name "
f"{[label_uid, (label_name, label_colour)]}"
)
if not isinstance(label_colour, (tuple, list, np.ndarray)):
raise ValueError(
"Wrong `label_info` format: label_colour "
f"{[label_uid, (label_name, label_colour)]}"
)
if len(label_colour) != 3:
raise ValueError(
"Wrong `label_info` format: label_colour "
f"{[label_uid, (label_name, label_colour)]}"
)
return check_uid_list
def overlay_probability_map(
img: np.ndarray,
prediction: np.ndarray,
alpha: float = 0.35,
colour_map: str = "jet",
min_val: float = 0.0,
ax=None,
return_ax: bool = True,
):
"""Generate an overlay, given a 2D prediction map.
Args:
        img (ndarray):
            Input image to overlay the results on top of. Assumed to be
            an RGB image (HWC).
prediction (ndarray):
2D prediction map. Values are expected to be between 0-1.
alpha (float):
Opacity value used for the overlay.
colour_map (string):
The colour map to use for the heatmap. `jet` is used as the
default.
min_val (float):
Only consider pixels that are greater than or equal to
`min_val`. Otherwise, the original WSI in those regions will
be displayed.
ax (ax):
Matplotlib axis object.
return_ax (bool):
Whether to return the matplotlib ax object. If not, then the
overlay array will be returned.
Returns:
If return_ax is True, return the matplotlib ax object. Else,
return the overlay array.
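    Examples:
        >>> # Illustrative sketch (not part of the original docstring);
        >>> # small synthetic inputs stand in for a real image and heatmap.
        >>> import numpy as np
        >>> img = np.zeros((10, 10, 3), dtype=np.uint8)
        >>> prob_map = np.random.rand(10, 10)
        >>> overlay = overlay_probability_map(img, prob_map, return_ax=False)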
"""
prediction = prediction.astype(np.float32)
img = _validate_overlay_probability_map(img, prediction, min_val)
prediction_sel = prediction >= min_val
overlay = img.copy()
cmap = plt.get_cmap(colour_map)
prediction = np.squeeze(prediction.astype("float32"))
# Take RGB from RGBA heat map
rgb_prediction = (cmap(prediction)[..., :3] * 255).astype("uint8")
# Add the overlay
overlay = (1 - alpha) * rgb_prediction + alpha * overlay
overlay[overlay > 255.0] = 255.0
overlay = overlay.astype(np.uint8)
if min_val > 0.0:
overlay[~prediction_sel] = img[~prediction_sel]
if ax is None and not return_ax:
return overlay
colorbar_params = {
"mappable": mpl.cm.ScalarMappable(cmap="jet"),
"spacing": "proportional",
"orientation": "vertical",
}
# generate another ax, else using the provided
if ax is None:
_, ax = plt.subplots()
ax.imshow(overlay)
ax.axis("off")
# generate colour bar
cbar = plt.colorbar(**colorbar_params, ax=ax)
cbar.ax.tick_params(labelsize=12)
return ax
def _validate_overlay_probability_map(img, prediction, min_val) -> np.ndarray:
"""Validate the input for the overlay_probability_map function.
Args:
img (:class:`numpy.ndarray`):
Input image to overlay the results on top of. Assumed to be
HW.
prediction (:class:`numpy.ndarray`):
2D prediction map. Values are expected to be between 0-1.
min_val (float):
Only consider pixels that are greater than or equal to
`min_val`. Otherwise, the original WSI in those regions will
be displayed.
Raises:
ValueError:
If the input is not valid.
Returns:
:class:`numpy.ndarray`:
Input image. May be modified if `min_val` has dtype float.
"""
if prediction.ndim != 2:
raise ValueError("The input prediction must be 2-dimensional of the form HW.")
if img.shape[:2] != prediction.shape[:2]:
raise ValueError(
f"Mismatch shape `img` {img.shape[:2]}"
f" vs `prediction` {prediction.shape[:2]}."
)
    if prediction.max() > 1.0 or prediction.min() < 0:
        raise ValueError("Float `prediction` outside [0, 1] is not supported.")
    if not 0.0 <= min_val <= 1.0:
        raise ValueError(f"`min_val={min_val}` is not in the range [0, 1].")
    if np.issubdtype(img.dtype, np.floating):
        if img.max() > 1.0 or img.min() < 0:
            raise ValueError("Float `img` outside [0, 1] is not supported.")
        return np.array(img * 255, dtype=np.uint8)
return img
def overlay_prediction_contours(
canvas: np.ndarray,
inst_dict: dict,
draw_dot: bool = False,
type_colours: dict = None,
inst_colours: Union[np.ndarray, Tuple[int]] = (255, 255, 0),
line_thickness: int = 2,
):
"""Overlaying instance contours on image.
Internally, colours from `type_colours` are prioritized over
`inst_colours`. However, if `inst_colours` is `None` and
`type_colours` is not provided, random colour is generated for each
instance.
Args:
canvas (:class:`numpy.ndarray`):
Image to draw predictions on.
inst_dict (dict):
Dictionary of instances. It is expected to be in the
following format: `{instance_id: {type: int, contour:
List[List[int]], centroid:List[float]}`.
draw_dot (bool):
To draw a dot for each centroid or not.
type_colours (dict):
A dict of {type_id : (type_name, colour)}, `type_id` is from
0 to N and `colour` is a tuple of `(r, g, b)`.
inst_colours (tuple, np.ndarray):
A colour to assign for all instances, or a list of colours
to assigned for each instance in `inst_dict`. By default,
all instances will have RGB colour `(255, 255, 0)`.
        line_thickness (int):
Line thickness of contours.
Returns:
:class:`numpy.ndarray`:
The overlaid image.
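    Examples:
        >>> # Illustrative sketch (not part of the original docstring);
        >>> # a single synthetic instance is drawn onto a blank canvas.
        >>> import numpy as np
        >>> canvas = np.zeros((64, 64, 3), dtype=np.uint8)
        >>> contour = np.array(
        ...     [[10, 10], [10, 30], [30, 30], [30, 10]], dtype=np.int32
        ... )
        >>> inst_dict = {0: {"type": 0, "contour": contour, "centroid": [20, 20]}}
        >>> out = overlay_prediction_contours(canvas, inst_dict, draw_dot=True)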
"""
overlay = np.copy(canvas)
if inst_colours is None:
inst_colours = random_colors(len(inst_dict))
inst_colours = np.array(inst_colours) * 255
inst_colours = inst_colours.astype(np.uint8)
elif isinstance(inst_colours, tuple):
inst_colours = np.array([inst_colours] * len(inst_dict))
elif not isinstance(inst_colours, np.ndarray):
raise ValueError(
f"`inst_colours` must be np.ndarray or tuple: {type(inst_colours)}"
)
inst_colours = inst_colours.astype(np.uint8)
for idx, [_, inst_info] in enumerate(inst_dict.items()):
inst_contour = inst_info["contour"]
if "type" in inst_info and type_colours is not None:
inst_colour = type_colours[inst_info["type"]][1]
else:
inst_colour = (inst_colours[idx]).tolist()
cv2.drawContours(
overlay, [np.array(inst_contour)], -1, inst_colour, line_thickness
)
if draw_dot:
inst_centroid = inst_info["centroid"]
inst_centroid = tuple(int(v) for v in inst_centroid)
overlay = cv2.circle(overlay, inst_centroid, 3, (255, 0, 0), -1)
return overlay
def plot_graph(
canvas: np.ndarray,
nodes: np.ndarray,
edges: np.ndarray,
node_colors: Union[Tuple[int], np.ndarray] = (255, 0, 0),
node_size: int = 5,
edge_colors: Union[Tuple[int], np.ndarray] = (0, 0, 0),
edge_size: int = 5,
):
"""Drawing a graph onto a canvas.
Args:
canvas (np.ndarray):
Canvas to be drawn upon.
nodes (np.ndarray):
List of nodes, expected to be Nx2 where N is the number of
nodes. Each node is expected to be of `(x, y)` and should be
within the height and width of the canvas.
edges (np.ndarray):
List of edges, expected to be Mx2 where M is the number of
edges. Each edge is defined as a pair of indexes `(from,
to)`, where each corresponds to a node of within `nodes`.
node_colors (tuple or np.ndarray):
A color or list of node colors. Each color is expected to be
`(r, g, b)` and is between 0 and 255.
edge_colors (tuple or np.ndarray):
A color or list of node colors. Each color is expected to be
`(r, g, b)` and is between 0 and 255.
node_size (int):
Radius of each node.
edge_size (int):
Line width of the edge.
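    Returns:
        np.ndarray:
            The canvas with the graph drawn on it.
    Examples:
        >>> # Illustrative sketch (not part of the original docstring);
        >>> # a minimal two-node graph on a blank canvas.
        >>> import numpy as np
        >>> canvas = np.zeros((64, 64, 3), dtype=np.uint8)
        >>> nodes = np.array([[10, 10], [50, 50]])
        >>> edges = np.array([[0, 1]])
        >>> canvas = plot_graph(canvas, nodes, edges)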
"""
if isinstance(node_colors, tuple):
node_colors = [node_colors] * len(nodes)
if isinstance(edge_colors, tuple):
edge_colors = [edge_colors] * len(edges)
# draw the edges
def to_int_tuple(x):
"""Helper to convert to tuple of int."""
return tuple(int(v) for v in x)
for idx, (src, dst) in enumerate(edges):
src = to_int_tuple(nodes[src])
dst = to_int_tuple(nodes[dst])
color = to_int_tuple(edge_colors[idx])
cv2.line(canvas, src, dst, color, thickness=edge_size)
# draw the nodes
for idx, node in enumerate(nodes):
node = to_int_tuple(node)
color = to_int_tuple(node_colors[idx])
cv2.circle(canvas, node, node_size, color, thickness=-1)
return canvas
class AnnotationRenderer:
"""Renderer containing information and methods to render annotations
from an AnnotationStore to a tile.
Args:
score_prop (str):
A key that is present in the properties of annotations
to be rendered that will be used to color rendered annotations.
mapper (str, Dict or List):
A dictionary or colormap used to color annotations according
to the value of properties[score_prop] of an annotation. Should
be either a matplotlib colormap, a string which is a name of a
matplotlib colormap, a dict of possible property {value: color}
pairs, or a list of categorical property values (in which case a
dict will be created with a random color generated for each
category)
where (str or Callable):
a callable or predicate which will be passed on to
AnnotationStore.query() when fetching annotations to be rendered
(see AnnotationStore for more details)
score_fn (Callable):
an optional callable which will be called on the value of
the property that will be used to generate the color before giving
it to colormap. Use it for example to normalise property
values if they do not fall into the range [0,1], as matplotlib
            colormap expects values in this range, i.e. roughly speaking,
annotation_color=mapper(score_fn(ann.properties[score_prop]))
max_scale (int):
downsample level above which Polygon geometries on crowded
tiles will be rendered as a bounding box instead
zoomed_out_strat (int, str):
strategy to use when rendering zoomed out tiles at
a level above max_scale. Can be one of 'decimate', 'scale', or a number
            which defines the minimum area an object has to cover to be rendered
while zoomed out above max_scale.
thickness (int):
line thickness of rendered contours. -1 will render filled
contours.
edge_thickness (int):
line thickness of rendered edges.
        secondary_cmap (dict):
a dictionary of the form {"type": some_type,
"score_prop": a property name, "mapper": a matplotlib cmap object}.
For annotations of the specified type, the given secondary colormap
will override the primary colormap.
blur_radius (int):
radius of gaussian blur to apply to rendered annotations.
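    Examples:
        >>> # Illustrative sketch (not part of the original docstring);
        >>> # `store` is assumed to be an AnnotationStore whose annotations
        >>> # carry a numeric "score" property.
        >>> renderer = AnnotationRenderer(score_prop="score", mapper="viridis")
        >>> tile = renderer.render_annotations(store, (0, 0, 1024, 1024), scale=1)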
"""
def __init__(
self,
score_prop=None,
mapper=None,
where=None,
score_fn=lambda x: x,
max_scale=8,
zoomed_out_strat=10000,
thickness=-1,
edge_thickness=1,
secondary_cmap=None,
blur_radius=0,
score_prop_edge=None,
):
if mapper is None:
mapper = colormaps["jet"]
if isinstance(mapper, str) and mapper != "categorical":
mapper = colormaps[mapper]
if isinstance(mapper, list):
colors = random_colors(len(mapper))
mapper = {key: (*color, 1) for key, color in zip(mapper, colors)}
if isinstance(mapper, dict):
self.mapper = lambda x: mapper[x]
else:
self.mapper = mapper
self.score_prop = score_prop
self.score_prop_edge = score_prop_edge
self.where = where
self.score_fn = score_fn
self.max_scale = max_scale
self.info = {"mpp": None}
self.thickness = thickness
self.edge_thickness = edge_thickness
self.zoomed_out_strat = zoomed_out_strat
self.secondary_cmap = secondary_cmap
self.blur_radius = blur_radius
if blur_radius > 0:
self.blur = ImageFilter.GaussianBlur(blur_radius)
self.edge_thickness = 0
else:
self.blur = None
@staticmethod
def to_tile_coords(coords: List, top_left: Tuple[float, float], scale: float):
"""Return coords relative to top left of tile, as array suitable for cv2.
Args:
coords (List):
List of coordinates in the form [x, y].
top_left (tuple):
The top left corner of the tile in wsi.
scale (float):
The zoom scale at which we are rendering.
Returns:
np.array:
Array of coordinates in tile space in the form [x, y].
"""
return np.squeeze(((np.array(coords) - top_left) / scale).astype(np.int32))
def get_color(self, annotation: Annotation, edge=False):
"""Get the color for an annotation.
Args:
            annotation (Annotation):
                Annotation to get the color for.
            edge (bool):
                Whether to return the edge color (controlled by
                `score_prop_edge`) rather than the fill color.
Returns:
tuple:
A color tuple (rgba).
"""
if edge:
score_prop = self.score_prop_edge
else:
score_prop = self.score_prop
try:
if (
self.secondary_cmap is not None
and "type" in annotation.properties
and annotation.properties["type"] == self.secondary_cmap["type"]
):
# use secondary colormap to color annotations of specific type
return tuple(
int(c * 255)
for c in self.secondary_cmap["mapper"](
self.score_fn(
annotation.properties[self.secondary_cmap["score_prop"]]
)
)
)
if score_prop == "color":
# use colors directly specified in annotation properties
return (*[int(255 * c) for c in annotation.properties["color"]], 255)
if score_prop is not None:
return tuple(
int(c * 255)
for c in self.mapper(
self.score_fn(annotation.properties[score_prop])
)
)
except KeyError:
logger.warning(
"'score_prop' not found in properties. Using default color.",
stacklevel=2,
)
if edge:
return (0, 0, 0, 255) # default to black for edge
return 0, 255, 0, 255 # default color if no score_prop given
def render_poly(
self,
tile: np.ndarray,
annotation: Annotation,
top_left: Tuple[float, float],
scale: float,
):
"""Render a polygon annotation onto a tile using cv2.
Args:
tile (ndarray):
The rgb(a) tile image to render onto.
annotation (Annotation):
The annotation to render.
top_left (tuple):
The top left corner of the tile in wsi.
scale (float):
The zoom scale at which we are rendering.
"""
col = self.get_color(annotation)
cnt = self.to_tile_coords(annotation.geometry.exterior.coords, top_left, scale)
if self.thickness > -1:
cv2.drawContours(
tile, [cnt], 0, col, self.edge_thickness, lineType=cv2.LINE_8
)
else:
cv2.drawContours(tile, [cnt], 0, col, self.thickness, lineType=cv2.LINE_8)
if self.thickness == -1 and self.edge_thickness > 0:
edge_col = self.get_color(annotation, True)
cv2.drawContours(tile, [cnt], 0, edge_col, 1, lineType=cv2.LINE_8)
def render_multipoly(self, tile, annotation, top_left, scale):
"""render a multipolygon annotation onto a tile using cv2"""
col = self.get_color(annotation)
for poly in annotation.geometry.geoms:
cnt = self.to_tile_coords(poly.exterior.coords, top_left, scale)
cv2.drawContours(tile, [cnt], 0, col, self.thickness, lineType=cv2.LINE_8)
def render_pt(
self,
tile: np.ndarray,
annotation: Annotation,
top_left: Tuple[float, float],
scale: float,
):
"""Render a point annotation onto a tile using cv2.
Args:
tile (ndarray):
The rgb(a) tile image to render onto.
annotation (Annotation):
The annotation to render.
top_left (tuple):
The top left corner of the tile in wsi.
scale (float):
The zoom scale at which we are rendering.
"""
col = self.get_color(annotation)
cv2.circle(
tile,
self.to_tile_coords(list(annotation.geometry.coords), top_left, scale),
np.maximum(self.edge_thickness, 1),
col,
thickness=self.thickness,
)
def render_line(
self,
tile: np.ndarray,
annotation: Annotation,
top_left: Tuple[float, float],
scale: float,
):
"""Render a line annotation onto a tile using cv2.
Args:
tile (ndarray):
The rgb(a) tile image to render onto.
annotation (Annotation):
The annotation to render.
top_left (tuple):
The top left corner of the tile in wsi.
scale (float):
The zoom scale at which we are rendering.
"""
col = self.get_color(annotation)
cv2.polylines(
tile,
[self.to_tile_coords(list(annotation.geometry.coords), top_left, scale)],
False,
col,
thickness=3,
)
def __setattr__(self, __name: str, __value) -> None:
if __name == "blur_radius":
# need to change additional settings
if __value > 0:
self.__dict__["blur"] = ImageFilter.GaussianBlur(__value)
self.__dict__["edge_thickness"] = 0
else:
self.__dict__["blur"] = None
self.__dict__["edge_thickness"] = self.__dict__["edge_thickness_old"]
elif __name == "edge_thickness":
self.__dict__["edge_thickness_old"] = __value
self.__dict__[__name] = __value
def render_annotations(
self,
store: AnnotationStore,
bounds: Tuple[float, float, float, float],
scale: float,
res: int = 1,
border: int = 0,
):
"""Render annotations within given bounds.
This gets annotations as bounding boxes or geometries according to
zoom level, and renders them. Large collections of small
annotation geometries are decimated if appropriate.
        Args:
            store (AnnotationStore):
                The annotation store from which to render annotations.
            bounds (tuple(float)):
                The (left, top, right, bottom) bounds of the tile.
            scale (float):
                The scale at which we are rendering the tile.
            res (int):
                Resolution multiplier for the rendered tile.
            border (int):
                Number of border pixels to crop from the rendered tile
                (used together with blurring to avoid edge effects).
Returns:
np.ndarray:
The tile with the annotations rendered.
"""
bound_geom = Polygon.from_bounds(*bounds)
top_left = np.array(bounds[:2])
output_size = [
int((bounds[3] - bounds[1]) / scale),
int((bounds[2] - bounds[0]) / scale),
]
mpp_sf = (
np.minimum(self.info["mpp"][0] / 0.25, 1)
if self.info["mpp"] is not None
else 1
)
min_area = 0.0005 * (output_size[0] * output_size[1]) * (scale * mpp_sf) ** 2
tile = np.zeros((output_size[0] * res, output_size[1] * res, 4), dtype=np.uint8)
if scale <= self.max_scale:
# get all annotations
anns = store.query(
bound_geom,
self.where,
geometry_predicate="bbox_intersects",
)
for ann in anns.values():
self.render_by_type(tile, ann, top_left, scale / res)
elif self.zoomed_out_strat == "decimate":
# do decimation on small annotations
decimate = int(scale / self.max_scale) + 1
bounding_boxes = store.bquery(
bound_geom,
self.where,
)
for i, (key, box) in enumerate(bounding_boxes.items()):
area = (box[0] - box[2]) * (box[1] - box[3])
if area > min_area or i % decimate == 0:
ann = store[key]
self.render_by_type(tile, ann, top_left, scale / res)
else:
# Get only annotations > min_area. Plot them all
anns = store.query(
bound_geom,
self.where,
min_area=min_area,
geometry_predicate="bbox_intersects",
)
for ann in anns.values():
self.render_by_type(tile, ann, top_left, scale / res)
if self.blur is None:
return tile
return np.array(
ImageOps.crop(Image.fromarray(tile).filter(self.blur), border * res)
)
def render_by_type(
self,
tile: np.ndarray,
annotation: Annotation,
top_left: Tuple[float, float],
scale: float,
):
"""Render annotation appropriately to its geometry type.
Args:
tile (np.ndarray):
The rgb(a) tile image to render the annotation on.
annotation (Annotation):
The annotation to render.
top_left (Tuple[int, int]):
The top left coordinate of the tile.
scale (float):
The scale at which we are rendering the tile.
"""
geom_type = annotation.geometry.geom_type
if geom_type == "Point":
self.render_pt(tile, annotation, top_left, scale)
elif geom_type == "Polygon":
self.render_poly(tile, annotation, top_left, scale)
elif geom_type == "MultiPolygon":
self.render_multipoly(tile, annotation, top_left, scale)
elif geom_type == "LineString":
self.render_line(tile, annotation, top_left, scale)
else:
logger.warning("Unknown geometry: %s", geom_type, stacklevel=3)
| 30,713 | 34.021665 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/utils/image.py | """Miscellaneous utilities which operate on image data."""
from typing import Tuple, Union
import numpy as np
from PIL import Image
from tiatoolbox import logger
from tiatoolbox.utils.misc import conv_out_size
from tiatoolbox.utils.transforms import (
bounds2locsize,
bounds2slices,
imresize,
locsize2bounds,
pad_bounds,
)
PADDING_TO_BOUNDS = np.array([-1, -1, 1, 1])
"""
Constant array which when multiplied with padding and added to bounds,
applies the padding to the bounds.
"""
# Make this immutable / non-writable
PADDING_TO_BOUNDS.flags.writeable = False
def normalize_padding_size(padding):
"""Normalizes padding to be length 4 (left, top, right, bottom).
Given a scalar value, this is assumed to apply to all sides and
    therefore repeated for each output (left, top, right, bottom). A
length 2 input is assumed to apply the same padding to the
left/right and top/bottom.
Args:
padding (int or tuple(int)):
Padding to normalize.
Raises:
ValueError:
Invalid input size of padding (e.g. length 3).
ValueError:
Invalid input shape of padding (e.g. 3 dimensional).
Returns:
:class:`numpy.ndarray`:
Numpy array of length 4 with elements containing padding for
left, top, right, bottom.
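    Examples:
        >>> # Illustrative doctests (not part of the original docstring).
        >>> normalize_padding_size(1)
        array([1, 1, 1, 1])
        >>> normalize_padding_size((1, 2))
        array([1, 2, 1, 2])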
"""
padding_shape = np.shape(padding)
if len(padding_shape) > 1:
raise ValueError(
"Invalid input padding shape. Must be scalar or 1 dimensional."
)
padding_size = np.size(padding)
if padding_size == 3:
raise ValueError("Padding has invalid size 3. Valid sizes are 1, 2, or 4.")
if padding_size == 1:
return np.repeat(padding, 4)
if padding_size == 2:
return np.tile(padding, 2)
return np.array(padding)
def find_padding(read_location, read_size, image_size):
"""Find the correct padding to add when reading a region of an image.
Args:
read_location (tuple(int)):
The location of the region to read.
read_size (tuple(int)):
The size of the location to read.
image_size (tuple(int)):
The size of the image to read from.
Returns:
tuple:
Tuple of padding to apply in the format expect by `np.pad`.
i.e. `((before_x, after_x), (before_y, after_y))`.
Examples:
>>> from tiatoolbox.utils.image import find_padding
>>> location, size = (-2, -2), (10, 10)
>>> # Find padding needed to make the output (10, 10)
>>> # if the image is only (5 , 5) and read at
>>> # location (-2, -2).
>>> find_padding(location, size, image_size=(5, 5))
"""
read_location = np.array(read_location)
read_size = np.array(read_size)
image_size = np.array(image_size)
before_padding = np.maximum(-read_location, 0)
region_end = read_location + read_size
after_padding = np.maximum(region_end - np.max([image_size, read_location], 0), 0)
return np.stack([before_padding[::-1], after_padding[::-1]], axis=1)
def find_overlap(read_location, read_size, image_size):
"""Find the part of a region which overlaps the image area.
Args:
read_location (tuple(int)):
The location of the region to read.
read_size (tuple(int)):
The size of the location to read.
image_size (tuple(int)):
The size of the image to read from.
Returns:
tuple:
Bounds of the overlapping region.
Examples:
>>> from tiatoolbox.utils.image import find_overlap
>>> loc, size = (-5, -5), (10, 10)
>>> find_overlap(loc, size, (5, 5))
"""
read_location = np.array(read_location)
read_size = np.array(read_size)
image_size = np.array(image_size)
start = np.maximum(read_location, 0)
region_end = read_location + read_size
stop = np.minimum(region_end, image_size)
# Concatenate start and stop to make a bounds array (left, top, right, bottom)
return np.concatenate([start, stop])
def make_bounds_size_positive(bounds):
"""Make bounds have positive size and get horizontal/vertical flip flags.
Bounds with a negative size in either direction with have the
coordinates swapped (e.g. left and right or top and bottom swapped)
and a respective horizontal or vertical flip flag set in the output
to reflect the swaps which occurred.
Args:
bounds (:class:`numpy.ndarray`):
Length 4 array of bounds.
Returns:
tuple:
Three tuple containing positive bounds and flips:
- :class:`numpy.ndarray` - Positive bounds
- :py:obj:`bool` - Horizontal flip
- :py:obj:`bool` - Vertical flip
Examples:
>>> from tiatoolbox.utils.image import make_bounds_size_positive
>>> bounds = (10, 10, 0, 0)
        >>> positive_bounds, fliplr, flipud = make_bounds_size_positive(bounds)
"""
fliplr, flipud = False, False
_, (width, height) = bounds2locsize(bounds)
if width >= 0 and height >= 0:
return bounds, fliplr, flipud
l, t, r, b = bounds
if width < 0:
l, r = r, l
fliplr = True
if height < 0:
t, b = b, t
flipud = True
bounds = np.array([l, t, r, b])
return (bounds, fliplr, flipud)
def crop_and_pad_edges(
bounds: Tuple[int, int, int, int],
max_dimensions: Tuple[int, int],
region: np.ndarray,
pad_mode: str = "constant",
pad_constant_values: Union[int, Tuple] = 0,
) -> np.ndarray:
"""Apply padding to areas of a region which are outside max dimensions.
Applies padding to areas of the image region which have coordinates
less than zero or above the width and height in `max_dimensions`.
Note that bounds and max_dimensions must be given for the same image
pyramid level (or more generally resolution e.g. if interpolated
between levels or working in other units).
Note: This function is planned to be deprecated in the future when a
transition from OpenSlide to tifffile as a dependency is complete.
It is currently used to remove padding from OpenSlide regions before
applying custom padding via :func:`numpy.pad`. This allows the
behaviour when reading OpenSlide images to be consistent with other
formats.
Args:
bounds (tuple(int)):
Bounds of the image region.
max_dimensions (tuple(int)):
The maximum valid x and y values of the bounds, i.e. the
width and height of the slide.
region (:class:`numpy.ndarray`):
The image region to be cropped and padded.
pad_mode (str):
The pad mode to use, see :func:`numpy.pad` for valid pad
modes. Defaults to 'constant'. If set to "none" or None no
padding is applied.
pad_constant_values (int or tuple(int)):
Constant value(s) to use when padding. Only used with
pad_mode constant.
Returns:
:class:`numpy.ndarray`:
The cropped and padded image.
Examples:
>>> from tiatoolbox.utils.image import crop_and_pad_edges
>>> import numpy as np
>>> region = np.ones((10, 10, 3))
>>> padded_region = crop_and_pad_edges(
... bounds=(-1, -1, 5, 5),
... max_dimensions=(10, 10),
        ...     region=region,
... pad_mode="constant",
... pad_constant_values=0,
... )
"""
loc, size = bounds2locsize(bounds)
if np.min(max_dimensions) < 0:
raise ValueError("Max dimensions must be >= 0.")
if np.min(size) <= 0:
raise ValueError("Bounds must have size (width and height) > 0.")
    padding = find_padding(loc, size, max_dimensions)
    # Add an extra padding dimension for colour channels
    if len(region.shape) > 2:
        padding = np.concatenate([padding, [[0, 0]]])
    # If no padding is required then return the original image unmodified
    if np.all(np.array(padding) == 0):
        return region
    overlap = find_overlap(loc, size, max_dimensions)
    overlap = np.maximum(overlap - np.tile(loc, 2), 0)
# Crop the region
slices = bounds2slices(overlap)
slices += (...,)
crop = region[slices]
# Return if pad_mode is None
if pad_mode in ["none", None]:
return crop
# Pad the region and return
if pad_mode == "constant":
return np.pad(crop, padding, mode=pad_mode, constant_values=pad_constant_values)
return np.pad(crop, padding, mode=pad_mode)
def safe_padded_read(
image,
bounds,
stride=1,
padding=0,
pad_mode="constant",
pad_constant_values=0,
pad_kwargs=None,
):
"""Read a region of a numpy array with padding applied to edges.
Safely 'read' regions, even outside the image bounds. Accepts
integer bounds only.
Regions outside the source image are padded using any of the pad
modes available in :func:`numpy.pad`.
Note that padding of the output is not guaranteed to be
integer/pixel aligned if using a stride != 1.
.. figure:: ../images/out_of_bounds_read.png
:width: 512
:alt: Illustration for reading a region with negative
coordinates using zero padding and reflection padding.
Args:
image (:class:`numpy.ndarray` or :class:`glymur.Jp2k`):
Input image to read from.
bounds (tuple(int)):
Bounds of the region in (left, top, right, bottom) format.
stride (int or tuple(int)):
Stride when reading from img. Defaults to 1. A tuple is
interpreted as stride in x and y (axis 1 and 0
respectively). Also applies to padding.
padding (int or tuple(int)):
Padding to apply to each bound. Default to 0.
pad_mode (str):
Method for padding when reading areas outside the input
image. Default is constant (0 padding). Possible values are:
constant, reflect, wrap, symmetric. See :func:`numpy.pad`
for more.
pad_constant_values (int, tuple(int)): Constant values to use
when padding with constant pad mode. Passed to the
:func:`numpy.pad` `constant_values` argument. Default is 0.
pad_kwargs (dict):
Arbitrary keyword arguments passed through to the padding
function :func:`numpy.pad`.
Returns:
:class:`numpy.ndarray`:
Padded image region.
Raises:
ValueError:
Bounds must be integers.
ValueError:
Padding can't be negative.
Examples:
>>> bounds = (-5, -5, 5, 5)
>>> safe_padded_read(img, bounds)
>>> bounds = (-5, -5, 5, 5)
>>> safe_padded_read(img, bounds, pad_mode="reflect")
>>> bounds = (1, 1, 6, 6)
>>> safe_padded_read(img, bounds, padding=2, pad_mode="reflect")
"""
if pad_kwargs is None:
pad_kwargs = {}
if pad_mode == "constant" and "constant_values" not in pad_kwargs:
pad_kwargs["constant_values"] = pad_constant_values
padding = np.array(padding)
# Ensure the bounds are integers.
if not issubclass(np.array(bounds).dtype.type, (int, np.integer)):
raise ValueError("Bounds must be integers.")
if np.any(padding < 0):
raise ValueError("Padding cannot be negative.")
# Allow padding to be a 2-tuple in addition to an int or 4-tuple
padding = normalize_padding_size(padding)
# Ensure stride is a 2-tuple
if np.size(stride) not in [1, 2]:
raise ValueError("Stride must be of size 1 or 2.")
if np.size(stride) == 1:
stride = np.tile(stride, 2)
x_stride, y_stride = stride
# Check if the padded coords are outside the image bounds
# (over the width/height or under 0)
padded_bounds = bounds + (padding * np.array([-1, -1, 1, 1]))
img_size = np.array(image.shape[:2][::-1])
hw_limits = np.tile(img_size, 2) # height/width limits
zeros = np.zeros(hw_limits.shape)
# If all original bounds are within the bounds
padded_over = padded_bounds >= hw_limits
padded_under = padded_bounds < zeros
# If all padded coords are within the image then read normally
if not any(padded_over | padded_under):
l, t, r, b = padded_bounds
return image[t:b:y_stride, l:r:x_stride, ...]
# Else find the closest coordinates which are inside the image
clamped_bounds = np.max([np.min([padded_bounds, hw_limits], axis=0), zeros], axis=0)
clamped_bounds = np.round(clamped_bounds).astype(int)
# Read the area within the image
l, t, r, b = clamped_bounds
region = image[t:b:y_stride, l:r:x_stride, ...]
    # Reduce bounds and img_size for the stride
if not np.all(np.isin(stride, [None, 1])):
# This if is not required but avoids unnecessary calculations
bounds = conv_out_size(np.array(bounds), stride=np.tile(stride, 2))
padded_bounds = bounds + (padding * np.array([-1, -1, 1, 1]))
img_size = conv_out_size(img_size, stride=stride)
# Return without padding if pad_mode is none
if pad_mode in ["none", None]:
return region
# Find how much padding needs to be applied to fill the edge gaps
before_padding = np.min([[0, 0], padded_bounds[2:]], axis=0)
after_padding = np.max([img_size, padded_bounds[:2] - img_size], axis=0)
edge_padding = padded_bounds - np.concatenate([before_padding, after_padding])
edge_padding[:2] = np.min([edge_padding[:2], [0, 0]], axis=0)
edge_padding[2:] = np.max([edge_padding[2:], [0, 0]], axis=0)
edge_padding = np.abs(edge_padding)
l, t, r, b = edge_padding
pad_width = [(t, b), (l, r)]
if len(region.shape) == 3:
pad_width += [(0, 0)]
# Pad the image region at the edges
return np.pad(
region,
pad_width,
mode=pad_mode,
**pad_kwargs,
)
def sub_pixel_read( # noqa: CCR001
image,
bounds,
output_size,
padding=0,
stride=1,
interpolation="nearest",
pad_at_baseline=False,
interpolation_padding=2,
read_func=None,
pad_mode="constant",
pad_constant_values=0,
read_kwargs=None,
pad_kwargs=None,
):
"""Read and resize an image region with sub-pixel bounds.
Allows for reading of image regions with sub-pixel coordinates, and
out of bounds reads with various padding and interpolation modes.
.. figure:: ../images/sub_pixel_reads.png
:width: 512
:alt: Illustration for reading a region with fractional
coordinates (sub-pixel).
Args:
image (:class:`numpy.ndarray`):
Image to read from.
bounds (tuple(float)):
Bounds of the image to read in (left, top, right, bottom)
format.
output_size (tuple(int)):
The desired output size.
padding (int or tuple(int)):
Amount of padding to apply to the image region in pixels.
Defaults to 0.
stride (int or tuple(int)):
Stride when reading from img. Defaults to 1. A tuple is
interpreted as stride in x and y (axis 1 and 0
respectively).
interpolation (str):
Method of interpolation. Possible values are: nearest,
linear, cubic, lanczos, area. Defaults to nearest.
pad_at_baseline (bool):
Apply padding in terms of baseline pixels. Defaults to
False, meaning padding is added to the output image size in
pixels.
interpolation_padding (int):
Padding to temporarily apply before rescaling to avoid
border effects. Defaults to 2.
read_func (collections.abc.Callable):
Custom read function. Defaults to :func:`safe_padded_read`.
            A function which receives two positional args of the image
object and a set of integer bounds in addition to padding
key word arguments for reading a pixel-aligned bounding
region. This function should return a numpy array with 2 or
3 dimensions. See examples for more.
pad_mode (str):
Method for padding when reading areas are outside the input
image. Default is constant (0 padding). This is passed to
`read_func` which defaults to :func:`safe_padded_read`. See
:func:`safe_padded_read` for supported pad modes. Setting to
"none" or None will result in no padding being applied.
pad_constant_values (int, tuple(int)): Constant values to use
when padding with constant pad mode. Passed to the
:func:`numpy.pad` `constant_values` argument. Default is 0.
        read_kwargs (dict):
            Arbitrary keyword arguments passed through to `read_func`.
        pad_kwargs (dict):
Arbitrary keyword arguments passed through to the padding
function :func:`numpy.pad`.
Returns:
        :class:`numpy.ndarray`:
Output image region.
Raises:
ValueError:
Invalid arguments.
AssertionError:
Internal errors, possibly due to invalid values.
Examples:
>>> # Simple read
>>> bounds = (0, 0, 10.5, 10.5)
>>> sub_pixel_read(image, bounds)
>>> # Read with padding applied to bounds before reading:
>>> bounds = (0, 0, 10.5, 10.5)
>>> region = sub_pixel_read(
... image,
... bounds,
... padding=2,
... pad_mode="reflect",
... )
>>> # Read with padding applied after reading:
>>> bounds = (0, 0, 10.5, 10.5)
>>> region = sub_pixel_read(image, bounds)
>>> region = np.pad(region, padding=2, mode="reflect")
>>> # Custom read function which generates a diagonal gradient:
>>> bounds = (0, 0, 10.5, 10.5)
        >>> def gradient(_, b, *args, **kw):
        ...     width, height = (b[2] - b[0], b[3] - b[1])
        ...     return np.mgrid[:height, :width].sum(0)
        >>> sub_pixel_read(image, bounds, (10, 10), read_func=gradient)
>>> # Custom read function which gets pixel data from a custom object:
>>> bounds = (0, 0, 10, 10)
        >>> def openslide_read(image, bounds, *args, **kwargs):
... # Note that bounds may contain negative integers
... left, top, right, bottom = bounds
... size = (right - left, bottom - top)
... pil_img = image.read_region((left, top), level=0, size=size)
... return np.array(pil_img.convert("RGB"))
        >>> sub_pixel_read(image, bounds, (10, 10), read_func=openslide_read)
"""
# Handle inputs
if pad_kwargs is None:
pad_kwargs = {}
if read_kwargs is None:
read_kwargs = {}
if interpolation is None:
interpolation = "none"
if pad_mode == "constant" and "constant_values" not in pad_kwargs:
pad_kwargs["constant_values"] = pad_constant_values
if 0 in bounds2locsize(bounds)[1]:
raise ValueError("Bounds must have non-zero size")
# Normalize padding
padding = normalize_padding_size(padding)
# Check the bounds are valid or have a negative size
# The left/start_x and top/start_y values should usually be smaller
# than the right/end_x and bottom/end_y values.
bounds, fliplr, flipud = make_bounds_size_positive(bounds)
if fliplr or flipud:
logger.warning(
"Bounds have a negative size, output will be flipped.", stacklevel=2
)
if isinstance(image, Image.Image):
image = np.array(image)
# Normalize none pad_mode to None
    if pad_mode is None or pad_mode.lower() == "none":
pad_mode = None
# Initialise variables
image_size = np.flip(image.shape[:2])
scaling = np.array([1, 1])
_, bounds_size = bounds2locsize(bounds)
if output_size is not None and interpolation != "none":
scaling = np.array(output_size) / bounds_size / stride
read_bounds = bounds
if pad_mode is None:
output_size = np.round(
bounds2locsize(find_overlap(*bounds2locsize(bounds), image_size))[1]
* scaling
).astype(int)
overlap_bounds = find_overlap(*bounds2locsize(bounds), image_size=image_size)
if pad_mode is None:
read_bounds = overlap_bounds
baseline_padding = padding
if not pad_at_baseline:
baseline_padding = padding * np.tile(scaling, 2)
# Check the padded bounds do not have zero size
_, padded_bounds_size = bounds2locsize(pad_bounds(bounds, baseline_padding))
if 0 in padded_bounds_size:
raise ValueError("Bounds have zero size after padding.")
read_bounds = pad_bounds(read_bounds, interpolation_padding + baseline_padding)
# 0 Expand to integers and find residuals
start, end = np.reshape(read_bounds, (2, -1))
int_read_bounds = np.concatenate(
[
np.floor(start),
np.ceil(end),
]
)
residuals = np.abs(int_read_bounds - read_bounds)
read_bounds = int_read_bounds
valid_int_bounds = find_overlap(
*bounds2locsize(int_read_bounds), image_size
).astype(int)
# 1 Read the region
_, valid_int_size = bounds2locsize(valid_int_bounds)
if read_func is None:
region = image[bounds2slices(valid_int_bounds, stride=stride)]
else:
region = read_func(image, valid_int_bounds, stride, **read_kwargs)
if region is None or 0 in region.shape:
raise ValueError("Read region is empty or None.")
region_size = region.shape[:2][::-1]
if not np.array_equal(region_size, valid_int_size):
raise ValueError("Read function returned a region of incorrect size.")
# 1.5 Pad the region
pad_width = find_padding(*bounds2locsize(read_bounds), image_size=image_size)
if pad_mode is None:
pad_width -= find_padding(*bounds2locsize(overlap_bounds), image_size)
# Apply stride to padding
pad_width = pad_width / stride
# Add 0 padding to channels if required
if len(image.shape) > 2:
pad_width = np.concatenate([pad_width, [(0, 0)]])
# 1.7 Do the padding
if pad_mode == "constant":
region = np.pad(
region,
pad_width.astype(int),
mode=pad_mode or "constant",
**pad_kwargs,
)
else:
region = np.pad(region, pad_width.astype(int), mode=pad_mode or "constant")
# 2 Re-scaling
if output_size is not None and interpolation != "none":
region = imresize(region, scale_factor=scaling, interpolation=interpolation)
# 3 Trim interpolation padding
region_size = np.flip(region.shape[:2])
trimming = bounds2slices(
np.round(
pad_bounds(
locsize2bounds((0, 0), region_size),
(-(interpolation_padding + residuals) * np.tile(scaling, 2)),
)
)
)
region = region[trimming + (...,)]
region_size = region.shape[:2][::-1]
# 4 Ensure output is the correct size
if output_size is not None and interpolation != "none":
total_padding_per_axis = padding.reshape(2, 2).sum(axis=0)
if pad_at_baseline:
output_size = np.round(
np.add(output_size, total_padding_per_axis * scaling)
).astype(int)
else:
output_size = np.add(output_size, total_padding_per_axis)
if not np.array_equal(region_size, output_size):
region = imresize(
region, output_size=tuple(output_size), interpolation=interpolation
)
# 5 Apply flips to account for negative bounds
    if fliplr:
        region = np.fliplr(region)
    if flipud:
        region = np.flipud(region)
return region # noqa: R504
| 23,901 | 34.835082 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/utils/exceptions.py | """Custom Errors and Exceptions for TIAToolbox."""
class FileNotSupported(Exception):
"""Raise No supported file found error."""
def __init__(self, message="File format is not supported"):
self.message = message
super().__init__(self.message)
class MethodNotSupported(Exception):
"""Raise No supported file found error."""
def __init__(self, message="Method is not supported"):
self.message = message
super().__init__(self.message)
| 486 | 26.055556 | 63 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/utils/misc.py | """Miscellaneous small functions repeatedly used in tiatoolbox."""
import copy
import json
import os
import pathlib
import zipfile
from typing import IO, Dict, Optional, Tuple, Union
import cv2
import joblib
import numpy as np
import pandas as pd
import requests
import torch
import yaml
from shapely.affinity import translate
from shapely.geometry import shape as feature2geometry
from skimage import exposure
from tiatoolbox import logger
from tiatoolbox.annotation.storage import Annotation, AnnotationStore, SQLiteStore
from tiatoolbox.utils.exceptions import FileNotSupported
def split_path_name_ext(full_path):
"""Split path of a file to directory path, file name and extensions.
Args:
full_path (str or pathlib.Path):
Path to a file.
Returns:
tuple:
Three parts of the input file path:
- :py:obj:`pathlib.Path` - Parent directory path
- :py:obj:`str` - File name
- :py:obj:`list(str)` - File extensions
Examples:
>>> from tiatoolbox.utils.misc import split_path_name_ext
>>> dir_path, file_name, extensions = split_path_name_ext(full_path)
"""
input_path = pathlib.Path(full_path)
return input_path.parent.absolute(), input_path.name, input_path.suffixes
def grab_files_from_dir(input_path, file_types=("*.jpg", "*.png", "*.tif")):
"""Grab file paths specified by file extensions.
Args:
input_path (str or pathlib.Path):
Path to the directory where files
need to be searched.
file_types (str or tuple(str)):
File types (extensions) to be searched.
Returns:
list:
File paths as a python list. It has been sorted to ensure
the same ordering across platforms.
Examples:
>>> from tiatoolbox import utils
>>> file_types = ("*.ndpi", "*.svs", "*.mrxs")
>>> files_all = utils.misc.grab_files_from_dir(input_path,
... file_types=file_types)
"""
input_path = pathlib.Path(input_path)
if isinstance(file_types, str):
if len(file_types.split(",")) > 1:
file_types = tuple(file_types.replace(" ", "").split(","))
else:
file_types = (file_types,)
files_grabbed = []
for files in file_types:
files_grabbed.extend(input_path.glob(files))
# Ensure same ordering
files_grabbed.sort()
return list(files_grabbed)
def save_yaml(
input_dict: dict,
output_path="output.yaml",
parents: bool = False,
exist_ok: bool = False,
):
"""Save dictionary as yaml.
Args:
input_dict (dict):
A variable of type 'dict'.
output_path (str or pathlib.Path):
Path to save the output file.
parents (bool):
Make parent directories if they do not exist. Default is
False.
exist_ok (bool):
Overwrite the output file if it exists. Default is False.
    Examples:
>>> from tiatoolbox import utils
>>> input_dict = {'hello': 'Hello World!'}
>>> utils.misc.save_yaml(input_dict, './hello.yaml')
"""
path = pathlib.Path(output_path)
if path.exists() and not exist_ok:
raise FileExistsError("File already exists.")
if parents:
path.parent.mkdir(parents=True, exist_ok=True)
    with open(  # skipcq: PTC-W6004
str(pathlib.Path(output_path)), "w"
) as yaml_file:
yaml.dump(input_dict, yaml_file)
def imwrite(image_path, img) -> None:
"""Write numpy array to an image.
Args:
image_path (str or pathlib.Path):
File path (including extension) to save image to.
img (:class:`numpy.ndarray`):
Image array of dtype uint8, MxNx3.
Examples:
>>> from tiatoolbox import utils
>>> import numpy as np
>>> utils.misc.imwrite('BlankImage.jpg',
... np.ones([100, 100, 3]).astype('uint8')*255)
"""
if isinstance(image_path, pathlib.Path):
image_path = str(image_path)
cv2.imwrite(image_path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
def imread(image_path, as_uint8=True):
"""Read an image as numpy array.
Args:
image_path (str or pathlib.Path):
File path (including extension) to read image.
as_uint8 (bool):
Read an image in uint8 format.
Returns:
:class:`numpy.ndarray`:
Image array of dtype uint8, MxNx3.
Examples:
>>> from tiatoolbox import utils
>>> img = utils.misc.imread('ImagePath.jpg')
"""
if isinstance(image_path, pathlib.Path):
image_path = str(image_path)
if pathlib.Path(image_path).suffix == ".npy":
image = np.load(image_path)
else:
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if as_uint8:
return image.astype(np.uint8)
return image
def load_stain_matrix(stain_matrix_input):
"""Load a stain matrix as a numpy array.
Args:
stain_matrix_input (ndarray or str, pathlib.Path):
Either a 2x3 or 3x3 numpy array or a path to a saved .npy /
.csv file. If using a .csv file, there should be no column
            headers provided.
Returns:
stain_matrix (:class:`numpy.ndarray`):
The loaded stain matrix.
Examples:
>>> from tiatoolbox import utils
>>> sm = utils.misc.load_stain_matrix(stain_matrix_input)
"""
if isinstance(stain_matrix_input, (str, pathlib.Path)):
_, __, suffixes = split_path_name_ext(stain_matrix_input)
if suffixes[-1] not in [".csv", ".npy"]:
            raise FileNotSupported(
                "If supplying a path to a stain matrix, use either a "
                "npy or a csv file"
            )
if suffixes[-1] == ".csv":
return pd.read_csv(stain_matrix_input).to_numpy()
# only other option left for suffix[-1] is .npy
return np.load(str(stain_matrix_input))
if isinstance(stain_matrix_input, np.ndarray):
return stain_matrix_input
raise TypeError(
"Stain_matrix must be either a path to npy/csv file or a numpy array"
)
def get_luminosity_tissue_mask(img, threshold):
"""Get tissue mask based on the luminosity of the input image.
Args:
img (:class:`numpy.ndarray`):
Input image used to obtain tissue mask.
threshold (float):
Luminosity threshold used to determine tissue area.
Returns:
tissue_mask (:class:`numpy.ndarray`):
Binary tissue mask.
Examples:
>>> from tiatoolbox import utils
>>> tissue_mask = utils.misc.get_luminosity_tissue_mask(img, threshold=0.8)
"""
img = img.astype("uint8") # ensure input image is uint8
img = contrast_enhancer(img, low_p=2, high_p=98) # Contrast enhancement
img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
l_lab = img_lab[:, :, 0] / 255.0 # Convert to range [0,1].
tissue_mask = l_lab < threshold
# check it's not empty
if tissue_mask.sum() == 0:
raise ValueError("Empty tissue mask computed.")
return tissue_mask
def mpp2common_objective_power(
mpp, common_powers=(1, 1.25, 2, 2.5, 4, 5, 10, 20, 40, 60, 90, 100)
):
"""Approximate (commonly used value) of objective power from mpp.
Uses :func:`mpp2objective_power` to estimate and then rounds to the
nearest value in `common_powers`.
Args:
mpp (float or tuple(float)): Microns per-pixel.
common_powers (tuple or list of float): A sequence of objective
power values to round to. Defaults to
(1, 1.25, 2, 2.5, 4, 5, 10, 20, 40, 60, 90, 100).
Returns:
float:
Objective power approximation.
Examples:
>>> mpp2common_objective_power(0.253)
array(40)
>>> mpp2common_objective_power(
... [0.253, 0.478],
... common_powers=(10, 20, 40),
... )
array([40, 20])
"""
op = mpp2objective_power(mpp)
distances = [np.abs(op - power) for power in common_powers]
return common_powers[np.argmin(distances)]
mpp2common_objective_power = np.vectorize(
mpp2common_objective_power, excluded={"common_powers"}
)
@np.vectorize
def objective_power2mpp(objective_power):
r"""Approximate mpp from objective power.
The formula used for estimation is :math:`power = \frac{10}{mpp}`.
This is a self-inverse function and therefore
:func:`mpp2objective_power` is simply an alias to this function.
Note that this function is wrapped in :class:`numpy.vectorize`.
Args:
objective_power (float or tuple(float)): Objective power.
Returns:
:class:`numpy.ndarray`:
Microns per-pixel (MPP) approximations.
Examples:
>>> objective_power2mpp(40)
array(0.25)
>>> objective_power2mpp([40, 20, 10])
array([0.25, 0.5, 1.])
"""
return 10 / float(objective_power)
@np.vectorize
def mpp2objective_power(mpp):
"""Approximate objective_power from mpp.
Alias to :func:`objective_power2mpp` as it is a self-inverse
function.
Args:
mpp (float or tuple(float)): Microns per-pixel.
Returns:
:class:`numpy.ndarray`:
Objective power approximations.
Examples:
>>> mpp2objective_power(0.25)
array(40.)
>>> mpp2objective_power([0.25, 0.5, 1.0])
array([40., 20., 10.])
>>> mpp2objective_power(0.253)
array(39.5256917)
"""
return objective_power2mpp(mpp)
def contrast_enhancer(img, low_p=2, high_p=98):
"""Enhancing contrast of the input image using intensity adjustment.
This method uses both image low and high percentiles.
Args:
img (:class:`numpy.ndarray`): input image used to obtain tissue mask.
Image should be uint8.
low_p (scalar): low percentile of image values to be saturated to 0.
high_p (scalar): high percentile of image values to be saturated to 255.
high_p should always be greater than low_p.
Returns:
img (:class:`numpy.ndarray`):
Image (uint8) with contrast enhanced.
Raises:
AssertionError: Internal errors due to invalid img type.
Examples:
>>> from tiatoolbox import utils
>>> img = utils.misc.contrast_enhancer(img, low_p=2, high_p=98)
"""
# check if image is not uint8
if not img.dtype == np.uint8:
raise AssertionError("Image should be uint8.")
img_out = img.copy()
p_low, p_high = np.percentile(img_out, (low_p, high_p))
if p_low >= p_high:
p_low, p_high = np.min(img_out), np.max(img_out)
if p_high > p_low:
img_out = exposure.rescale_intensity(
img_out, in_range=(p_low, p_high), out_range=(0.0, 255.0)
)
return np.uint8(img_out)
def __numpy_array_to_table(input_table):
"""Checks numpy array to be 2 or 3 columns.
If it has two columns then class should be assigned None.
Args:
input_table (np.ndarray): input table.
Returns:
table (:class:`pd.DataFrame`): Pandas DataFrame with desired features.
Raises:
ValueError: If the number of columns is not equal to 2 or 3.
"""
if input_table.shape[1] == 2:
out_table = pd.DataFrame(input_table, columns=["x", "y"])
out_table["class"] = None
return out_table
if input_table.shape[1] == 3:
return pd.DataFrame(input_table, columns=["x", "y", "class"])
raise ValueError("Numpy table should be of format `x, y` or `x, y, class`.")
def __assign_unknown_class(input_table):
"""Creates a column and assigns None if class is unknown.
Args:
input_table (np.ndarray or pd.DataFrame): input table.
Returns:
table (:class:`pd.DataFrame`): Pandas DataFrame with desired features.
Raises:
ValueError:
If the number of columns is not equal to 2 or 3.
"""
if input_table.shape[1] not in [2, 3]:
raise ValueError("Input table must have 2 or 3 columns.")
if input_table.shape[1] == 2:
input_table["class"] = None
return input_table
def read_locations(input_table):
"""Read annotations as pandas DataFrame.
Args:
input_table (str or pathlib.Path or :class:`numpy.ndarray` or
:class:`pandas.DataFrame`): path to csv, npy or json. Input can also be a
:class:`numpy.ndarray` or :class:`pandas.DataFrame`.
First column in the table represents x position, second
column represents y position. The third column represents the class.
If the table has headers, the header should be x, y & class.
Json should have `x`, `y` and `class` fields.
Returns:
pd.DataFrame: DataFrame with x, y location and class type.
Raises:
FileNotSupported:
If the path to input table is not of supported type.
Examples:
>>> from tiatoolbox.utils.misc import read_locations
>>> labels = read_locations('./annotations.csv')
"""
if isinstance(input_table, (str, pathlib.Path)):
_, _, suffixes = split_path_name_ext(input_table)
if suffixes[-1] == ".npy":
out_table = np.load(input_table)
return __numpy_array_to_table(out_table)
if suffixes[-1] == ".csv":
out_table = pd.read_csv(input_table, sep=None, engine="python")
if "x" not in out_table.columns:
out_table = pd.read_csv(
input_table,
header=None,
names=["x", "y", "class"],
sep=None,
engine="python",
)
return __assign_unknown_class(out_table)
if suffixes[-1] == ".json":
out_table = pd.read_json(input_table)
return __assign_unknown_class(out_table)
raise FileNotSupported("File type not supported.")
if isinstance(input_table, np.ndarray):
return __numpy_array_to_table(input_table)
if isinstance(input_table, pd.DataFrame):
return __assign_unknown_class(input_table)
raise TypeError("Please input correct image path or an ndarray image.")
@np.vectorize
def conv_out_size(in_size, kernel_size=1, padding=0, stride=1):
r"""Calculate convolution output size.
This is a numpy vectorised function.
.. math::
\begin{split}
n_{out} &= \bigg\lfloor {{\frac{n_{in} +2p - k}{s}}} \bigg\rfloor + 1 \\
n_{in} &: \text{Number of input features} \\
n_{out} &: \text{Number of output features} \\
p &: \text{Padding size} \\
k &: \text{Kernel size} \\
s &: \text{Stride size} \\
\end{split}
Args:
in_size (int): Input size / number of input features.
kernel_size (int): Kernel size.
        padding (int): Padding size.
stride (int): Stride size.
Returns:
int:
Output size / number of features.
Examples:
>>> from tiatoolbox import utils
>>> import numpy as np
>>> utils.misc.conv_out_size(100, 3)
>>> np.array(98)
>>> utils.misc.conv_out_size(99, kernel_size=3, stride=2)
        >>> np.array(49)
>>> utils.misc.conv_out_size((100, 100), kernel_size=3, stride=2)
>>> np.array([49, 49])
"""
return (np.floor((in_size - kernel_size + (2 * padding)) / stride) + 1).astype(int)
def parse_cv2_interpolaton(interpolation: Union[str, int]) -> int:
"""Convert a string to a OpenCV (cv2) interpolation enum.
Interpolation modes:
- nearest
- linear
- area
- cubic
- lanczos
Valid integer values for cv2 interpolation enums are passed through.
See the `cv::InterpolationFlags`_ documentation for more
on cv2 (OpenCV) interpolation modes.
.. _cv::InterpolationFlags:
https://docs.opencv.org/4.0.0/da/d54/group__imgproc__transform.html#ga5bb5a1fea74ea38e1a5445ca803ff121
Args:
interpolation (Union[str, int]):
Interpolation mode string. Possible values are: nearest,
linear, cubic, lanczos, area.
Raises:
ValueError:
Invalid interpolation mode.
Returns:
int:
OpenCV (cv2) interpolation enum.
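    Examples:
        >>> # Illustrative doctest (not part of the original docstring).
        >>> import cv2
        >>> parse_cv2_interpolaton("linear") == cv2.INTER_LINEAR
        True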
"""
if isinstance(interpolation, str):
interpolation = interpolation.lower()
if interpolation in ["nearest", cv2.INTER_NEAREST]:
return cv2.INTER_NEAREST
if interpolation in ["area", cv2.INTER_AREA]:
return cv2.INTER_AREA
if interpolation in ["linear", cv2.INTER_LINEAR]:
return cv2.INTER_LINEAR
if interpolation in ["cubic", cv2.INTER_CUBIC]:
return cv2.INTER_CUBIC
if interpolation in ["lanczos", cv2.INTER_LANCZOS4]:
return cv2.INTER_LANCZOS4
raise ValueError("Invalid interpolation mode.")
def assert_dtype_int(input_var, message="Input must be integer."):
"""Generate error if dtype is not int.
Args:
input_var (ndarray):
Input variable to be tested.
message (str):
Error message to be displayed.
Raises:
AssertionError:
If input_var is not of type int.
"""
if not np.issubdtype(np.array(input_var).dtype, np.integer):
raise AssertionError(message)
def download_data(url, save_path, overwrite=False):
"""Download data from a given URL to location. Can overwrite data if demanded
else no action is taken
Args:
url (path): URL from where to download the data.
save_path (str): Location to unzip the data.
overwrite (bool): True to force overwriting of existing data, default=False
"""
print(f"Download from {url}")
print(f"Save to {save_path}")
save_dir = pathlib.Path(save_path).parent
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if not overwrite and os.path.exists(save_path):
return
    # Check that the URL exists before downloading its content
    request_response = requests.head(url)
    status_code = request_response.status_code
    url_exists = status_code == 200
    if not url_exists:
        raise ConnectionError(f"Could not find URL at {url}")
    r = requests.get(url)
with open(save_path, "wb") as f:
f.write(r.content)
def unzip_data(zip_path, save_path, del_zip=True):
"""Extract data from zip file.
Args:
zip_path (str): Path where the zip file is located.
save_path (str): Path where to save extracted files.
del_zip (bool): Whether to delete initial zip file after extraction.
"""
# Extract data from zip file
with zipfile.ZipFile(zip_path, "r") as zip_ref:
zip_ref.extractall(save_path)
if del_zip:
# Remove zip file
os.remove(zip_path)
def __walk_list_dict(in_list_dict):
"""Recursive walk and jsonify in place.
Args:
in_list_dict (list or dict): input list or a dictionary.
Returns:
list or dict
"""
if isinstance(in_list_dict, dict):
__walk_dict(in_list_dict)
elif isinstance(in_list_dict, list):
__walk_list(in_list_dict)
elif isinstance(in_list_dict, np.ndarray):
in_list_dict = in_list_dict.tolist()
__walk_list(in_list_dict)
elif isinstance(in_list_dict, np.generic):
in_list_dict = in_list_dict.item()
elif in_list_dict is not None and not isinstance(
in_list_dict, (int, float, str, bool)
):
raise ValueError(
f"Value type `{type(in_list_dict)}` `{in_list_dict}` is not jsonified."
)
return in_list_dict
def __walk_list(lst):
"""Recursive walk and jsonify a list in place.
Args:
lst (list): input list.
"""
for i, v in enumerate(lst):
lst[i] = __walk_list_dict(v)
def __walk_dict(dct):
"""Recursive walk and jsonify a dictionary in place.
Args:
dct (dict): input dictionary.
"""
for k, v in dct.items():
if not isinstance(k, (int, float, str, bool)):
raise ValueError(f"Key type `{type(k)}` `{k}` is not jsonified.")
dct[k] = __walk_list_dict(v)
def save_as_json(
data: Union[dict, list],
save_path: Union[str, pathlib.Path],
parents: bool = False,
exist_ok: bool = False,
):
"""Save data to a json file.
The function will deepcopy the `data` and then jsonify the content
    in place. Supported data types for jsonifying are `str`, `int`, `float`,
    `bool` and their `np.ndarray` counterparts.
Args:
data (dict or list):
Input data to save.
save_path (str):
Output to save the json of `input`.
parents (bool):
Make parent directories if they do not exist. Default is
False.
exist_ok (bool):
Overwrite the output file if it exists. Default is False.
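    Example:
        A minimal sketch; the output path here is hypothetical:
        >>> save_as_json(
        ...     {"mpp": 0.25}, "meta.json", parents=True, exist_ok=True
        ... )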
"""
shadow_data = copy.deepcopy(data) # make a copy of source input
if not isinstance(shadow_data, (dict, list)):
raise ValueError(f"Type of `data` ({type(data)}) must be in (dict, list).")
if isinstance(shadow_data, dict):
__walk_dict(shadow_data)
else:
__walk_list(shadow_data)
save_path = pathlib.Path(save_path)
if save_path.exists() and not exist_ok:
raise FileExistsError("File already exists.")
if parents:
save_path.parent.mkdir(parents=True, exist_ok=True)
with open(save_path, "w") as handle: # skipcq: PTC-W6004
json.dump(shadow_data, handle)
def select_device(on_gpu: bool) -> str:
"""Selects the appropriate device as requested.
Args:
on_gpu (bool): Selects gpu if True.
Returns:
str:
"gpu" if on_gpu is True otherwise returns "cpu"
"""
if on_gpu:
return "cuda"
return "cpu"
def model_to(on_gpu, model):
"""Transfers model to cpu/gpu.
Args:
on_gpu (bool): Transfers model to gpu if True otherwise to cpu
model (torch.nn.Module): PyTorch defined model.
Returns:
torch.nn.Module:
The model after being moved to cpu/gpu.
"""
if on_gpu: # DataParallel work only for cuda
model = torch.nn.DataParallel(model)
return model.to("cuda")
return model.to("cpu")
def get_bounding_box(img):
"""Get bounding box coordinate information.
    Given an image with zero and non-zero values, this function
    returns the minimal box that contains all non-zero values.
Args:
img (ndarray):
Image to get the bounding box.
Returns:
bound (ndarray):
Coordinates of the box in the form of `[start_x, start_y,
end_x, end_y]`.
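    Example:
        A minimal sketch with a small binary mask:
        >>> import numpy as np
        >>> img = np.zeros((10, 10))
        >>> img[2:5, 3:7] = 1
        >>> get_bounding_box(img)
        array([3, 2, 7, 5])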
"""
rows = np.any(img, axis=1)
cols = np.any(img, axis=0)
r_min, r_max = np.where(rows)[0][[0, -1]]
    c_min, c_max = np.where(cols)[0][[0, -1]]
    # due to python indexing, need to add 1 to max
    # else accessing will be 1px in the box, not out
    r_max += 1
    c_max += 1
    return np.array([c_min, r_min, c_max, r_max])
def string_to_tuple(in_str):
"""Splits input string to tuple at ','.
Args:
in_str (str):
input string.
Returns:
tuple:
Returns a tuple of strings by splitting in_str at ','.
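    Example:
        A minimal sketch:
        >>> string_to_tuple("Epithelial, Lymphocyte, Connective")
        ('Epithelial', 'Lymphocyte', 'Connective')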
"""
return tuple(substring.strip() for substring in in_str.split(","))
def ppu2mpp(ppu: int, units: Union[str, int]) -> float:
"""Convert pixels per unit (ppu) to microns per pixel (mpp)
Args:
ppu (int):
Pixels per unit.
        units (Union[str, int]):
            Units of pixels per unit. Valid options are "cm",
            "centimeter", "mm", "inch", "in", 2 (inches), 3 (cm).
Returns:
mpp (float):
Microns per pixel.
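    Example:
        A minimal sketch; a scanner resolution of 40,000 pixels per
        centimeter corresponds to 0.25 microns per pixel:
        >>> mpp = ppu2mpp(40_000, "cm")  # ~0.25 mpp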
"""
microns_per_unit = {
"centimeter": 1e4, # 10,000
"cm": 1e4, # 10,000
"mm": 1e3, # 1,000
"inch": 25400,
"in": 25400,
2: 25400, # inches in TIFF tags
3: 1e4, # cm in TIFF tags
}
if units not in microns_per_unit:
raise ValueError(f"Invalid units: {units}")
return 1 / ppu * microns_per_unit[units]
def select_cv2_interpolation(scale_factor):
"""Returns appropriate interpolation method for opencv based image resize.
Args:
scale_factor (int or float):
Image resize scale factor.
Returns:
str:
interpolation type
"""
if np.any(scale_factor > 1.0):
return "cubic"
return "area"
def store_from_dat(
fp: Union[IO, str, pathlib.Path],
scale_factor: Tuple[float, float] = (1, 1),
typedict: Optional[Dict] = None,
origin: Tuple[float, float] = (0, 0),
cls: AnnotationStore = SQLiteStore,
) -> "AnnotationStore":
"""Load annotations from a hovernet-style .dat file.
Args:
fp (Union[IO, str, Path]):
The file path or handle to load from.
scale_factor (Tuple[float, float]):
The scale factor in each dimension to use when loading the annotations.
All coordinates will be multiplied by this factor to allow import of
annotations saved at non-baseline resolution.
typedict (Dict[str, str]):
A dictionary mapping annotation types to annotation keys. Annotations
with a type that is a key in the dictionary, will have their type
replaced by the corresponding value. Useful for providing descriptive
names to non-descriptive types,
eg {1: 'Epithelial Cell', 2: 'Lymphocyte', 3: ...}.
For multi-head output, should be a dict of dicts, eg:
{'head1': {1: 'Epithelial Cell', 2: 'Lymphocyte', 3: ...},
'head2': {1: 'Gland', 2: 'Lumen', 3: ...}, ...}.
origin (Tuple[float, float]):
The x and y coordinates to use as the origin for the annotations.
cls (AnnotationStore):
The class to use for the annotation store. Defaults to SQLiteStore.
Returns:
AnnotationStore:
A new annotation store with the annotations loaded from the file.
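    Example:
        An illustrative sketch; the .dat path is hypothetical and the
        annotations are assumed to have been saved at half of the
        baseline resolution:
        >>> store = store_from_dat("hovernet_output.dat", scale_factor=(2, 2))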
"""
store = cls()
add_from_dat(store, fp, scale_factor, typedict=typedict, origin=origin)
return store
def make_valid_poly(poly, origin=None):
"""Helper function to make a valid polygon.
Args:
poly (Polygon):
The polygon to make valid.
origin (Tuple[float, float]):
The x and y coordinates to use as the origin for the annotation.
Returns:
A valid geometry.
"""
    if origin is not None and origin != (0, 0):
# transform coords to be relative to given pt.
poly = translate(poly, -origin[0], -origin[1])
if poly.is_valid:
return poly
logger.warning("Invalid geometry found, fix using buffer().", stacklevel=3)
return poly.buffer(0.01)
def anns_from_hoverdict(data, props, typedict, origin, scale_factor):
"""Helper function to create list of Annotation objects.
Creates annotations from a hovernet-style dict of segmentations, mapping types
using type dict if provided.
Args:
data (dict):
A dictionary of segmentations
props (list):
A list of properties
typedict (dict):
A dictionary mapping annotation types to more descriptive names.
origin (tuple):
The x and y coordinates to use as the origin for the annotations.
scale_factor (float):
The scale factor to use when loading the annotations. All coordinates
will be multiplied by this factor.
Returns:
A list of Annotation objects.
"""
return [
Annotation(
make_valid_poly(
feature2geometry(
{
"type": ann.get("geom_type", "Polygon"),
"coordinates": scale_factor * np.array([ann["contour"]]),
}
),
origin,
),
{
prop: typedict[ann[prop]]
if prop == "type" and typedict is not None
else ann[prop]
for prop in props[3:]
if prop in ann
},
)
for ann in data.values()
]
def make_default_dict(data, subcat):
"""Helper function to create a default typedict if none is provided.
The unique types in the data are given a prefix to differentiate
types from different heads of a multi-head model.
For example, types 1,2, etc in the 'Gland' head will become
'Gla: 1', 'Gla: 2', etc.
Args:
data (dict):
The data loaded from the .dat file.
subcat:
The subcategory of the data, eg 'Gland' or 'Nuclei'.
Returns:
A dictionary mapping types to more descriptive names.
"""
types = {
data[subcat][ann_id]["type"]
for ann_id in data[subcat]
if "type" in data[subcat][ann_id]
}
num_chars = np.minimum(3, len(subcat))
return {t: f"{subcat[:num_chars]}: {t}" for t in types}
def add_from_dat(
store,
fp: Union[IO, str],
scale_factor: Tuple[float, float] = (1, 1),
typedict: Optional[Dict] = None,
origin: Tuple[float, float] = (0, 0),
) -> None:
"""Add annotations from a .dat file to an existing store.
    Makes a best effort to create valid shapely geometries from the
    provided contours.
    Args:
        store (AnnotationStore):
            The annotation store to add the annotations to.
        fp (Union[IO, str, Path]):
            The file path or handle to load from.
scale_factor (float):
The scale factor to use when loading the annotations. All coordinates
will be multiplied by this factor to allow import of annotations saved
at non-baseline resolution.
typedict (Dict[str, str]):
A dictionary mapping annotation types to annotation keys. Annotations
with a type that is a key in the dictionary, will have their type
replaced by the corresponding value. Useful for providing descriptive
names to non-descriptive types,
eg {1: 'Epithelial Cell', 2: 'Lymphocyte', 3: ...}.
For multi-head output, should be a dict of dicts, e.g.:
{'head1': {1: 'Epithelial Cell', 2: 'Lymphocyte', 3: ...},
'head2': {1: 'Gland', 2: 'Lumen', 3: ...}, ...}.
        origin (Tuple[float, float]):
            The x and y coordinates to use as the origin for the annotations.
"""
data = joblib.load(fp)
props = list(data[list(data.keys())[0]].keys())
if "contour" not in props:
# assume cerberus format with objects subdivided into categories
anns = []
for subcat in data:
if subcat == "resolution":
continue
props = next(iter(data[subcat].values()))
if not isinstance(props, dict):
continue
props = list(props.keys())
# use type dictionary if available else auto-generate
if typedict is None:
typedict_sub = make_default_dict(data, subcat)
else:
typedict_sub = typedict[subcat]
anns.extend(
anns_from_hoverdict(
data[subcat], props, typedict_sub, origin, scale_factor
)
)
else:
anns = anns_from_hoverdict(data, props, typedict, origin, scale_factor)
print(f"added {len(anns)} annotations")
store.append_many(anns)
| 31,597 | 29.093333 | 110 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/utils/env_detection.py | """Detection methods for the current environment.
This module contains methods for detecting aspects of the current
environment.
Some things which this module can detect are:
- Whether the current environment is interactive.
- Whether the current environment is a conda environment.
- Whether the current environment is running on Travis, Kaggle, or
Colab.
Note that these detections may not be correct 100% of the time but are
as accurate as can reasonably be expected depending on what is being
detected.
"""
import os
import platform
import re
import shutil
import socket
import subprocess
import sys
import threading
from numbers import Number
from typing import List, Tuple
import torch
from tiatoolbox import logger
def has_gpu() -> bool:
"""Detect if the runtime has GPU.
This function calls torch function underneath. To mask an
environment to have no GPU, you can set "CUDA_VISIBLE_DEVICES"
environment variable to empty before running the python script.
Returns:
bool:
True if the current runtime environment has GPU, False
otherwise.
"""
return torch.cuda.is_available()
def is_interactive() -> bool:
"""Detect if the current environment is interactive.
This should return True for the following environments:
- Python REPL (`$ python`)
- IPython REPL (`$ ipython`)
- Interactive Python shell (`$ python -i`)
- Interactive IPython shell (`$ ipython -i`)
- IPython passed a string (`$ ipython -c "print('Hello')"`)
- Notebooks
- Jupyter (`$ jupyter notebook`)
- Google CoLab
- Kaggle Notebooks
- Jupyter lab (`$ jupyter lab`)
- VSCode Python Interactive window (`# %%` cells)
- VSCode Jupyter notebook environment
- PyCharm Console
This should return False for the following environments:
- Python script (`$ python script.py`)
- Python passed a string (`$ python -c "print('Hello')"`)
- PyCharm Run
- PyCharm Run (emulate terminal)
Returns:
bool:
True if the current environment is interactive, False
otherwise.
"""
return hasattr(sys, "ps1")
def is_notebook() -> bool:
"""Detect if the current environment is a Jupyter notebook.
Based on a method posted on StackOverflow:
- Question at https://stackoverflow.com/questions/15411967
- Question by Christoph
(https://stackoverflow.com/users/498873/christoph)
- Answer by Gustavo Bezerra
(https://stackoverflow.com/users/2132753/gustavo-bezerra)
Returns:
bool:
True if the current environment is a Jupyter notebook, False
otherwise.
"""
try:
from IPython import get_ipython
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
if shell == "TerminalInteractiveShell": # noqa: PIE801
return False # Terminal running IPython
return False # Other type (?)
except (NameError, ImportError):
return False # Probably standard Python interpreter
def in_conda_env() -> bool:
"""Detect if the current environment is a conda environment.
Returns:
bool:
True if the current environment is a conda environment,
False otherwise.
"""
return "CONDA_DEFAULT_ENV" in os.environ and "CONDA_PREFIX" in os.environ
def running_on_travis() -> bool:
"""Detect if the current environment is running on travis.
Returns:
bool:
True if the current environment is on travis, False
otherwise.
"""
return os.environ.get("TRAVIS") == "true" and os.environ.get("CI") == "true"
def running_on_github() -> bool:
"""Detect if the current environment is running on GitHub Actions.
Returns:
bool:
True if the current environment is on GitHub, False
otherwise.
"""
return os.environ.get("GITHUB_ACTIONS") == "true"
def running_on_circleci() -> bool:
"""Detect if the current environment is running on CircleCI.
Returns:
bool:
True if the current environment is on CircleCI, False
otherwise.
"""
return os.environ.get("CIRCLECI") == "true"
def running_on_ci() -> bool:
"""Detect if the current environment is running on continuous integration (CI).
Returns:
bool:
True if the current environment is on CI, False
otherwise.
"""
return any(
(
os.environ.get("CI") == "true",
running_on_travis(),
running_on_github(),
running_on_circleci(),
)
)
def running_on_kaggle() -> bool:
"""Detect if the current environment is running on Kaggle.
Returns:
bool:
True if the current environment is on Kaggle, False
otherwise.
"""
return os.environ.get("KAGGLE_KERNEL_RUN_TYPE") == "Interactive"
def running_on_colab() -> bool:
"""Detect if the current environment is running on Google Colab.
Returns:
bool:
True if the current environment is on Colab, False
otherwise.
"""
return "COLAB_GPU" in os.environ
def colab_has_gpu() -> bool:
"""Detect if the current environment is running on Google Colab with a GPU.
Returns:
bool:
True if the current environment is on colab with a GPU,
False otherwise.
"""
return bool(int(os.environ.get("COLAB_GPU", 0)))
def has_network(
hostname="one.one.one.one", timeout: Number = 3
) -> bool: # noqa: CCR001
"""Detect if the current environment has a network connection.
Create a socket connection to the hostname and check if the connection
is successful.
Args:
hostname (str):
The hostname to ping. Defaults to "one.one.one.one".
        timeout (Number):
            Timeout in seconds for the socket connection attempt.
Returns:
bool:
True if the current environment has a network connection,
False otherwise.
"""
try:
# Check DNS listing
host = socket.gethostbyname(hostname)
# Connect to host
connection = socket.create_connection((host, 80), timeout=timeout)
connection.close()
return True
except (socket.gaierror, socket.timeout):
return False
def pixman_versions() -> List[Tuple[int, ...]]: # noqa: CCR001
"""The version(s) of pixman that are installed.
Some package managers (brew) may report multiple versions of pixman
installed as part of a dependency tree.
Returns:
        tuple:
            The versions of pixman that are installed, as a list of
            tuples of ints, together with the name of the package
            manager used to detect them.
    Raises:
        EnvironmentError:
If pixman is not installed or the version could not be
determined.
"""
versions = []
using = None
if in_conda_env():
# Using anaconda to check for pixman
using = "conda"
try:
conda_list = subprocess.Popen(("conda", "list"), stdout=subprocess.PIPE)
conda_pixman = subprocess.check_output(
("grep", "pixman"), stdin=conda_list.stdout
)
conda_list.wait()
except subprocess.SubprocessError:
conda_pixman = b""
matches = re.search(
r"^pixman\s*(\d+.\d+)*",
conda_pixman.decode("utf-8"),
flags=re.MULTILINE,
)
if matches:
versions = [version_to_tuple(matches.group(1))]
if shutil.which("dpkg") and not versions:
# Using dpkg to check for pixman
using = "dpkg"
try:
dkpg_output = subprocess.check_output(
["/usr/bin/dpkg", "-s", "libpixman-1-0"]
)
except subprocess.SubprocessError:
dkpg_output = b""
matches = re.search(
r"^Version: ((?:\d+[._]+)+\d*)",
dkpg_output.decode("utf-8"),
flags=re.MULTILINE,
)
if matches:
versions = [version_to_tuple(matches.group(1))]
if shutil.which("brew") and not versions:
# Using homebrew to check for pixman
using = "brew"
try:
brew_list = subprocess.Popen(
("brew", "list", "--versions"), stdout=subprocess.PIPE
)
brew_pixman = subprocess.check_output(
("grep", "pixman"), stdin=brew_list.stdout
)
brew_list.wait()
except subprocess.SubprocessError:
brew_pixman = b""
matches = re.findall(
r"((?:\d+[._]+)+\d*)",
brew_pixman.decode("utf-8"),
flags=re.MULTILINE,
)
if matches:
versions = [version_to_tuple(match) for match in matches]
if platform.system() == "Darwin" and shutil.which("port") and not versions:
# Using macports to check for pixman. Also checks the platform
# is Darwin, as macports is only available on macOS.
using = "port"
port_list = subprocess.Popen(("port", "installed"), stdout=subprocess.PIPE)
port_pixman = subprocess.check_output(
("grep", "pixman"), stdin=port_list.stdout
)
port_list.wait()
matches = re.findall(
r"((?:\d+[._]+)+\d*)",
port_pixman.decode("utf-8"),
flags=re.MULTILINE,
)
if matches:
            versions = [version_to_tuple(match) for match in matches]
if versions:
return versions, using
raise EnvironmentError("Unable to detect pixman version(s).")
def version_to_tuple(match: str) -> Tuple[int, ...]:
"""Convert a version string to a tuple of ints.
    Only supports versions containing integers separated by periods or
    underscores.
Args:
match (str): The version string to convert.
Returns:
tuple:
The version string as a tuple of ints.
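    Example:
        A minimal sketch:
        >>> version_to_tuple("0.38.0")
        (0, 38, 0)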
"""
    # Check that the string only contains integers, periods, and underscores
    if not re.match(r"^\d+([._]\d+)*$", match):
        raise ValueError(f"{match} is not a valid version string.")
    return tuple(int(part) for part in re.split(r"[._]", match))
def pixman_warning() -> None: # pragma: no cover
"""Detect if pixman version 0.38 is being used.
If so, warn the user that the pixman version may cause problems.
Suggest a fix if possible.
"""
def _show_warning() -> None:
"""Show a warning message if pixman is version 0.38."""
try:
versions, using = pixman_versions()
except EnvironmentError:
# Unable to determine the pixman version
return
# If the pixman version is bad, suggest some fixes
fix = ""
if using == "conda":
fix = (
"You may be able do this with the command: "
'conda install -c conda-forge pixman">=0.39"'
)
if using == "dpkg":
fix = (
"To fix this you may need to do one of the following:\n"
" 1. Install libpixman-1-dev from your package manager (e.g. apt).\n"
" 2. Set up an anaconda environment with pixman >=0.39\n"
" 3. Install pixman >=0.39 from source. "
"Instructions to compile from source can be found at the GitLab "
"mirror here: "
"https://gitlab.freedesktop.org/pixman/pixman/-/blob/master/INSTALL"
)
if using == "brew":
fix = "You may be able do this with the command: brew upgrade pixman"
if using == "port":
fix = "You may be able do this with the command: port upgrade pixman"
# Log a warning if there is a pixman version in the range [0.38, 0.39)
if any((0, 38) <= v < (0, 39) for v in versions):
logger.warning(
"It looks like you are using Pixman version 0.38 (via %s). "
"This version is known to cause issues with OpenSlide. "
"Please consider upgrading to Pixman version 0.39 or later. "
"%s",
using,
fix,
)
thread = threading.Thread(target=_show_warning, args=(), kwargs={})
thread.start()
| 12,448 | 29.437653 | 86 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/utils/metrics.py | """This module defines several metrics used in computational pathology."""
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial import distance
def pair_coordinates(set_a, set_b, radius):
"""Find optimal unique pairing between two sets of coordinates.
    This function uses the Munkres (Kuhn-Munkres) algorithm behind the
    scenes to find the optimal unique pairing when pairing points in
    set B against points in set A, using Euclidean distance as the
    cost function.
Args:
        set_a (ndarray):
            An array of shape Nx2 containing the XY coordinates of N
            different points.
        set_b (ndarray):
            An array of shape Mx2 containing the XY coordinates of M
            different points.
radius:
Valid area around a point in set A to consider a given
coordinate in set B a candidate for matching.
Returns:
tuple:
- :class:`numpy.ndarray` - Pairing:
An array of shape Kx2, each item in K contains indices
where point at index [0] in set A paired with point in
set B at index [1].
- :class:`numpy.ndarray` - Unpaired A:
Indices of unpaired points in set A.
- :class:`numpy.ndarray` - Unpaired B:
Indices of unpaired points in set B.
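    Example:
        A minimal sketch pairing two small point sets within a radius
        of 2 pixels:
        >>> import numpy as np
        >>> set_a = np.array([[0, 0], [10, 10]])
        >>> set_b = np.array([[0, 1], [50, 50]])
        >>> pairing, unpaired_a, unpaired_b = pair_coordinates(set_a, set_b, 2)
        >>> pairing
        array([[0, 0]])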
"""
# * Euclidean distance as the cost matrix
pair_distance = distance.cdist(set_a, set_b, metric="euclidean")
# * Munkres pairing with scipy library
    # The algorithm returns (row indices, matched column indices). If
    # there are multiple equal costs in a row, the index of the first
    # occurrence is returned, thus the unique pairing is ensured.
indices_a, paired_indices_b = linear_sum_assignment(pair_distance)
# Extract the paired cost and remove instances outside designated
# radius.
pair_cost = pair_distance[indices_a, paired_indices_b]
paired_a = indices_a[pair_cost <= radius]
paired_b = paired_indices_b[pair_cost <= radius]
pairing = np.concatenate([paired_a[:, None], paired_b[:, None]], axis=-1)
unpaired_a = np.delete(np.arange(set_a.shape[0]), paired_a)
unpaired_b = np.delete(np.arange(set_b.shape[0]), paired_b)
return pairing, unpaired_a, unpaired_b
def f1_detection(true, pred, radius):
"""Calculate the F1-score for predicted set of coordinates."""
(paired_true, unpaired_true, unpaired_pred) = pair_coordinates(true, pred, radius)
tp = len(paired_true)
fp = len(unpaired_pred)
fn = len(unpaired_true)
return tp / (tp + 0.5 * fp + 0.5 * fn)
def dice(gt_mask, pred_mask):
r"""This function computes `Sørensen–Dice coefficient
<https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient>`_,
between the two masks.
.. math::
        DSC = \frac{2 |X \cap Y|}{|X| + |Y|}
Args:
gt_mask (:class:`numpy.ndarray`):
A binary ground truth mask
pred_mask (:class:`numpy.ndarray`):
A binary predicted mask
Returns:
:class:`float`:
A dice overlap
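    Example:
        A minimal sketch with two half-overlapping masks:
        >>> import numpy as np
        >>> gt_mask = np.zeros((4, 4))
        >>> gt_mask[:2, :] = 1
        >>> pred_mask = np.zeros((4, 4))
        >>> pred_mask[1:3, :] = 1
        >>> score = dice(gt_mask, pred_mask)  # 2*4 / (8+8) = 0.5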
"""
if gt_mask.shape != pred_mask.shape:
raise ValueError(f'{"Shape mismatch between the two masks."}')
gt_mask = gt_mask.astype(np.bool_)
pred_mask = pred_mask.astype(np.bool_)
sum_masks = gt_mask.sum() + pred_mask.sum()
if sum_masks == 0:
        return np.nan
return 2 * np.logical_and(gt_mask, pred_mask).sum() / sum_masks
| 3,502 | 34.383838 | 86 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/utils/__init__.py | """Utils package for toolbox utilities."""
from tiatoolbox.utils import (
env_detection,
exceptions,
image,
metrics,
misc,
transforms,
visualization,
)
| 180 | 15.454545 | 42 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/utils/transforms.py | """Define Image transforms."""
from typing import Tuple, Union
import cv2
import numpy as np
from PIL import Image
from tiatoolbox.utils.misc import parse_cv2_interpolaton, select_cv2_interpolation
def background_composite(image, fill=255, alpha=False):
"""Image composite with specified background.
Args:
image (ndarray or PIL.Image):
Input image.
fill (int):
Fill value for the background, defaults to 255.
alpha (bool):
True if alpha channel is required.
Returns:
:class:`numpy.ndarray`:
Image with background composite.
Examples:
>>> from tiatoolbox.utils import transforms
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> img_with_alpha = np.zeros((2000, 2000, 4)).astype('uint8')
>>> img_with_alpha[:1000, :, 3] = 255 # edit alpha channel
>>> img_back_composite = transforms.background_composite(
... img_with_alpha
... )
>>> plt.imshow(img_with_alpha)
>>> plt.imshow(img_back_composite)
>>> plt.show()
"""
if not isinstance(image, Image.Image):
image = Image.fromarray(image)
image = image.convert("RGBA")
composite = Image.fromarray(
np.full(list(image.size[::-1]) + [4], fill, dtype=np.uint8)
)
composite.alpha_composite(image)
if not alpha:
return np.asarray(composite.convert("RGB"))
return np.asarray(composite)
def imresize(img, scale_factor=None, output_size=None, interpolation="optimise"):
"""Resize input image.
Args:
img (:class:`numpy.ndarray`):
Input image, assumed to be in `HxWxC` or `HxW` format.
scale_factor (tuple(float)):
Scaling factor to resize the input image.
output_size (tuple(int)):
Output image size, (width, height).
interpolation (str or int):
            Interpolation method used to interpolate the image using
            `opencv interpolation flags
            <https://docs.opencv.org/3.4/da/d54/group__imgproc__transform.html>`_.
            The default, 'optimise', uses cv2.INTER_AREA for
            scale_factor < 1.0 and cv2.INTER_CUBIC otherwise.
Returns:
        :class:`numpy.ndarray`: Resized image. The output may have a different
            `np.dtype` from the input image; however, numeric precision is
            preserved.
Examples:
>>> from tiatoolbox.wsicore import wsireader
>>> from tiatoolbox.utils import transforms
>>> wsi = wsireader.WSIReader(input_path="./CMU-1.ndpi")
>>> slide_thumbnail = wsi.slide_thumbnail()
>>> # Resize the image to half size using scale_factor 0.5
>>> transforms.imresize(slide_thumbnail, scale_factor=0.5)
"""
if scale_factor is None and output_size is None:
raise TypeError("One of scale_factor and output_size must be not None.")
if scale_factor is not None:
scale_factor = np.array(scale_factor)
if scale_factor.size == 1:
scale_factor = np.repeat(scale_factor, 2)
# Handle None arguments
if output_size is None:
width = int(img.shape[1] * scale_factor[0])
height = int(img.shape[0] * scale_factor[1])
output_size = (width, height)
if scale_factor is None:
        scale_factor = np.array(output_size) / np.array(img.shape[:2][::-1])
# Return original if scale factor is 1
if np.all(scale_factor == 1.0):
return img
# Get appropriate cv2 interpolation enum
if interpolation == "optimise":
interpolation = select_cv2_interpolation(scale_factor)
# a list of (original type, converted type) tuple
# all `converted type` are np.dtypes that cv2.resize
# can work on out-of-the-box (anything else will cause
# error). The `converted type` has been selected so that
# they can maintain the numeric precision of the `original type`.
dtype_mapping = [
(np.bool_, np.uint8),
(np.int8, np.int16),
(np.int16, np.int16),
(np.int32, np.float32),
(np.uint8, np.uint8),
(np.uint16, np.uint16),
(np.uint32, np.float32),
(np.int64, np.float64),
(np.uint64, np.float64),
(np.float16, np.float32),
(np.float32, np.float32),
(np.float64, np.float64),
]
source_dtypes = [v[0] for v in dtype_mapping]
original_dtype = img.dtype
if original_dtype not in source_dtypes:
raise ValueError(
f"Does not support resizing for array of dtype: {original_dtype}"
)
converted_dtype = dtype_mapping[source_dtypes.index(original_dtype)][1]
img = img.astype(converted_dtype)
interpolation = parse_cv2_interpolaton(interpolation)
# Resize the image
# Handle case for 1x1 images which cv2 v4.5.4 no longer handles
if img.shape[0] == img.shape[1] == 1:
return img.repeat(output_size[1], 0).repeat(output_size[0], 1)
if len(img.shape) == 3 and img.shape[-1] > 4:
img_channels = [
cv2.resize(img[..., ch], tuple(output_size), interpolation=interpolation)[
..., None
]
for ch in range(img.shape[-1])
]
return np.concatenate(img_channels, axis=-1)
return cv2.resize(img, tuple(output_size), interpolation=interpolation)
def rgb2od(img):
r"""Convert from RGB to optical density (:math:`OD_{RGB}`) space.
.. math::
RGB = 255 * exp^{-1*OD_{RGB}}
Args:
img (:class:`numpy.ndarray` of type :class:`numpy.uint8`):
RGB image.
Returns:
:class:`numpy.ndarray`:
Optical density (OD) RGB image.
Examples:
>>> from tiatoolbox.utils import transforms, misc
>>> rgb_img = misc.imread('path/to/image')
>>> od_img = transforms.rgb2od(rgb_img)
"""
mask = img == 0
img[mask] = 1
return np.maximum(-1 * np.log(img / 255), 1e-6)
def od2rgb(od):
r"""Convert from optical density (:math:`OD_{RGB}`) to RGB.
.. math::
RGB = 255 * exp^{-1*OD_{RGB}}
Args:
od (:class:`numpy.ndarray`):
Optical density (OD) RGB image.
Returns:
:class:`numpy.ndarray`:
RGB Image.
Examples:
>>> from tiatoolbox.utils import transforms, misc
>>> rgb_img = misc.imread('path/to/image')
>>> od_img = transforms.rgb2od(rgb_img)
>>> rgb_img = transforms.od2rgb(od_img)
"""
od = np.maximum(od, 1e-6)
return (255 * np.exp(-1 * od)).astype(np.uint8)
def bounds2locsize(bounds, origin="upper"):
"""Calculate the size of a tuple of bounds.
Bounds are expected to be in the `(left, top, right, bottom)` or
`(start_x, start_y, end_x, end_y)` format.
Args:
bounds (tuple(int)):
A 4-tuple or length 4 array of bounds values in `(left, top,
right, bottom)` format.
origin (str):
Upper (Top-left) or lower (bottom-left) origin.
Defaults to upper.
Returns:
tuple:
A 2-tuple containing integer 2-tuples for location and size:
- :py:obj:`tuple` - location tuple
- :py:obj:`int` - x
- :py:obj:`int` - y
            - :py:obj:`tuple` - size tuple
- :py:obj:`int` - width
- :py:obj:`int` - height
Examples:
>>> from tiatoolbox.utils.transforms import bounds2locsize
>>> bounds = (0, 0, 10, 10)
>>> location, size = bounds2locsize(bounds)
>>> from tiatoolbox.utils.transforms import bounds2locsize
>>> _, size = bounds2locsize((12, 4, 24, 16))
"""
left, top, right, bottom = bounds
origin = origin.lower()
if origin == "upper":
return np.array([left, top]), np.array([right - left, bottom - top])
if origin == "lower":
return np.array([left, bottom]), np.array([right - left, top - bottom])
raise ValueError("Invalid origin. Only 'upper' or 'lower' are valid.")
def locsize2bounds(location, size):
"""Convert a location and size to bounds.
Args:
location (tuple(int)):
A 2-tuple or length 2 array of x,y coordinates.
size (tuple(int)):
A 2-tuple or length 2 array of width and height.
Returns:
tuple:
A tuple of bounds:
- :py:obj:`int` - left / start_x
- :py:obj:`int` - top / start_y
- :py:obj:`int` - right / end_x
- :py:obj:`int` - bottom / end_y
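    Example:
        A minimal sketch converting a location and size to bounds:
        >>> locsize2bounds((10, 20), (5, 5))
        (10, 20, 15, 25)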
"""
return (
location[0],
location[1],
location[0] + size[0],
location[1] + size[1],
)
def bounds2slices(
bounds: Tuple[int, int, int, int],
    stride: Union[int, Tuple[int, int]] = 1,
) -> Tuple[slice]:
"""Convert bounds to slices.
Create a tuple of slices for each start/stop pair in bounds.
Arguments:
bounds (tuple(int)):
Iterable of integer bounds. Must be even in length with the
first half as starting values and the second half as end
values, e.g. (start_x, start_y, stop_x, stop_y).
stride (int):
Stride to apply when converting to slices.
Returns:
tuple of slice:
Tuple of slices in image read order (y, x, channels).
Example:
>>> from tiatoolbox.utils.transforms import bounds2slices
>>> import numpy as np
>>> bounds = (5, 5, 10, 10)
>>> array = np.ones((10, 10, 3))
>>> slices = bounds2slices(bounds)
>>> region = array[slices, ...]
"""
if np.size(stride) not in [1, 2]:
raise ValueError("Invalid stride shape.")
if np.size(stride) == 1:
stride = np.tile(stride, 4)
elif np.size(stride) == 2: # pragma: no cover
stride = np.tile(stride, 2)
start, stop = np.reshape(bounds, (2, -1)).astype(int)
slice_array = np.stack([start[::-1], stop[::-1]], axis=1)
return tuple(slice(*x, s) for x, s in zip(slice_array, stride))
def pad_bounds(
bounds: Tuple[int, int, int, int],
padding: Union[int, Tuple[int, int], Tuple[int, int, int, int]],
) -> Tuple[int, int, int, int]:
"""Add padding to bounds.
Arguments:
bounds (tuple(int)):
Iterable of integer bounds. Must be even in length with the
first half as starting values and the second half as end
values, e.g. (start_x, start_y, stop_x, stop_y).
padding (int):
Padding to add to bounds.
Examples:
>>> pad_bounds((0, 0, 0, 0), 1)
Returns:
tuple of int:
Tuple of bounds with padding to the edges.
"""
if np.size(bounds) % 2 != 0:
raise ValueError("Bounds must have an even number of elements.")
ndims = np.size(bounds) // 2
if np.size(padding) not in [1, 2, np.size(bounds)]:
raise ValueError("Invalid number of padding elements.")
if np.size(padding) == 1 or np.size(padding) == np.size(bounds):
pass
elif np.size(padding) == ndims: # pragma: no cover
padding = np.tile(padding, 2)
signs = np.repeat([-1, 1], ndims)
return np.add(bounds, padding * signs)
| 11,241 | 30.757062 | 87 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/data/__init__.py | # skipcq: PTC-W6004
"""Package to define datasets available to download via TIAToolbox."""
import pathlib
import tempfile
import zipfile
from typing import Optional, Union
from urllib.parse import urlparse
import numpy as np
import pkg_resources
import requests
import yaml
# Load a dictionary of sample files data (names and urls)
SAMPLE_FILES_REGISTRY_PATH = pkg_resources.resource_filename(
"tiatoolbox", "data/remote_samples.yaml"
)
with open(SAMPLE_FILES_REGISTRY_PATH) as registry_handle:
SAMPLE_FILES = yaml.safe_load(registry_handle)["files"]
__all__ = ["stain_norm_target"]
def _fetch_remote_sample(
key: str, tmp_path: Optional[Union[str, pathlib.Path]] = None
) -> pathlib.Path:
"""Get the path to a sample file, after downloading from remote if required.
Loads remote resources by name. This is done by looking up files in
`tiatoolbox/data/remote_samples.yaml`.
Args:
key (str):
The name of the resource to fetch.
tmp_path (str or pathlib.Path):
The directory to use for local caching. Defaults to the OS
tmp path, see `tempfile.gettempdir` for more information.
During testing, `tmp_path` should be set to a temporary test
location using `tmp_path_factory.mktemp()`.
Returns:
pathlib.Path:
The local path to the cached sample file after downloading.
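    Example:
        An illustrative sketch; "svs-1-small" is a key defined in
        `remote_samples.yaml` (also used by `small_svs` below):
        >>> svs_path = _fetch_remote_sample("svs-1-small")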
"""
tmp_path = (
pathlib.Path(tmp_path) if tmp_path else pathlib.Path(tempfile.gettempdir())
)
if not tmp_path.is_dir():
raise ValueError("tmp_path must be a directory.")
sample = SAMPLE_FILES[key]
url = "/".join(sample["url"])
url_filename = pathlib.Path(urlparse(url).path).name
# Get the filename from SAMPLE_FILES, else use the URL filename
filename = SAMPLE_FILES[key].get("filename", url_filename)
file_path = tmp_path / filename
# Download the file if it doesn't exist
if not file_path.is_file():
print(f"Downloading sample file {filename}")
# Start the connection with a 5s timeout to avoid hanging forever
response = requests.get(url, stream=True, timeout=5)
# Raise an exception for status codes != 200
response.raise_for_status()
# Write the file in blocks of 1024 bytes to avoid running out of memory
with open(file_path, "wb") as handle:
for block in response.iter_content(1024):
handle.write(block)
# Extract the (zip) archive contents if required
if sample.get("extract"):
print(f"Extracting sample file {filename}")
extract_path = tmp_path / filename.replace(".zip", "")
with zipfile.ZipFile(file_path, "r") as zip_handle:
zip_handle.extractall(path=extract_path)
file_path = extract_path
return file_path
print(f"Skipping download of sample file {filename}")
if sample.get("extract"):
file_path = tmp_path / filename.replace(".zip", "")
return file_path
def _local_sample_path(path: Union[str, pathlib.Path]) -> pathlib.Path:
"""Get the path to a data file bundled with the package.
Args:
path (str or pathlib.Path):
Relative path to the package data file.
Returns:
pathlib.Path:
Path within the package to the data file.
Example:
>>> # Get the path to a sample target image for performing
>>> # stain normalization.
>>> from tiatoolbox.data import stain_norm_target
>>> img = stain_norm_target()
"""
return pkg_resources.resource_filename(
"tiatoolbox", str(pathlib.Path("data") / path)
)
def stain_norm_target() -> np.ndarray:
"""Target image for stain normalization."""
from tiatoolbox.utils.misc import imread
return imread(_local_sample_path("target_image.png"))
def small_svs() -> pathlib.Path:
"""Small SVS file for testing."""
return _fetch_remote_sample("svs-1-small")
| 3,991 | 33.413793 | 83 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/annotation/storage.py | """Storage of annotations.
This module contains a collection of classes for handling storage of
annotations in memory in addition to serialization/deserialization to/from
disk.
Definitions
-----------
For the sake of clarity it is helpful to define a few terms used throughout
this documentation.
Annotation
A geometry and associated properties.
Geometry
One of: a point, a polygon, or a line string.
.. figure:: ../images/geometries.png
:width: 512
Properties
Key-value pairs associated with a geometry.
"""
import contextlib
import copy
import io
import json
import os
import pickle
import sqlite3
import sys
import tempfile
import uuid
import zlib
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import MutableMapping
from dataclasses import dataclass, field
from functools import lru_cache
from numbers import Number
from pathlib import Path
from typing import (
IO,
Any,
Callable,
DefaultDict,
Dict,
Generator,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
Union,
)
import numpy as np
import pandas as pd
from shapely import speedups, wkb, wkt
from shapely.affinity import scale, translate
from shapely.geometry import LineString, Point, Polygon
from shapely.geometry import mapping as geometry2feature
from shapely.geometry import shape as feature2geometry
import tiatoolbox
from tiatoolbox import DuplicateFilter, logger
from tiatoolbox.annotation.dsl import (
PY_GLOBALS,
SQL_GLOBALS,
json_contains,
json_list_sum,
py_regexp,
)
sqlite3.enable_callback_tracebacks(True)
if speedups.available: # pragma: no branch
speedups.enable()
Geometry = Union[Point, Polygon, LineString]
Properties = Dict[str, Union[Dict, List, Number, str]]
BBox = Tuple[Number, Number, Number, Number]
QueryGeometry = Union[BBox, Geometry]
CallablePredicate = Callable[[Properties], bool]
CallableSelect = Callable[[Properties], Properties]
Predicate = Union[str, bytes, CallablePredicate]
Select = Union[str, bytes, CallableSelect]
ASCII_FILE_SEP = "\x1c"
ASCII_GROUP_SEP = "\x1d"
ASCII_RECORD_SEP = "\x1e"
ASCII_UNIT_SEP = "\x1f"
ASCII_NULL = "\0"
ISO_8601_DATE_FORMAT = r"%Y-%m-%dT%H:%M:%S.%f%z"
# Only Python 3.10+ supports using slots for dataclasses
# https://docs.python.org/3/library/dataclasses.html#dataclasses.dataclass
# therefore we use the following workaround to only use them when available.
# Using slots gives a performance boost at object creation time.
_DATACLASS_KWARGS = {"frozen": True}
if sys.version_info >= (3, 10): # pragma: no cover
_DATACLASS_KWARGS["slots"] = True
@dataclass(**_DATACLASS_KWARGS)
class Annotation:
"""An annotation: a geometry and associated properties.
Attributes:
geometry (Geometry):
The geometry of the annotation.
properties (dict):
The properties of the annotation.
"""
geometry: Geometry
properties: Properties = field(default_factory=dict)
def to_feature(self) -> Dict:
"""
Return a feature representation of this annotation.
A feature representation is a Python dictionary with the
same schema as a geoJSON feature.
Returns:
dict:
A feature representation of this annotation.
"""
return {
"type": "Feature",
"geometry": geometry2feature(self.geometry),
"properties": self.properties,
}
def to_geojson(self) -> str:
"""
Return a GeoJSON string representation of this annotation.
Returns:
str:
A GeoJSON representation of this annotation.
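        Example:
            A minimal sketch:
            >>> from shapely.geometry import Point
            >>> annotation = Annotation(Point(1, 2), {"class": 0})
            >>> geojson_str = annotation.to_geojson()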
"""
return json.dumps(self.to_feature())
def __repr__(self) -> str:
return f"Annotation({self.geometry}, {self.properties})"
class AnnotationStore(ABC, MutableMapping):
"""Annotation store abstract base class."""
def __new__(cls, *args, **kwargs):
"""Return an instance of a subclass of AnnotationStore."""
if cls is AnnotationStore:
raise TypeError(
"AnnotationStore is an abstract class and cannot be instantiated."
" Use a subclass such as DictionaryStore or SQLiteStore instead."
)
return super().__new__(cls)
@staticmethod
def _is_right_angle(a, b, c) -> bool:
"""Returns True if three points make a right angle.
Used for optimising queries.
This function will have positional only arguments when support
for Python 3.7 is dropped.
Args:
a (Sequence[Number]):
First coordinate.
b (Sequence[Number]):
Second coordinate.
c (Sequence[Number]):
Third coordinate.
"""
return np.dot(np.subtract(a, b), np.subtract(b, c)) == 0
@staticmethod
def _is_rectangle(a, b, c, d, *args) -> bool:
"""Determine if a set of coordinates form a rectangle.
Used for optimising queries. If more than five points are given,
or if the optional fifth point is not equal to `a` then this
returns False.
Args:
a (Sequence[Number]):
First coordinate.
b (Sequence[Number]):
Second coordinate.
            c (Sequence[Number]):
Third coordinate.
d (Sequence[Number]):
Fourth coordinate.
Returns:
True if the coordinates form a rectangle, False otherwise.
"""
# Only allow one extra coordinate for looping back to the first point
if (len(args) == 1 and not np.array_equal(args[:1], [a])) or len(args) > 1:
return False
# Check that all angles are right angles
return all(
AnnotationStore._is_right_angle(*xyz)
for xyz in ((a, b, c), (b, c, d), (c, d, a))
)
@staticmethod
def _connection_to_path(connection: Union[str, Path, IO]) -> Path:
"""Normalise a connection object to a Path.
Here we refer to a 'connection' as anything which references a
file e.g. a string, a pathlibPath, or a file-like object (IO).
Args:
connection (Union[str, Path, IO]):
The connection object to normalise.
Returns:
Path:
The normalised path.
"""
if not isinstance(
connection,
(
str,
Path,
io.IOBase,
io.TextIOBase,
tempfile._TemporaryFileWrapper, # skipcq: PYL-W0212
),
):
raise TypeError(
"Connection must be a string, Path, or an IO object, "
f"not {type(connection)}"
)
if isinstance(
connection,
(
io.IOBase,
io.TextIOBase,
tempfile._TemporaryFileWrapper, # skipcq: PYL-W0212
),
):
connection = connection.name
return Path(connection)
@staticmethod
def _validate_equal_lengths(*args):
"""Validate that all given args are either None or have the same length."""
lengths = [len(v) for v in args if v is not None]
if lengths and any(length != lengths[0] for length in lengths):
raise ValueError("All arguments must be None or of equal length.")
@staticmethod
    def _geometry_predicate(name: str, a: Geometry, b: Geometry) -> bool:
"""Apply a binary geometry predicate.
For more information on geometric predicates see the `Shapely
documentation <https://shapely.readthedocs.io/en/stable/
manual.html#binary-predicates>`_.
Args:
name(str):
Name of the predicate to apply.
a(Geometry):
The first geometry.
b(Geometry):
The second geometry.
Returns:
bool:
True if the geometry predicate holds.
"""
return getattr(a, name)(b)
# All valid shapely binary predicates
_geometry_predicate_names = [
"equals",
"contains",
"covers",
"covered_by",
"crosses",
"disjoint",
"intersects",
"overlaps",
"touches",
"within",
# Special non-shapely case, bounding-boxes intersect.
"bbox_intersects",
# Special non-shapely case, query centroid within k of
# annotation bounds center.
"centers_within_k",
]
@classmethod # noqa: A003
@abstractmethod
def open(cls, fp: Union[Path, str, IO]) -> "AnnotationStore": # noqa: A003
"""Load a store object from a path or file-like object.
Args:
fp(Path or str or IO): The file path or file handle.
Returns:
AnnotationStoreABC:
An instance of an annotation store.
"""
@staticmethod
def serialise_geometry(geometry: Geometry) -> Union[str, bytes]:
"""Serialise a geometry to a string or bytes.
This defaults to well-known text (WKT) but may be overridden to
any other format which a Shapely geometry could be serialised to
e.g. well-known binary (WKB) or geoJSON etc.
Args:
geometry(Geometry):
The Shapely geometry to be serialised.
Returns:
bytes or str: The serialised geometry.
"""
return geometry.wkt
@staticmethod
@lru_cache(32)
def deserialize_geometry(data: Union[str, bytes]) -> Geometry:
"""Deserialize a geometry from a string or bytes.
This default implementation will deserialize bytes as well-known
binary (WKB) and strings as well-known text (WKT). This can be
overridden to deserialize other formats such as geoJSON etc.
Args:
data(bytes or str):
The serialised representation of a Shapely geometry.
Returns:
Geometry: The deserialized Shapely geometry.
"""
return wkt.loads(data) if isinstance(data, str) else wkb.loads(data)
@abstractmethod
def commit(self) -> None:
"""Commit any in-memory changes to disk."""
@abstractmethod
def dump(self, fp: Union[Path, str, IO]) -> None:
"""Serialise a copy of the whole store to a file-like object.
Args:
fp(Path or str or IO):
A file path or file handle object for output to disk.
"""
@abstractmethod
def dumps(self) -> Union[str, bytes]:
"""Serialise and return a copy of store as a string or bytes.
Returns:
str or bytes:
The serialised store.
"""
def append(
self,
annotation: Annotation,
key: Optional[str] = None,
    ) -> str:
"""Insert a new annotation, returning the key.
Args:
annotation (Annotation):
The shapely annotation to insert.
key (str):
Optional. The unique key used to identify the annotation in the
store. If not given a new UUID4 will be generated and returned
instead.
Returns:
str:
The unique key of the newly inserted annotation.
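        Example:
            An illustrative sketch using a concrete store subclass:
            >>> from shapely.geometry import Point
            >>> from tiatoolbox.annotation.storage import Annotation, SQLiteStore
            >>> store = SQLiteStore()
            >>> key = store.append(Annotation(Point(1, 1), {"class": 0}))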
"""
keys = key if key is None else [key]
return self.append_many([annotation], keys)[0]
def append_many(
self,
annotations: Iterable[Annotation],
keys: Optional[Iterable[str]] = None,
) -> List[str]:
"""Bulk append of annotations.
This may be more performant than repeated calls to `append`.
Args:
annotations (iter(Annotation)):
An iterable of annotations.
keys (iter(str)):
An iterable of unique keys associated with each geometry being
inserted. If None, a new UUID4 is generated for each geometry.
Returns:
list(str):
A list of unique keys for the inserted geometries.
"""
annotations = list(annotations)
keys = list(keys) if keys else None
self._validate_equal_lengths(keys, annotations)
result = []
if keys:
result.extend(
self.append(annotation, key)
for key, annotation in zip(keys, annotations)
)
return result
result.extend(self.append(annotation) for annotation in annotations)
return result
def patch(
self,
key: str,
geometry: Optional[Geometry] = None,
properties: Optional[Dict[str, Any]] = None,
) -> None:
"""Patch an annotation at given key.
Partial update of an annotation. Providing only a geometry will update
the geometry and leave properties unchanged. Providing a properties
dictionary applies a patch operation to the properties. Only updating
the properties which are given and leaving the rest unchanged. To
completely replace an annotation use `__setitem__`.
Args:
key(str):
The key of the annotation to update.
geometry(Geometry):
The new geometry. If None, the geometry is not updated.
properties(dict):
A dictionary of properties to patch and their new values.
If None, the existing properties are not altered.
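        Example:
            A minimal sketch, assuming `store` already contains an
            annotation under the key "foo"; only the "class" property
            is updated and the geometry is left unchanged:
            >>> store.patch("foo", properties={"class": 1})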
"""
if key not in self:
self.append(Annotation(geometry, properties), key)
return
geometry = geometry if geometry is None else [geometry]
properties = properties if properties is None else [properties]
self.patch_many([key], geometry, properties)
def patch_many(
self,
        keys: Iterable[str],
geometries: Optional[Iterable[Geometry]] = None,
properties_iter: Optional[Iterable[Properties]] = None,
) -> None:
"""Bulk patch of annotations.
This may be more efficient than calling `patch` repeatedly
in a loop.
        Args:
            keys (iter(str)):
                An iterable of keys for each annotation to be updated.
            geometries (iter(Geometry)):
                An iterable of geometries to update.
            properties_iter (iter(dict)):
                An iterable of properties to update.
"""
# Validate inputs
if not any([geometries, properties_iter]):
raise ValueError(
"At least one of geometries or properties_iter must be given"
)
keys = list(keys)
geometries = list(geometries) if geometries else None
properties_iter = list(properties_iter) if properties_iter else None
self._validate_equal_lengths(keys, geometries, properties_iter)
properties_iter = properties_iter or ({} for _ in keys) # pragma: no branch
geometries = geometries or (None for _ in keys) # pragma: no branch
# Update the store
for key, geometry, properties in zip(keys, geometries, properties_iter):
properties = copy.deepcopy(properties)
self.patch(key, geometry, properties)
def remove(self, key: str) -> None:
"""Remove annotation from the store with its unique key.
Args:
key (str):
The key of the annotation to be removed.
"""
self.remove_many([key])
def remove_many(self, keys: Iterable[str]) -> None:
"""Bulk removal of annotations by keys.
Args:
keys (iter(str)):
An iterable of keys for the annotation to be removed.
"""
for key in keys:
self.remove(key)
def setdefault(self, key: str, default: Annotation = None) -> Annotation:
"""Return the value of the annotation with the given key.
If the key does not exist, insert the default value and return
it.
Args:
key (str):
The key of the annotation to be fetched.
default (Annotation):
The value to return if the key is not found.
Returns:
Annotation:
The annotation or default if the key is not found.
"""
if not isinstance(default, Annotation):
raise TypeError("default value must be an Annotation instance.")
return super().setdefault(key, default)
def __delitem__(self, key: str) -> None:
"""Delete an annotation by key.
An alias of `remove`.
Args:
key (str):
The key of the annotation to be removed.
"""
self.remove(key)
def keys(self) -> Iterable[str]:
"""Return an iterable (usually generator) of all keys in the store.
Returns:
Iterable[str]:
An iterable of keys.
"""
for key, _ in self.items():
yield key
def values(self) -> Iterable[Annotation]:
"""Return an iterable of all annotation in the store.
Returns:
Iterable[Annotation]:
An iterable of annotations.
"""
for _, annotation in self.items():
yield annotation
def __iter__(self) -> Iterable[str]:
"""Return an iterable of keys in the store.
An alias of `keys`.
Returns:
Iterable[str]:
An iterable of keys.
"""
yield from self.keys()
@staticmethod
def _eval_where(
predicate: Optional[Predicate],
properties: Dict[str, Any],
) -> bool:
"""Evaluate properties predicate against properties.
Args:
predicate (str or bytes or Callable):
The predicate to evaluate on properties. The predicate may be a
string, pickled bytes, or a callable (e.g. a function).
properties (dict):
A dictionary of JSON serializable
properties on which to evaluate the predicate.
Returns:
bool:
Returns True if the predicate holds.
"""
if predicate is None:
return True
if isinstance(predicate, str):
return bool(
eval(predicate, PY_GLOBALS, {"props": properties}) # skipcq: PYL-W0123
)
if isinstance(predicate, bytes):
predicate = pickle.loads(predicate) # skipcq: BAN-B301
return bool(predicate(properties))
def query(
self,
geometry: Optional[QueryGeometry] = None,
where: Optional[Predicate] = None,
geometry_predicate: str = "intersects",
distance: float = 0,
) -> Dict[str, Annotation]:
"""Query the store for annotations.
Args:
geometry (Geometry or Iterable):
Geometry to use when querying. This can be a bounds
(iterable of length 4) or a Shapely geometry (e.g.
Polygon).
where (str or bytes or Callable):
A statement which should evaluate to a boolean value.
Only annotations for which this predicate is true will
be returned. Defaults to None (assume always true). This
may be a string, callable, or pickled function as bytes.
Callables are called to filter each result returned
from the annotation store backend in python before being
returned to the user. A pickle object is, where
possible, hooked into the backend as a user defined
function to filter results during the backend query.
Strings are expected to be in a domain specific language
and are converted to SQL on a best-effort basis. For
supported operators of the DSL see
:mod:`tiatoolbox.annotation.dsl`. E.g. a simple python
expression `props["class"] == 42` will be converted to a
valid SQLite predicate when using `SQLiteStore` and
inserted into the SQL query. This should be faster than
filtering in python after or during the query.
Additionally, the same string can be used across
different backends (e.g. the previous example predicate
                string is valid for both `DictionaryStore` and a
                `SQLiteStore`). On the other hand it has many more
limitations. It is important to note that untrusted user
input should never be accepted to this argument as
arbitrary code can be run via pickle or the parsing of
the string statement.
geometry_predicate (str):
A string defining which binary geometry predicate to
use when comparing the query geometry and a geometry in
the store. Only annotations for which this binary
predicate is true will be returned. Defaults to
"intersects". For more information see the `shapely
documentation on binary predicates <https://shapely.
readthedocs.io/en/stable/manual.html#binary-predicates>`_.
distance (float):
Distance used when performing a distance based query.
E.g. "centers_within_k" geometry predicate.
Returns:
            dict:
                A dictionary mapping keys to Annotation objects.
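        Example:
            An illustrative sketch, assuming `store` is a populated
            annotation store; returns annotations of class 42 which
            intersect a 100 x 100 box:
            >>> results = store.query(
            ...     geometry=(0, 0, 100, 100),
            ...     where="props['class'] == 42",
            ... )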
"""
if all(x is None for x in (geometry, where)):
raise ValueError("At least one of geometry or where must be set.")
if geometry_predicate not in self._geometry_predicate_names:
raise ValueError(
"Invalid geometry predicate."
f"Allowed values are: {', '.join(self._geometry_predicate_names)}."
)
query_geometry = geometry
if isinstance(query_geometry, Iterable):
query_geometry = Polygon.from_bounds(*query_geometry)
if geometry_predicate == "centers_within_k":
query_point = Polygon.from_bounds(*query_geometry.bounds).centroid
def bbox_intersects(
annotation_geometry: Geometry, query_geometry: Geometry
) -> bool:
"""True if bounding box of the annotation intersects the query geometry."""
return Polygon.from_bounds(*query_geometry.bounds).intersects(
Polygon.from_bounds(*annotation_geometry.bounds)
)
def centers_within_k(
annotation_geometry: Geometry, query_point: Point, distance: float
) -> bool:
"""True if centre of annotation within k of query geometry center.
Here the "center" is the centroid of the bounds.
"""
ann_centre = Polygon.from_bounds(*annotation_geometry.bounds).centroid
return query_point.dwithin(ann_centre, distance)
def filter_function(annotation: Annotation) -> bool:
"""Filter function for querying annotations.
Args:
annotation (Annotation):
The annotation to filter.
Returns:
bool:
True if the annotation should be included in the
query result.
"""
return ( # Geometry is None or the geometry predicate matches
query_geometry is None
or any(
[
(
geometry_predicate == "bbox_intersects"
and bbox_intersects(annotation.geometry, query_geometry)
),
(
geometry_predicate == "centers_within_k"
and centers_within_k(
annotation.geometry, query_point, distance
)
),
(
geometry_predicate
not in ("bbox_intersects", "centers_within_k")
and self._geometry_predicate(
geometry_predicate, query_geometry, annotation.geometry
)
),
]
)
) and self._eval_where(where, annotation.properties)
return {
key: annotation
for key, annotation in self.items()
if filter_function(annotation)
}
def iquery(
self,
geometry: QueryGeometry,
where: Optional[Predicate] = None,
geometry_predicate: str = "intersects",
    ) -> List[str]:
"""Query the store for annotation keys.
Acts the same as `AnnotationStore.query` except returns keys
instead of annotations.
Args:
geometry (Geometry or Iterable):
Geometry to use when querying. This can be a bounds
(iterable of length 4) or a Shapely geometry (e.g.
Polygon).
where (str or bytes or Callable):
A statement which should evaluate to a boolean value.
Only annotations for which this predicate is true will
be returned. Defaults to None (assume always true). This
may be a string, callable, or pickled function as bytes.
Callables are called to filter each result returned
from the annotation store backend in python before being
returned to the user. A pickle object is, where
possible, hooked into the backend as a user defined
function to filter results during the backend query.
Strings are expected to be in a domain specific language
and are converted to SQL on a best-effort basis. For
supported operators of the DSL see
:mod:`tiatoolbox.annotation.dsl`. E.g. a simple python
expression `props["class"] == 42` will be converted to a
valid SQLite predicate when using `SQLiteStore` and
inserted into the SQL query. This should be faster than
filtering in python after or during the query.
Additionally, the same string can be used across
different backends (e.g. the previous example predicate
                string is valid for both `DictionaryStore` and a
                `SQLiteStore`). On the other hand it has many more
limitations. It is important to note that untrusted user
input should never be accepted to this argument as
arbitrary code can be run via pickle or the parsing of
the string statement.
geometry_predicate:
                A string defining which binary geometry predicate to
use when comparing the query geometry and a geometry in
the store. Only annotations for which this binary
predicate is true will be returned. Defaults to
"intersects". For more information see the `shapely
documentation on binary predicates <https://shapely.
readthedocs.io/en/stable/manual.html#binary-predicates>`_.
Returns:
list:
A list of keys for each Annotation.
"""
if geometry_predicate not in self._geometry_predicate_names:
raise ValueError(
"Invalid geometry predicate."
f"Allowed values are: {', '.join(self._geometry_predicate_names)}."
)
query_geometry = geometry
if isinstance(query_geometry, Iterable):
query_geometry = Polygon.from_bounds(*query_geometry)
return [
key
for key, annotation in self.items()
if (
self._geometry_predicate(
geometry_predicate, query_geometry, annotation.geometry
)
and self._eval_where(where, annotation.properties)
)
]
def bquery(
self,
geometry: Optional[QueryGeometry] = None,
where: Predicate = None,
) -> Dict[str, Tuple[float, float, float, float]]:
"""Query the store for annotation bounding boxes.
Acts similarly to `AnnotationStore.query` except it checks for
intersection between stored and query geometry bounding boxes.
This may be faster than a regular query in some cases, e.g. for
        SQLiteStore with a large number of annotations.
Note that this method only checks for bounding box intersection
and therefore may give a different result to using
`AnnotationStore.query` with a box polygon and the "intersects"
geometry predicate. Also note that geometry predicates are not
supported for this method.
Args:
geometry (Geometry or Iterable):
Geometry to use when querying. This can be a bounds
(iterable of length 4) or a Shapely geometry (e.g.
Polygon). If a geometry is provided, the bounds of the
geometry will be used for the query. Full geometry
intersection is not used for the query method.
where (str or bytes or Callable):
A statement which should evaluate to a boolean value.
Only annotations for which this predicate is true will
be returned. Defaults to None (assume always true). This
may be a string, callable, or pickled function as bytes.
Callables are called to filter each result returned
from the annotation store backend in python before being
returned to the user. A pickle object is, where
possible, hooked into the backend as a user defined
function to filter results during the backend query.
Strings are expected to be in a domain specific language
and are converted to SQL on a best-effort basis. For
supported operators of the DSL see
:mod:`tiatoolbox.annotation.dsl`. E.g. a simple python
expression `props["class"] == 42` will be converted to a
valid SQLite predicate when using `SQLiteStore` and
inserted into the SQL query. This should be faster than
filtering in python after or during the query.
Additionally, the same string can be used across
different backends (e.g. the previous example predicate
string is valid for both `DictionaryStore` and a
                `SQLiteStore`). On the other hand it has many more
limitations. It is important to note that untrusted user
input should never be accepted to this argument as
arbitrary code can be run via pickle or the parsing of
the string statement.
Returns:
            dict:
                A dictionary mapping keys to bounding boxes
                (min_x, min_y, max_x, max_y) for each Annotation.
Example:
>>> from tiatoolbox.annotation.storage import DictionaryStore
>>> from shapely.geometry import Polygon
>>> store = DictionaryStore()
>>> store.append(
... Annotation(
... geometry=Polygon.from_bounds(0, 0, 1, 1),
... properties={"class": 42},
... ),
... key="foo",
... )
>>> store.bquery(where="props['class'] == 42")
{'foo': (0.0, 0.0, 1.0, 1.0)}
"""
query_geometry = geometry
if isinstance(query_geometry, Iterable):
query_geometry = Polygon.from_bounds(*query_geometry)
return {
key: annotation.geometry.bounds
for key, annotation in self.items()
            if (
                query_geometry is None
                or Polygon.from_bounds(*annotation.geometry.bounds).intersects(
                    Polygon.from_bounds(*query_geometry.bounds)
                )
            )
            and self._eval_where(where, annotation.properties)
}
def pquery(
self,
select: Select,
geometry: Optional[QueryGeometry] = None,
where: Optional[Predicate] = None,
unique: bool = True,
squeeze: bool = True,
) -> Union[Dict[str, Any], Set[Any]]:
"""Query the store for annotation properties.
Acts similarly to `AnnotationStore.query` but returns only the
value defined by `select`.
Args:
select (str or bytes or Callable):
A statement defining the value to look up from the
annotation properties. If `select = "*"`, all properties
are returned for each annotation (`unique` must be
False).
geometry (Geometry or Iterable):
Geometry to use when querying. This can be a bounds
(iterable of length 4) or a Shapely geometry (e.g.
Polygon). If a geometry is provided, the bounds of the
geometry will be used for the query. Full geometry
intersection is not used for the query method.
where (str or bytes or Callable):
A statement which should evaluate to a boolean value.
Only annotations for which this predicate is true will
be returned. Defaults to None (assume always true). This
may be a string, callable, or pickled function as bytes.
                Callables are called to filter each result returned
                from the annotation store backend in python before being
returned to the user. A pickle object is, where
possible, hooked into the backend as a user defined
function to filter results during the backend query.
Strings are expected to be in a domain specific language
and are converted to SQL on a best-effort basis. For
supported operators of the DSL see
:mod:`tiatoolbox.annotation.dsl`. E.g. a simple python
expression `props["class"] == 42` will be converted to a
valid SQLite predicate when using `SQLiteStore` and
inserted into the SQL query. This should be faster than
filtering in python after or during the query. It is
important to note that untrusted user input should never
be accepted to this argument as arbitrary code can be
run via pickle or the parsing of the string statement.
unique (bool):
If True, only unique values for each selected property
will be returned as a list of sets. If False, all values
                will be returned as a dictionary mapping keys to values.
Defaults to True.
squeeze (bool):
If True, when querying for a single value with
`unique=True`, the result will be a single set instead
of a list of sets.
Examples:
>>> from tiatoolbox.annotation.storage import DictionaryStore
>>> from shapely.geometry import Point
>>> store = DictionaryStore()
>>> annotation = Annotation(
... geometry=Point(0, 0),
... properties={"class": 42},
... )
>>> store.append(annotation, "foo")
>>> store.pquery("*", unique=False)
            {'foo': {'class': 42}}
>>> from tiatoolbox.annotation.storage import DictionaryStore
>>> from shapely.geometry import Point
>>> store = DictionaryStore()
>>> annotation = Annotation(
... geometry=Point(0, 0),
... properties={"class": 42},
... )
>>> store.append(annotation, "foo")
>>> store.pquery("props['class']")
            {42}
>>> annotation = Annotation(Point(1, 1), {"class": 123})
>>> store.append(annotation, "foo")
>>> store.pquery("props['class']")
... {42, 123}
""" # noqa
if where is not None and type(select) is not type(where):
raise TypeError("select and where must be of the same type")
if not isinstance(select, (str, bytes)) and not callable(select):
raise TypeError(
f"select must be str, bytes, or callable, not {type(select)}"
)
# Are we scanning through all annotations?
is_scan = not any((geometry, where))
items = self.items() if is_scan else self.query(geometry, where).items()
def select_values(
select: Select, annotation: Annotation
) -> Union[Properties, Any, Tuple[Any, ...]]:
"""Get the value(s) to return from an annotation via a select.
Args:
select (str or bytes or Callable):
A statement defining the value to look up from the
annotation properties. If `select = "*"`, all properties
are returned for each annotation (`unique` must be
False).
annotation (Annotation):
The annotation to get the value(s) from.
Raises:
ValueError:
If arguments have incompatible values.
Returns:
                Union[Properties, Any, Tuple[Any, ...]]:
                    The value(s) looked up from the annotation
                    properties: the full properties dictionary when
                    `select` is "*", otherwise the result of
                    evaluating `select` against the properties.
""" # noqa Q440, Q441
if select == "*" and unique:
raise ValueError("unique=True cannot be used with select='*'")
if select == "*": # Special case for all properties
return annotation.properties
if isinstance(select, str):
py_locals = {"props": annotation.properties}
return eval(select, PY_GLOBALS, py_locals) # skipcq: PYL-W0123
if isinstance(select, bytes):
return pickle.loads(select)(annotation.properties) # skipcq: BAN-B301
return select(annotation.properties)
return self._handle_pquery_results(
select, unique, squeeze, items, select_values
)
def nquery(
self,
geometry: Optional[Geometry] = None,
where: Optional[Predicate] = None,
n_where: Optional[Predicate] = None,
distance: float = 5.0,
geometry_predicate: str = "intersects",
mode: str = "poly-poly",
) -> Dict[str, Dict[str, Annotation]]:
"""Query for annotations within a distance of another annotation.
Args:
geometry (Geometry):
A geometry to use to query for the initial set of
annotations to perform a neighbourhood search around. If
None, all annotations in the store are considered.
Defaults to None.
where (str or bytes or Callable):
A statement which should evaluate to a boolean value.
Only annotations for which this predicate is true will be
returned. Defaults to None (assume always true). This may
be a string, callable, or pickled function as bytes.
                Callables are called to filter each result returned from
                the annotation store backend in python before being returned
to the user. A pickle object is, where possible, hooked
into the backend as a user defined function to filter
results during the backend query. Strings are expected to
be in a domain specific language and are converted to SQL
on a best-effort basis. For supported operators of the DSL
see :mod:`tiatoolbox.annotation.dsl`. E.g. a simple python
expression `props["class"] == 42` will be converted to a
valid SQLite predicate when using `SQLiteStore` and
inserted into the SQL query. This should be faster than
filtering in python after or during the query. It is
important to note that untrusted user input should never
be accepted to this argument as arbitrary code can be
run via pickle or the parsing of the string statement.
n_where (str or bytes or Callable):
Predicate to filter the nearest annotations by. Defaults
to None (assume always true). See `where` for more
details.
distance (float):
The distance to search for annotations within. Defaults to
5.0.
geometry_predicate (str):
The predicate to use when comparing geometries. Defaults
to "intersects". Other options include "within" and
"contains". Ignored if `mode` is "boxpoint-boxpoint" or
"box-box".
mode (tuple[str, str] or str):
The method to use for determining distance during the
query. Defaults to "box-box". This may significantly
change performance depending on the backend. Possible
options are:
- "poly-poly": Polygon boundary to polygon boundary.
- "boxpoint-boxpoint": Bounding box centre point to
bounding box centre point.
- "box-box": Bounding box to bounding box.
May be specified as a dash separated string or a tuple
of two strings. The first string is the mode for the
query geometry and the second string is the mode for
the nearest annotation geometry.
Returns:
Dict[str, Dict[str, Annotation]]:
A dictionary mapping annotation keys to another
dictionary which represents an annotation key and all
annotations within `distance` of it.
The `mode` argument is used to determine how to calculate the
distance between annotations. The default mode is "box-box".
The "box-box" mode uses the bounding boxes of stored annotations
and the query geometry when determining if annotations are
within the neighbourhood.
.. figure:: ../images/nquery-box-box.png
:width: 512
:alt: "box-box" mode
The "poly-poly" performs full polygon-polygon intersection with
the polygon boundary of stored annotations and the query
geometry to determine if annotations are within the
neighbourhood.
.. figure:: ../images/nquery-poly-poly.png
:width: 512
:alt: "poly-poly" mode
The "boxpoint-boxpoint" mode uses the centre point of the
bounding box of stored annotations and the query geometry when
determining if annotations are within the neighbourhood.
.. figure:: ../images/nquery-boxpoint-boxpoint.png
:width: 512
:alt: "boxpoint-boxpoint" mode
Examples:
Example bounding box query with one neighbour within a
distance of 2.0.
            >>> from shapely.geometry import Point, Polygon
>>> from tiatoolbox.annotation.storage import Annotation, SQLiteStore
>>> store = SQLiteStore()
>>> annotation = Annotation(Point(0, 0), {"class": 42})
>>> store.append(annotation, "foo")
>>> neighbour = Annotation(Point(1, 1), {"class": 123})
>>> store.add(neighbour, "bar")
>>> store.nquery((-.5, -.5, .5, .5), distance=2.0)
            {
                "foo": {
                    "bar": Annotation(POINT (1 1), {'class': 123}),
                },
            }
Example bounding box query with no neighbours within a
distance of 1.0.
>>> from shapely.geometry import Point
>>> from tiatoolbox.annotation.storage import Annotation, SQLiteStore
>>> store = SQLiteStore()
>>> annotation = Annotation(Point(0, 0), {"class": 42})
>>> store.add(annotation, "foo")
>>> store.nquery((-.5, -.5, .5, .5), distance=1.0)
{"foo": {Annotation(POINT (0 0), {'class': 42}): {}}}
            Example of querying for TILs - lymphocytes within 32 units
            of tumour cells.
>>> from tiatoolbox.annotation.storage import SQLiteStore
>>> store = SQLiteStore("hovernet-pannuke-output.db")
>>> tils = store.nquery(
... where="props['class'] == 1", # Tumour cells
... n_where="props['class'] == 0", # Lymphocytes
... distance=32.0, # n_where within 32 units of where
... mode="point-point", # Use point to point distance
... )
"""
# This is a naive generic implementation which can be overridden
# by back ends which can do this more efficiently.
if not isinstance(mode, (str, tuple)):
raise TypeError("mode must be a string or tuple of strings")
if isinstance(mode, str):
mode = tuple(mode.split("-"))
if mode not in (("box", "box"), ("boxpoint", "boxpoint"), ("poly", "poly")):
raise ValueError(
"mode must be one of 'box-box', 'boxpoint-boxpoint', or 'poly-poly'"
)
from_mode, _ = mode
# Initial selection of annotations to query around
selection = self.query(
geometry=geometry,
where=where,
)
# Query for others within the distance of initial selection
result = {}
for key, ann in selection.items():
geometry = ann.geometry
if from_mode == "box":
geometry_predicate = "bbox_intersects"
min_x, min_y, max_x, max_y = ann.geometry.bounds
geometry = Polygon.from_bounds(
min_x - distance,
min_y - distance,
max_x + distance,
max_y + distance,
)
elif from_mode == "boxpoint":
geometry_predicate = "centers_within_k"
elif from_mode == "poly": # pragma: no branch
geometry = ann.geometry
geometry = geometry.buffer(distance)
subquery_result = self.query(
geometry=geometry,
where=n_where,
geometry_predicate=geometry_predicate,
distance=distance,
)
if subquery_result:
result[key] = subquery_result
return result
@staticmethod
def _handle_pquery_results(
select: Select,
unique: bool,
squeeze: bool,
        items: Iterable[Tuple[str, Annotation]],
get_values: Callable[
[Select, Annotation], Union[Properties, Any, Tuple[Any, ...]]
],
):
"""Package the results of a pquery into the right output format.
Args:
select (str or bytes or Callable):
A statement defining the value to look up from the
annotation properties. If `select = "*"`, all properties
are returned for each annotation (`unique` must be
False).
unique (bool):
If True, only unique values for each selected property
will be returned as a list of sets. If False, all values
                will be returned as a dictionary mapping keys to values.
Defaults to True.
squeeze (bool):
If True, when querying for a single value with
`unique=True`, the result will be a single set instead
of a list of sets.
            items (Iterable[Tuple[str, Annotation]]):
                An iterable of (key, annotation) pairs to process.
get_values (Callable):
A function to get the values to return from an
annotation via a select.
""" # noqa Q440, Q441
result = defaultdict(set) if unique else {}
for key, annotation in items:
values = get_values(select, annotation)
if unique:
# Wrap scalar values in a tuple
if not isinstance(values, tuple):
values = (values,)
# Add each value to the result set
for i, value in enumerate(values):
result[i].add(value)
else:
result[key] = values
if unique:
result = list(result.values())
if unique and squeeze and len(result) == 1:
result = result[0]
return result # noqa CCR001
def features(self) -> Generator[Dict[str, Any], None, None]:
"""Return annotations as a list of geoJSON features.
Returns:
list:
List of features as dictionaries.
"""
for a in self.values():
yield a.to_feature()
def to_geodict(self) -> Dict[str, Any]:
"""Return annotations as a dictionary in geoJSON format.
Returns:
dict:
Dictionary of annotations in geoJSON format.
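        Example:
            A minimal sketch with an empty `DictionaryStore`:
            >>> from tiatoolbox.annotation.storage import DictionaryStore
            >>> DictionaryStore().to_geodict()
            {'type': 'FeatureCollection', 'features': []}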
"""
return {
"type": "FeatureCollection",
"features": list(self.features()),
}
@staticmethod
def _dump_cases(
fp: Union[IO, str, Path, None],
file_fn: Callable[[IO], None],
none_fn: Callable[[], Union[str, bytes]],
) -> Optional[Union[str, bytes]]:
"""Helper function to handle cases for dumping.
Args:
fp:
The file path or handle to dump to.
file_fn(Callable):
The function to call when fp is a file handle.
none_fn(Callable):
The function to call when fp is None.
Returns:
Any:
The result of dump. Depends on the provided functions.
"""
if fp is not None:
# It is a file-like object, write to it
if hasattr(fp, "write"):
return file_fn(fp)
# Turn a path into a file handle, then write to it
with open(fp, "w", encoding="utf-8") as file_handle:
return file_fn(file_handle)
# Return as str or bytes if no handle/path is given
return none_fn()
@staticmethod
def _load_cases(
fp: Union[IO, str, Path],
string_fn: Callable[[Union[str, bytes]], Any],
file_fn: Callable[[IO], Any],
) -> Any:
"""Loads cases for an input file handle or path."""
with contextlib.suppress(OSError):
if isinstance(fp, (Path, str)) and Path(fp).exists():
with open(fp) as file_handle:
return file_fn(file_handle)
if isinstance(fp, (str, bytes)):
return string_fn(fp)
if hasattr(fp, "read"):
return file_fn(fp)
raise IOError("Invalid file handle or path.")
@classmethod
def from_geojson(
cls,
fp: Union[IO, str],
scale_factor: Tuple[float, float] = (1, 1),
origin: Tuple[float, float] = (0, 0),
) -> "AnnotationStore":
"""Create a new database with annotations loaded from a geoJSON file.
Args:
fp (Union[IO, str, Path]):
The file path or handle to load from.
scale_factor (Tuple[float, float]):
The scale factor in each dimension to use when loading the annotations.
All coordinates will be multiplied by this factor to allow import of
annotations saved at non-baseline resolution.
origin (Tuple[float, float]):
The x and y coordinates to use as the origin for the annotations.
Returns:
AnnotationStore:
A new annotation store with the annotations loaded from the file.
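        Example:
            An illustrative sketch; the file name is hypothetical and
            the annotations are assumed to have been exported at half
            of the baseline resolution:
            >>> from tiatoolbox.annotation.storage import SQLiteStore
            >>> store = SQLiteStore.from_geojson(
            ...     "annotations.geojson", scale_factor=(2, 2)
            ... )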
"""
store = cls()
store.add_from_geojson(fp, scale_factor, origin=origin)
return store
def add_from_geojson(
self,
fp: Union[IO, str],
scale_factor: Tuple[float, float] = (1, 1),
origin: Tuple[float, float] = (0, 0),
) -> None:
"""Add annotations from a .geojson file to an existing store. Make
the best effort to create valid shapely geometries from provided contours.
Args:
fp (Union[IO, str, Path]):
The file path or handle to load from.
            scale_factor (Tuple[float, float]):
The scale factor to use when loading the annotations. All coordinates
will be multiplied by this factor to allow import of annotations saved
at non-baseline resolution.
            origin (Tuple[float, float]):
The x and y coordinates to use as the origin for the annotations.
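        Example:
            An illustrative sketch; the file name is hypothetical:
            >>> from tiatoolbox.annotation.storage import SQLiteStore
            >>> store = SQLiteStore()
            >>> store.add_from_geojson("annotations.geojson")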
"""
def transform_geometry(geom):
"""Helper function to transform a geometry if needed."""
if origin != (0, 0):
# transform coords to be relative to given origin.
geom = translate(geom, -origin[0], -origin[1])
if scale_factor != (1, 1):
geom = scale(
geom,
xfact=scale_factor[0],
yfact=scale_factor[1],
origin=(0, 0, 0),
)
return geom
geojson = self._load_cases(
fp=fp,
string_fn=json.loads,
file_fn=json.load,
)
annotations = [
Annotation(
transform_geometry(
feature2geometry(feature["geometry"]),
),
feature["properties"],
)
for feature in geojson["features"]
]
print(f"added {len(annotations)} annotations")
self.append_many(annotations)
def to_geojson(self, fp: Optional[Union[IO, str, Path]] = None) -> Optional[str]:
"""Serialise the store to geoJSON.
For more information on the geoJSON format see:
- https://geojson.org/
- https://tools.ietf.org/html/rfc7946
Args:
fp (IO):
                A file-like object supporting `.write`. Defaults to None
which returns geoJSON as a string.
Returns:
Optional[str]:
None if writing to file or the geoJSON string if `fp` is
None.
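        Example:
            A minimal sketch serialising an empty store to a string:
            >>> from tiatoolbox.annotation.storage import DictionaryStore
            >>> DictionaryStore().to_geojson()
            '{"type": "FeatureCollection", "features": []}'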
"""
def write_geojson_to_file_handle(file_handle: IO):
"""Write the store to a GeoJson file give a handle.
This replaces the naive method which uses a lot of memory::
json.dump(self.to_geodict(), file_handle)
"""
            # Write head
            file_handle.write('{"type": "FeatureCollection", "features": [')
            # Track the write position (guards against an empty store)
            tell = file_handle.tell()
            # Write each feature
            for feature in self.features():
                file_handle.write(json.dumps(feature))  # skipcq: PY-W0079
                tell = file_handle.tell()
                # Comma separate features
                file_handle.write(",")
            # Seek to before the trailing comma (if any)
            file_handle.seek(tell, os.SEEK_SET)
# Write tail
file_handle.write("]}")
return self._dump_cases(
fp=fp,
file_fn=write_geojson_to_file_handle,
none_fn=lambda: json.dumps(self.to_geodict()),
)
def to_ndjson(self, fp: Optional[IO] = None) -> Optional[str]:
"""Serialise to New Line Delimited JSON.
Each line contains a JSON object with the following format:
.. code-block:: json
{
"key": "...",
"geometry": {
"type": "...",
"coordinates": [...]
},
"properties": {
"...": "..."
}
}
That is a geoJSON object with an additional key field.
For more information on the NDJSON format see:
- ndjson Specification: http://ndjson.org
- JSON Lines Documentation: https://jsonlines.org
- Streaming JSON: https://w.wiki/4Qan
- GeoJSON RFC: https://tools.ietf.org/html/rfc7946
- JSON RFC: https://tools.ietf.org/html/rfc7159
Args:
            fp (IO): A file-like object supporting `.write`. Defaults to
None which returns geoJSON as a string.
Returns:
Optional[str]:
                None if writing to file, or the NDJSON string if `fp` is
None.
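        Example:
            A minimal sketch; an empty store yields an empty string:
            >>> from tiatoolbox.annotation.storage import DictionaryStore
            >>> DictionaryStore().to_ndjson()
            ''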
"""
string_lines_generator = (
json.dumps({"key": key, **annotation.to_feature()}, separators=(",", ":"))
+ "\n"
for key, annotation in self.items()
)
return self._dump_cases(
fp=fp,
file_fn=lambda fp: fp.writelines(string_lines_generator),
none_fn=lambda: "".join(string_lines_generator),
)
@classmethod
def from_ndjson(cls, fp: Union[IO, str]) -> "AnnotationStore":
"""Load annotations from NDJSON.
Expects each line to be a JSON object with the following format:
.. code-block:: json
{
"key": "...",
"geometry": {
"type": "...",
"coordinates": [...]
},
"properties": {
"...": "..."
}
}
That is a geoJSON object with an additional key field. If this key
field is missing, then a new UUID4 key will be generated for this
annotation.
Args:
fp (IO): A file-like object supporting `.read`.
Returns:
AnnotationStore:
The loaded annotations.
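        Example:
            A round-trip sketch via an in-memory NDJSON string:
            >>> from shapely.geometry import Point
            >>> from tiatoolbox.annotation.storage import Annotation, DictionaryStore
            >>> store = DictionaryStore()
            >>> key = store.append(Annotation(Point(0, 0), {"class": 42}), "foo")
            >>> store2 = DictionaryStore.from_ndjson(store.to_ndjson())
            >>> "foo" in store2
            True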
"""
store = cls()
for line in cls._load_cases(
fp=fp,
string_fn=lambda fp: fp.splitlines(),
file_fn=lambda fp: fp.readlines(),
):
dictionary = json.loads(line)
key = dictionary.get("key", uuid.uuid4().hex)
geometry = feature2geometry(dictionary["geometry"])
properties = dictionary["properties"]
store.append(Annotation(geometry, properties), key)
return store
@classmethod
def from_dataframe(cls, df: pd.DataFrame) -> "AnnotationStore":
"""Converts to AnnotationStore from :class:`pandas.DataFrame`."""
store = cls()
for key, row in df.iterrows():
geometry = row["geometry"]
properties = dict(row.filter(regex="^(?!geometry|key).*$"))
store.append(Annotation(geometry, properties), str(key))
return store
def to_dataframe(self) -> pd.DataFrame:
"""Converts AnnotationStore to :class:`pandas.DataFrame`."""
features = (
{
"geometry": annotation.geometry,
"properties": annotation.properties,
"key": key,
}
for key, annotation in self.items()
)
return pd.json_normalize(features).set_index("key")
def transform(self, transform: Callable[[Geometry], Geometry]) -> None:
"""Transform all annotations in the store using provided function.
Useful for transforming coordinates from slide space into
patch/tile/core space, or to a different resolution, for example.
Args:
transform (callable[Geometry, Geometry]):
A function that takes a geometry and returns a new
transformed geometry.
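        Example:
            A sketch shifting all annotations by (100, 100), assuming
            a populated store:
            >>> from shapely.affinity import translate
            >>> store.transform(lambda geom: translate(geom, 100, 100))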
"""
transformed_geoms = {
key: transform(annotation.geometry) for key, annotation in self.items()
}
self.patch_many(transformed_geoms.keys(), transformed_geoms.values())
def __del__(self) -> None:
self.close()
def clear(self) -> None:
"""Remove all annotations from the store.
This is a naive implementation, it simply iterates over all annotations
and removes them. Faster implementations may be possible in specific
cases and may be implemented by subclasses.
"""
for key in list(self.keys()):
del self[key]
class SQLiteMetadata(MutableMapping):
"""Metadata storage for an SQLiteStore.
Attributes:
con (sqlite3.Connection):
The sqlite3 database connection.
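    Example:
        A minimal sketch with an in-memory database:
        >>> import sqlite3
        >>> metadata = SQLiteMetadata(sqlite3.connect(":memory:"))
        >>> metadata["version"] = "1.0.1"
        >>> metadata["version"]
        '1.0.1'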
"""
def __init__(self, con: sqlite3.Connection) -> None:
self.con = con
self.con.execute(
"CREATE TABLE IF NOT EXISTS metadata (key TEXT UNIQUE, value TEXT)"
)
self.con.commit()
def __contains__(self, key: str) -> bool:
cursor = self.con.execute("SELECT 1 FROM metadata WHERE [key] = ?", (key,))
return cursor.fetchone() is not None
def __setitem__(self, key: str, value: Union[dict, list, int, float, str]) -> None:
"""Set a metadata value."""
value = json.dumps(value)
self.con.execute(
"REPLACE INTO metadata (key, value) VALUES (?,?)", (key, value)
)
self.con.commit()
def __getitem__(self, key: str) -> Union[dict, list, int, float, str]:
"""Get a metadata value."""
cursor = self.con.execute("SELECT value FROM metadata WHERE [key] = ?", (key,))
result = cursor.fetchone()
if result is None:
raise KeyError(key)
return json.loads(result[0])
def __delitem__(self, key: str) -> None:
"""Delete a metadata value."""
if key not in self:
raise KeyError(key)
self.con.execute("DELETE FROM metadata WHERE [key] = ?", (key,))
def __iter__(self) -> Iterator[str]:
"""Iterate over all keys."""
cursor = self.con.execute("SELECT [key] FROM metadata")
for row in cursor:
yield row[0]
def __len__(self) -> int:
"""Return the number of metadata entries."""
cursor = self.con.execute("SELECT COUNT(*) FROM metadata")
return cursor.fetchone()[0]
class SQLiteStore(AnnotationStore):
"""SQLite backed annotation store.
    Uses an rtree index for fast spatial queries.
Version History:
1.0.0:
Initial version.
1.0.1 (07/10/2022):
Added optional "area" column and queries sorted/filtered by area.
"""
@classmethod # noqa: A003
def open(cls, fp: Union[Path, str]) -> "SQLiteStore": # noqa: A003
"""Opens :class:`SQLiteStore` from file pointer or path."""
return SQLiteStore(fp)
def __init__(
self,
connection: Union[Path, str, IO] = ":memory:",
compression: str = "zlib",
compression_level: int = 9,
auto_commit: bool = True,
) -> None:
super().__init__()
# Check that JSON and RTree support is enabled
compile_options = self.compile_options()
if sqlite3.sqlite_version_info >= (3, 38, 0):
if not all(
["OMIT_JSON" not in compile_options, "ENABLE_RTREE" in compile_options]
):
raise EnvironmentError(
"""RTREE sqlite3 compile option is required, and
JSON must not be disabled with OMIT_JSON compile option"""
)
else:
if not all(
["ENABLE_JSON1" in compile_options, "ENABLE_RTREE" in compile_options]
):
raise EnvironmentError(
"RTREE and JSON1 sqlite3 compile options are required."
)
# Check that math functions are enabled
if "ENABLE_MATH_FUNCTIONS" not in compile_options:
logger.warning(
"SQLite math functions are not enabled."
" This may cause problems with some queries."
" For example, floor division (//) will not work."
" For a full list see https://tia-toolbox.readthedocs.io/"
"en/v%s/_autosummary/tiatoolbox.annotation.dsl.html",
tiatoolbox.__version__,
)
# Set up database connection and cursor
self.connection = connection
self.path = self._connection_to_path(self.connection)
self.auto_commit = auto_commit
# Check if the path is a non-empty file
exists = (
# Use 'and' to short-circuit
self.path.is_file()
and self.path.stat().st_size > 0
)
self.con = sqlite3.connect(str(self.path), isolation_level="DEFERRED")
self.con.execute("BEGIN")
# Set up metadata
self.metadata = SQLiteMetadata(self.con)
if not exists:
self.metadata["version"] = "1.0.1"
self.metadata["compression"] = compression
self.metadata["compression_level"] = compression_level
        # Cache locally: repeatedly fetching from the DB during (de)serialisation is slow
self.compression = self.metadata["compression"]
self.compression_level = self.metadata["compression_level"]
# Register predicate functions as custom SQLite functions
def wkb_predicate(
name: str, wkb_a: bytes, b: bytes, cx: float, cy: float
) -> bool:
"""Wrapper function to allow WKB as inputs to binary predicates."""
a = wkb.loads(wkb_a)
b = self._unpack_geometry(b, cx, cy)
return self._geometry_predicate(name, a, b)
def pickle_expression(pickle_bytes: bytes, properties: str) -> bool:
"""Function to load and execute pickle bytes with a "properties" dict."""
fn = pickle.loads(pickle_bytes) # skipcq: BAN-B301
properties = json.loads(properties)
return fn(properties)
def get_area(wkb_bytes: bytes, cx: float, cy: float) -> float:
"""Function to get the area of a geometry."""
return self._unpack_geometry(
wkb_bytes,
cx,
cy,
).area
# Register custom functions
def register_custom_function(
name: str, nargs: int, fn: Callable, deterministic: bool = False
) -> None:
"""Register a custom SQLite function.
Only Python >= 3.8 supports deterministic functions,
fallback to without this argument if not available.
Args:
name:
The name of the function.
nargs:
The number of arguments the function takes.
fn:
The function to register.
deterministic:
Whether the function is deterministic.
"""
try:
self.con.create_function(name, nargs, fn, deterministic=deterministic)
except TypeError:
self.con.create_function(name, nargs, fn)
register_custom_function(
"geometry_predicate", 5, wkb_predicate, deterministic=True
)
register_custom_function(
"pickle_expression", 2, pickle_expression, deterministic=True
)
register_custom_function("REGEXP", 2, py_regexp)
register_custom_function("REGEXP", 3, py_regexp)
register_custom_function("LISTSUM", 1, json_list_sum)
register_custom_function("CONTAINS", 1, json_contains)
register_custom_function("get_area", 3, get_area)
if exists:
self.table_columns = self._get_table_columns()
return
# Create tables for geometry and RTree index
self.con.execute(
"""
CREATE VIRTUAL TABLE rtree USING rtree(
id, -- Integer primary key
min_x, max_x, -- 1st dimension min, max
min_y, max_y -- 2nd dimension min, max
)
"""
)
self.con.execute(
"""
CREATE TABLE annotations(
id INTEGER PRIMARY KEY, -- Integer primary key
key TEXT UNIQUE, -- Unique identifier (UUID)
objtype TEXT, -- Object type
cx FLOAT NOT NULL, -- X of centroid/representative point
cy FLOAT NOT NULL, -- Y of centroid/representative point
geometry BLOB, -- Detailed geometry
properties TEXT, -- JSON properties
area FLOAT NOT NULL -- Area (for ordering)
)
"""
)
if self.auto_commit:
self.con.commit()
self.table_columns = self._get_table_columns()
def serialise_geometry( # skipcq: PYL-W0221
self, geometry: Geometry
) -> Union[str, bytes]:
"""Serialise a geometry to WKB with optional compression.
Converts shapely geometry objects to well-known binary (WKB) and
applies optional compression.
Args:
geometry(Geometry):
The Shapely geometry to be serialised.
Returns:
bytes or str:
The serialised geometry.
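        Example:
            A round-trip sketch with the default zlib compression:
            >>> from shapely.geometry import Point
            >>> store = SQLiteStore()
            >>> data = store.serialise_geometry(Point(1, 2))
            >>> store.deserialize_geometry(data).wkt
            'POINT (1 2)'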
"""
data = geometry.wkb
if self.compression is None:
return data
if self.compression == "zlib":
return zlib.compress(data, level=self.compression_level)
raise ValueError("Unsupported compression method.")
def _unpack_geometry(
self, data: Union[str, bytes], cx: float, cy: float
) -> Geometry:
"""Return the geometry using WKB data and rtree bounds index.
For space optimisation, points are stored as centroids and all
other geometry types are stored as WKB. This function unpacks
the WKB data and uses the rtree index to find the centroid for
points where the data is null.
Args:
data(bytes or str):
The WKB/WKT data to be unpacked.
            cx(float):
The X coordinate of the centroid/representative point.
cy(float):
The Y coordinate of the centroid/representative point.
Returns:
Geometry:
The Shapely geometry.
"""
return Point(cx, cy) if data is None else self.deserialize_geometry(data)
def deserialize_geometry( # skipcq: PYL-W0221
self,
data: Union[str, bytes],
) -> Geometry:
"""Deserialize a geometry from a string or bytes.
Args:
data(bytes or str):
The serialised representation of a Shapely geometry.
Returns:
Geometry:
The deserialized Shapely geometry.
"""
if self.compression == "zlib":
data = zlib.decompress(data)
elif self.compression is not None:
raise ValueError("Unsupported compression method.")
if isinstance(data, str):
return wkt.loads(data)
return wkb.loads(data)
@staticmethod
def compile_options() -> List[str]:
"""Get the list of options that sqlite3 was compiled with.
Example:
            >>> for opt in SQLiteStore.compile_options():
            ...     print(opt)
COMPILER=gcc-7.5.0
ENABLE_COLUMN_METADATA
ENABLE_DBSTAT_VTAB
ENABLE_FTS3
ENABLE_FTS3_PARENTHESIS
ENABLE_FTS3_TOKENIZER
ENABLE_FTS4
ENABLE_FTS5
ENABLE_JSON1
ENABLE_LOAD_EXTENSION
ENABLE_PREUPDATE_HOOK
ENABLE_RTREE
ENABLE_SESSION
ENABLE_STMTVTAB
ENABLE_UNLOCK_NOTIFY
ENABLE_UPDATE_DELETE_LIMIT
HAVE_ISNAN
LIKE_DOESNT_MATCH_BLOBS
MAX_SCHEMA_RETRY=25
MAX_VARIABLE_NUMBER=250000
OMIT_LOOKASIDE
SECURE_DELETE
SOUNDEX
TEMP_STORE=1
THREADSAFE=1
"""
with sqlite3.connect(":memory:") as conn:
conn.enable_load_extension(True)
options = conn.execute("pragma compile_options").fetchall()
return [opt for opt, in options]
def close(self) -> None:
"""Closes :class:`SQLiteStore` from file pointer or path."""
if self.auto_commit:
self.con.commit()
self.optimize(vacuum=False, limit=1000)
self.con.close()
def _make_token(self, annotation: Annotation, key: Optional[str]) -> Dict:
"""Create token data dict for tokenized SQL transaction."""
key = key or str(uuid.uuid4())
geometry = annotation.geometry
if geometry.geom_type == "Point":
serialised_geometry = None
else:
serialised_geometry = self.serialise_geometry(geometry)
return {
"key": key,
"geometry": serialised_geometry,
"cx": geometry.centroid.x,
"cy": geometry.centroid.y,
"min_x": geometry.bounds[0],
"min_y": geometry.bounds[1],
"max_x": geometry.bounds[2],
"max_y": geometry.bounds[3],
"geom_type": geometry.geom_type,
"properties": json.dumps(annotation.properties, separators=(",", ":")),
"area": geometry.area,
}
def append_many(
self,
annotations: Iterable[Annotation],
keys: Optional[Iterable[str]] = None,
) -> List[str]:
"""Appends new annotations to specified keys."""
annotations = list(annotations)
keys = list(keys) if keys else [str(uuid.uuid4()) for _ in annotations]
self._validate_equal_lengths(keys, annotations)
cur = self.con.cursor()
if self.auto_commit:
cur.execute("BEGIN")
result = []
for annotation, key in zip(annotations, keys):
self._append(key, annotation, cur)
result.append(key)
if self.auto_commit:
self.con.commit()
return result
def _append(self, key: str, annotation: Annotation, cur: sqlite3.Cursor) -> None:
"""Append without starting a transaction.
Args:
key(str):
The unique identifier (UUID) for the annotation.
annotation(Annotation):
The annotation to be appended.
cur(sqlite3.Cursor):
The cursor to use for the transaction.
"""
token = self._make_token(
annotation=annotation,
key=key,
)
cur.execute(
"""
INSERT INTO annotations VALUES(
NULL, :key, :geom_type,
:cx, :cy, :geometry, :properties, :area
)
""",
token,
)
row_id = cur.lastrowid
token.update({"row_id": row_id})
cur.execute(
"""
INSERT INTO rtree VALUES(
:row_id, :min_x, :max_x, :min_y, :max_y
)
""",
token,
)
@staticmethod
def _initialize_query_string_parameters(
query_geometry: Optional[Geometry],
query_parameters: Dict[str, Any],
geometry_predicate: Optional[str],
columns: str,
where: Union[bytes, str],
distance: float = 0,
) -> Tuple[str, Dict[str, Any]]:
"""Initialises the query string and parameters."""
query_string = (
"SELECT " # skipcq: BAN-B608
+ columns # skipcq: BAN-B608
+ """
FROM annotations, rtree
WHERE annotations.id == rtree.id
"""
)
        # If there is a query geometry, add a simple rtree bounds check
        # to rapidly narrow candidates down.
if query_geometry is not None:
            # Add rtree index checks to the query.
            # For the special case of centers_within_k, check that the
            # centre of the annotation bounds is within the query
            # geometry centroid + k.
if geometry_predicate == "centers_within_k":
# Use rtree index to check distance between points
query_string += (
"AND (POWER((:min_x + :max_x)/2 - (min_x + max_x)/2, 2) + "
" POWER((:min_y + :max_y)/2 - (min_y + max_y)/2, 2)) < :distance2 "
)
query_parameters["distance2"] = distance**2
# Otherwise, perform a regular bounding box intersection
else:
query_string += (
"AND max_x >= :min_x "
"AND min_x <= :max_x "
"AND max_y >= :min_y "
"AND min_y <= :max_y "
)
# Find the bounds of the geometry for the rtree index
min_x, min_y, max_x, max_y = query_geometry.bounds
# Update query parameters
query_parameters.update(
{
"min_x": min_x,
"max_x": max_x,
"min_y": min_y,
"max_y": max_y,
"geometry_predicate": geometry_predicate,
"query_geometry": query_geometry.wkb,
}
)
# The query is a full intersection check, not a simple bounds
# check only.
if geometry_predicate is not None and geometry_predicate not in (
"bbox_intersects",
"centers_within_k",
):
query_string += (
"\nAND geometry_predicate("
":geometry_predicate, :query_geometry, geometry, cx, cy"
") "
)
query_parameters["geometry_predicate"] = geometry_predicate
query_parameters["query_geometry"] = query_geometry.wkb
# Predicate is pickled function
if isinstance(where, bytes):
query_string += "\nAND pickle_expression(:where, properties)"
query_parameters["where"] = where
# Predicate is a string
if isinstance(where, str):
sql_predicate = eval(where, SQL_GLOBALS, {}) # skipcq: PYL-W0123
query_string += f" AND {sql_predicate}"
return query_string, query_parameters
def _query(
self,
columns: str,
geometry: Optional[Geometry] = None,
callable_columns: Optional[str] = None,
geometry_predicate="intersects",
where: Optional[Predicate] = None,
unique: bool = False,
no_constraints_ok: bool = False,
index_warning: bool = False,
min_area=None,
distance: float = 0,
) -> sqlite3.Cursor:
"""Common query construction logic for `query` and `iquery`.
Args:
columns(str):
The columns to select.
geometry(tuple or Geometry):
The geometry being queried against.
            callable_columns(str):
                The columns to select when a callable is given to
                `where`.
            geometry_predicate(str):
                The binary geometry predicate to use when comparing
                the query geometry with geometries in the store.
where (str or bytes or Callable):
The predicate to evaluate against candidate properties
during the query.
unique(bool):
Whether to return only unique results. Defaults to
False.
no_constraints_ok(bool):
Whether to allow the query to return results without
constraints (e.g. when the geometry or where predicate
is not provided). Defaults to False.
index_warning(bool):
Whether to warn if the query is not using an index.
                Defaults to False.
            min_area(float):
                Minimum area for returned annotations. Requires the
                store to have an "area" column.
distance (float):
Distance used when performing a distance based query.
E.g. "centers_within_k" geometry predicate.
Returns:
sqlite3.Cursor:
A database cursor for the current query.
"""
if not no_constraints_ok and all(x is None for x in (geometry, where)):
raise ValueError("At least one of `geometry` or `where` must be specified.")
query_geometry = geometry
if callable_columns is None:
callable_columns = columns
if geometry_predicate not in self._geometry_predicate_names:
raise ValueError(
"Invalid geometry predicate."
f"Allowed values are: {', '.join(self._geometry_predicate_names)}."
)
cur = self.con.cursor()
        # Normalise a bounds query geometry to a Polygon
if isinstance(query_geometry, Iterable):
query_geometry = Polygon.from_bounds(*query_geometry)
if isinstance(where, Callable):
columns = callable_columns
query_parameters = {}
query_string, query_parameters = self._initialize_query_string_parameters(
query_geometry,
query_parameters,
geometry_predicate,
columns,
where,
distance=distance,
)
if min_area is not None and "area" in self.table_columns:
query_string += f"\nAND area > {min_area}"
elif min_area is not None:
raise ValueError(
"""Cannot use `min_area` without an area column.
SQLiteStore.add_area_column() can be used to add an area column."""
)
if unique:
query_string = query_string.replace("SELECT", "SELECT DISTINCT")
# Warn if the query is not using an index
if index_warning:
query_plan = cur.execute(
"EXPLAIN QUERY PLAN " + query_string, query_parameters
).fetchone()
if "USING INDEX" not in query_plan[-1]:
logger.warning(
"Query is not using an index. "
"Consider adding an index to improve performance.",
stacklevel=2,
)
# if area column exists, sort annotations by area
if "area" in self.table_columns:
query_string += "\nORDER BY area DESC"
cur.execute(query_string, query_parameters)
return cur
def iquery(
self,
geometry: Optional[QueryGeometry] = None,
where: Optional[Predicate] = None,
geometry_predicate="intersects",
min_area=None,
distance: float = 0,
) -> List[str]:
"""Query the store for annotation keys.
Acts the same as `AnnotationStore.query` except returns keys
instead of annotations.
Args:
geometry (Geometry or Iterable):
Geometry to use when querying. This can be a bounds
(iterable of length 4) or a Shapely geometry (e.g.
Polygon).
where (str or bytes or Callable):
A statement which should evaluate to a boolean value.
Only annotations for which this predicate is true will
be returned. Defaults to None (assume always true). This
may be a string, callable, or pickled function as bytes.
Callables are called to filter each result returned
from the annotation store backend in python before being
returned to the user. A pickle object is, where
possible, hooked into the backend as a user defined
function to filter results during the backend query.
Strings are expected to be in a domain specific language
and are converted to SQL on a best-effort basis. For
supported operators of the DSL see
:mod:`tiatoolbox.annotation.dsl`. E.g. a simple python
expression `props["class"] == 42` will be converted to a
valid SQLite predicate when using `SQLiteStore` and
inserted into the SQL query. This should be faster than
filtering in python after or during the query.
Additionally, the same string can be used across
different backends (e.g. the previous example predicate
                string is valid for both a `DictionaryStore` and a
                `SQLiteStore`). On the other hand it has many more
limitations. It is important to note that untrusted user
input should never be accepted to this argument as
arbitrary code can be run via pickle or the parsing of
the string statement.
geometry_predicate:
A string which define which binary geometry predicate to
use when comparing the query geometry and a geometry in
the store. Only annotations for which this binary
predicate is true will be returned. Defaults to
"intersects". For more information see the `shapely
documentation on binary predicates <https://shapely.
readthedocs.io/en/stable/manual.html#binary-predicates>`_.
            min_area (float):
                Minimum area for returned annotations. Requires the
                store to have an "area" column.
            distance (float):
                Distance used when performing a distance based query.
                E.g. "centers_within_k" geometry predicate.
Returns:
list:
A list of keys for each Annotation.
"""
query_geometry = geometry
cur = self._query(
"[key]",
geometry=query_geometry,
geometry_predicate=geometry_predicate,
where=where,
callable_columns="[key], properties",
min_area=min_area,
distance=distance,
)
if isinstance(where, Callable):
return [
key
for key, properties in cur.fetchall()
if where(json.loads(properties))
]
return [key for key, in cur.fetchall()]
def query(
self,
geometry: Optional[QueryGeometry] = None,
where: Optional[Predicate] = None,
geometry_predicate: str = "intersects",
min_area=None,
distance: float = 0,
) -> Dict[str, Annotation]:
"""Runs Query."""
query_geometry = geometry
cur = self._query(
columns="[key], properties, cx, cy, geometry",
geometry=query_geometry,
geometry_predicate=geometry_predicate,
where=where,
min_area=min_area,
distance=distance,
)
if isinstance(where, Callable):
return {
key: Annotation(
geometry=self._unpack_geometry(blob, cx, cy),
properties=json.loads(properties),
)
for key, properties, cx, cy, blob in cur.fetchall()
if where(json.loads(properties))
}
return {
key: Annotation(
geometry=self._unpack_geometry(blob, cx, cy),
properties=json.loads(properties),
)
for key, properties, cx, cy, blob in cur.fetchall()
}
def bquery(
self,
geometry: Optional[QueryGeometry] = None,
where: Union[str, bytes, Callable[[Geometry, Dict[str, Any]], bool]] = None,
min_area=None,
) -> Dict[str, Tuple[float, float, float, float]]:
"""Query the store for annotation bounding boxes.
Acts similarly to `AnnotationStore.query` except it checks for
intersection between stored and query geometry bounding boxes.
This may be faster than a regular query in some cases, e.g. for
        SQLiteStore with a large number of annotations.
Note that this method only checks for bounding box intersection
and therefore may give a different result to using
`AnnotationStore.query` with a box polygon and the "intersects"
geometry predicate. Also note that geometry predicates are not
supported for this method.
Args:
geometry (Geometry or Iterable):
Geometry to use when querying. This can be a bounds
(iterable of length 4) or a Shapely geometry (e.g.
Polygon). If a geometry is provided, the bounds of the
geometry will be used for the query. Full geometry
intersection is not used for the query method.
where (str or bytes or Callable):
A statement which should evaluate to a boolean value.
Only annotations for which this predicate is true will
be returned. Defaults to None (assume always true). This
may be a string, callable, or pickled function as bytes.
Callables are called to filter each result returned
from the annotation store backend in python before being
returned to the user. A pickle object is, where
possible, hooked into the backend as a user defined
function to filter results during the backend query.
Strings are expected to be in a domain specific language
and are converted to SQL on a best-effort basis. For
supported operators of the DSL see
:mod:`tiatoolbox.annotation.dsl`. E.g. a simple python
expression `props["class"] == 42` will be converted to a
valid SQLite predicate when using `SQLiteStore` and
inserted into the SQL query. This should be faster than
filtering in python after or during the query.
Additionally, the same string can be used across
different backends (e.g. the previous example predicate
string is valid for both `DictionaryStore` and a
                `SQLiteStore`). On the other hand it has many more
limitations. It is important to note that untrusted user
input should never be accepted to this argument as
arbitrary code can be run via pickle or the parsing of
the string statement.
Returns:
            dict:
                A dictionary mapping keys to bounding boxes
                (min_x, min_y, max_x, max_y) for each Annotation.
Example:
>>> from tiatoolbox.annotation.storage import SQLiteStore
>>> from shapely.geometry import Polygon
>>> store = SQLiteStore()
>>> store.append(
... Annotation(
... geometry=Polygon.from_bounds(0, 0, 1, 1),
... properties={"class": 42},
... ),
... key="foo",
... )
>>> store.bquery(where="props['class'] == 42")
{'foo': (0.0, 0.0, 1.0, 1.0)}
"""
cur = self._query(
columns="[key], min_x, min_y, max_x, max_y",
geometry=geometry,
geometry_predicate="bbox_intersects",
where=where,
callable_columns="[key], properties, min_x, min_y, max_x, max_y",
min_area=min_area,
)
if isinstance(where, Callable):
return {
key: bounds
for key, properties, *bounds in cur.fetchall()
if where(json.loads(properties))
}
return {key: bounds for key, *bounds in cur.fetchall()}
@staticmethod
def _handle_pickle_callable_pquery(
select: CallableSelect,
where: Optional[CallablePredicate],
cur: sqlite3.Cursor,
unique: bool,
    ) -> Union[List[Set[Any]], Dict[str, Properties]]:
"""Package the results of a pquery into the right output format.
This variant is used when select and where are callable or
pickle objects.
Args:
select (Union[str, bytes, Callable]):
A callable to select the properties to return.
where (CallablePredicate):
                A callable predicate to filter the rows with. May be
                None for a no-op (no filtering).
cur (sqlite3.Cursor):
The cursor for the query.
unique (bool):
Whether to return only unique results.
Returns:
            dict or list:
                If unique, a list of sets is returned. Otherwise,
a dictionary mapping annotation keys to JSON-like
property dictionaries is returned.
"""
def add_props_to_result(
result: DefaultDict[str, set], properties: Dict[str, Any]
) -> None:
"""Add the properties to the appropriate set in result.
Args:
result (DefaultDict[str, set]):
The result dictionary to add the properties to.
properties (Dict[str, Any]):
The properties to add to the result.
"""
# Get the selected values
selection = select(properties)
# Wrap scalar values into a tuple
if not isinstance(selection, tuple):
selection = (selection,)
# Add the properties to the appropriate set
for i, value in enumerate(selection):
result[i].add(value)
# Load a pickled select function
if isinstance(select, bytes):
select = pickle.loads(select) # skipcq: BAN-B301
if unique:
# Create a dictionary of sets to store the unique properties
# for each property key / name.
result = defaultdict(set)
for (properties_string,) in cur.fetchall():
properties = json.loads(properties_string)
# Apply where filter and skip if False
if where and not where(properties):
continue
add_props_to_result(result, properties)
return list(result.values())
if not where:
return {
key: select(json.loads(properties))
for key, properties in cur.fetchall()
}
return {
key: select(json.loads(properties))
for key, properties in cur.fetchall()
if where(json.loads(properties))
}
@staticmethod
def _handle_str_pquery(
cur: sqlite3.Cursor,
unique: bool,
star_query: bool,
    ) -> Union[List[Set[Any]], Dict[str, Properties]]:
"""Package the results of a pquery into the right output format.
This variant is used when select and where are DSL strings.
Args:
cur (sqlite3.Cursor):
The cursor for the query.
unique (bool):
Whether to return only unique results.
star_query (bool):
True if the query is a star query, i.e. select == "*".
Returns:
            dict or list:
                If unique, a list of sets is returned. Otherwise,
a dictionary mapping annotation keys to JSON-like
property dictionaries is returned.
"""
if unique:
result = defaultdict(set)
for values in cur.fetchall():
for i, value in enumerate(values):
result[i].add(value)
return list(result.values())
return {key: json.loads(x) if star_query else x for key, x in cur.fetchall()}
@staticmethod
def _kind_of_pquery(
select: Union[str, bytes, Callable],
where: Union[str, bytes, Callable],
) -> Tuple[bool, bool, bool]:
"""Determine boolean flags for the kind of pquery this is.
If either one of `select` or `where` is a str, bytes, or
callable, then is_callable_query, is_pickle_query, and
is_str_query respectively will be set to True.
Returns:
tuple:
A tuple of bools:
- True if select or where are callable (functions).
- True if select or where are bytes (pickle expressions).
- True if select or where are str (SQL expressions).
"""
is_callable_query = any(isinstance(x, Callable) for x in (select, where) if x)
is_pickle_query = any(isinstance(x, bytes) for x in (select, where) if x)
is_str_query = any(isinstance(x, str) for x in (select, where) if x)
return is_callable_query, is_pickle_query, is_str_query
@staticmethod
def _validate_select_where_type(
select: Union[str, bytes, Callable],
where: Union[str, bytes, Callable],
) -> None:
"""Validate that select and where are valid types.
1. Check that select and where are the same type if where is given.
2. Check that select is in (str, bytes, callable).
Raises:
TypeError:
If select and where are not the same type or not in
(str, bytes, callable).
"""
if where is not None and type(select) is not type(where):
raise TypeError("select and where must be of the same type")
if not isinstance(select, (str, bytes)) and not callable(select):
raise TypeError(
f"select must be str, bytes, or callable, not {type(select)}"
)
def pquery(
self,
select: Select,
geometry: Optional[QueryGeometry] = None,
where: Optional[Predicate] = None,
geometry_predicate: str = "intersects",
unique: bool = True,
squeeze: bool = True,
) -> Union[Dict[str, Any], Set[Any]]:
"""Query the store for annotation properties.
Acts similarly to `AnnotationStore.query` but returns only the
value defined by `select`.
Args:
select (str or bytes or Callable):
A statement defining the value to look up from the
annotation properties. If `select = "*"`, all properties
are returned for each annotation (`unique` must be
False).
geometry (Geometry or Iterable):
Geometry to use when querying. This can be a bounds
(iterable of length 4) or a Shapely geometry (e.g.
Polygon). If a geometry is provided, the bounds of the
geometry will be used for the query. Full geometry
intersection is not used for the query method.
where (str or bytes or Callable):
A statement which should evaluate to a boolean value.
Only annotations for which this predicate is true will
be returned. Defaults to None (assume always true). This
may be a string, callable, or pickled function as bytes.
                Callables are called to filter each result returned
                from the annotation store backend in python before being
returned to the user. A pickle object is, where
possible, hooked into the backend as a user defined
function to filter results during the backend query.
Strings are expected to be in a domain specific language
and are converted to SQL on a best-effort basis. For
supported operators of the DSL see
:mod:`tiatoolbox.annotation.dsl`. E.g. a simple python
expression `props["class"] == 42` will be converted to a
valid SQLite predicate when using `SQLiteStore` and
inserted into the SQL query. This should be faster than
filtering in python after or during the query. It is
important to note that untrusted user input should never
be accepted to this argument as arbitrary code can be
run via pickle or the parsing of the string statement.
geometry_predicate (str):
A string defining which binary geometry predicate to
use when comparing the query geometry and a geometry in
the store. Only annotations for which this binary
predicate is true will be returned. Defaults to
"intersects". For more information see the `shapely
documentation on binary predicates <https://shapely.
readthedocs.io/en/stable/manual.html#binary-predicates>`_.
unique (bool):
If True, only unique values for each selected property
will be returned as a list of sets. If False, all values
                will be returned as a dictionary mapping keys to values.
Defaults to True.
squeeze (bool):
If True, when querying for a single value with
`unique=True`, the result will be a single set instead
of a list of sets.
Examples:
>>> from tiatoolbox.annotation.storage import SQLiteStore
>>> from shapely.geometry import Point
>>> store = SQLiteStore()
>>> annotation = Annotation(
... geometry=Point(0, 0),
... properties={"class": 42},
... )
>>> store.append(annotation, "foo")
>>> store.pquery("*", unique=False)
            {'foo': {'class': 42}}
>>> from tiatoolbox.annotation.storage import SQLiteStore
>>> from shapely.geometry import Point
>>> store = SQLiteStore()
>>> annotation = Annotation(
... geometry=Point(0, 0),
... properties={"class": 42},
... )
>>> store.append(annotation, "foo")
>>> store.pquery("props['class']")
            {42}
>>> annotation = Annotation(Point(1, 1), {"class": 123})
>>> store.append(annotation, "foo")
>>> store.pquery("props['class']")
... {42, 123}
""" # noqa
self._validate_select_where_type(select, where)
is_callable_query, is_pickle_query, is_str_query = self._kind_of_pquery(
select, where
)
is_star_query = select == "*" # Get all properties, special case
query_geometry = geometry # Rename arg
return_columns = [] # Initialise return rows list of column names
if is_star_query and unique:
raise ValueError("unique=True cannot be used with select='*'")
if not unique:
return_columns.append("[key]")
if is_str_query and not is_star_query:
select_names = eval(select, SQL_GLOBALS, {}) # skipcq: PYL-W0123
return_columns += [str(select_names)]
if is_callable_query or is_star_query or is_pickle_query:
return_columns.append("properties")
columns = ", ".join(return_columns)
cur = self._query(
columns=columns,
geometry=query_geometry,
geometry_predicate=geometry_predicate,
where=where,
unique=unique,
no_constraints_ok=True,
index_warning=True,
)
if is_pickle_query or is_callable_query:
# Where to apply after database query
# only done for callable where.
post_where = where if is_callable_query else None
result = self._handle_pickle_callable_pquery(
select, post_where, cur, unique
)
else:
result = self._handle_str_pquery(cur, unique, is_star_query)
if unique and squeeze and len(result) == 1:
return result[0]
return result
def __len__(self) -> int:
cur = self.con.cursor()
cur.execute("SELECT COUNT(*) FROM annotations")
(count,) = cur.fetchone()
return count
def __contains__(self, key: str) -> bool:
cur = self.con.cursor()
cur.execute("SELECT EXISTS(SELECT 1 FROM annotations WHERE [key] = ?)", (key,))
return cur.fetchone()[0] == 1
def __getitem__(self, key: str) -> Annotation:
cur = self.con.cursor()
cur.execute(
"""
SELECT geometry, properties, cx, cy
FROM annotations
WHERE [key] = :key
""",
{"key": key},
)
row = cur.fetchone()
if row is None:
raise KeyError(key)
serialised_geometry, serialised_properties, cx, cy = row
properties = json.loads(serialised_properties or "{}")
geometry = self._unpack_geometry(
serialised_geometry,
cx,
cy,
)
return Annotation(geometry, properties)
    def keys(self) -> Iterable[str]:
"""Return an iterable (usually generator) of all keys in the store.
Returns:
Iterable[str]:
An iterable of keys.
"""
yield from self
    def __iter__(self) -> Iterable[str]:
cur = self.con.cursor()
cur.execute(
"""
SELECT [key]
FROM annotations
"""
)
while True:
row = cur.fetchone()
if row is None:
break
yield row[0] # The key
    def values(self) -> Iterable[Annotation]:
        """Return an iterable of all annotations in the store.
Returns:
Iterable[Annotation]:
An iterable of annotations.
"""
for _, value in self.items():
yield value
    def items(self) -> Iterable[Tuple[str, Annotation]]:
        """Returns an iterable (generator) over keys and annotations."""
cur = self.con.cursor()
cur.execute(
"""
SELECT [key], cx, cy, geometry, properties
FROM annotations
"""
)
while True:
row = cur.fetchone()
if row is None:
break
key, cx, cy, serialised_geometry, serialised_properties = row
if serialised_geometry is not None:
geometry = self._unpack_geometry(serialised_geometry, cx, cy)
else:
geometry = Point(cx, cy)
properties = json.loads(serialised_properties)
yield key, Annotation(geometry, properties)
def patch_many(
self,
        keys: Iterable[str],
geometries: Optional[Iterable[Geometry]] = None,
properties_iter: Optional[Iterable[Properties]] = None,
) -> None:
"""Bulk patch of annotations.
This may be more efficient than calling `patch` repeatedly
in a loop.
Args:
            keys (iter(str)):
                An iterable of keys for each annotation to be updated.
            geometries (iter(Geometry)):
                An iterable of geometries to update.
            properties_iter (iter(dict)):
                An iterable of properties to update.
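        Example (illustrative sketch; assumes a store containing
        annotations under the keys "foo" and "bar"):
            >>> store.patch_many(
            ...     keys=["foo", "bar"],
            ...     properties_iter=[{"class": 1}, {"class": 2}],
            ... )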
"""
# Validate inputs
if not any([geometries, properties_iter]):
raise ValueError(
"At least one of geometries or properties_iter must be given"
)
keys = list(keys)
geometries = list(geometries) if geometries else None
properties_iter = list(properties_iter) if properties_iter else None
self._validate_equal_lengths(keys, geometries, properties_iter)
properties_iter = properties_iter or ({} for _ in keys) # pragma: no branch
geometries = geometries or (None for _ in keys) # pragma: no branch
# Update the database
cur = self.con.cursor()
# Begin a transaction
if self.auto_commit:
cur.execute("BEGIN")
for key, geometry, properties in zip(keys, geometries, properties_iter):
# Annotation is not in DB:
if key not in self:
self._append(key, Annotation(geometry, properties), cur)
continue
# Annotation is in DB:
if geometry:
self._patch_geometry(key, geometry, cur)
if properties:
cur.execute(
"""
UPDATE annotations
SET properties = json_patch(properties, :properties)
WHERE [key] = :key
""",
{
"key": key,
"properties": json.dumps(properties, separators=(",", ":")),
},
)
if self.auto_commit:
self.con.commit()
def _patch_geometry(
self, key: str, geometry: Geometry, cur: sqlite3.Cursor
) -> None:
"""Patch a geometry in the database.
Update the geometry of the annotation with the given key but
leave the properties untouched.
Args:
key: The key of the annotation to patch.
geometry: The new geometry.
cur: The cursor to use.
"""
bounds = dict(zip(("min_x", "min_y", "max_x", "max_y"), geometry.bounds))
xy = dict(zip("xy", np.array(geometry.centroid.coords[0])))
query_parameters = dict(
**bounds,
**xy,
key=key,
geometry=self.serialise_geometry(geometry),
)
cur.execute(
"""
UPDATE rtree
SET min_x = :min_x, min_y = :min_y,
max_x = :max_x, max_y = :max_y
WHERE EXISTS
(SELECT 1
FROM annotations
WHERE rtree.id = annotations.id
AND annotations.key == :key);
""",
query_parameters,
)
cur.execute(
"""
UPDATE annotations
SET cx = :x, cy = :y, geometry = :geometry
WHERE [key] = :key
""",
query_parameters,
)
def remove_many(self, keys: Iterable[str]) -> None:
"""Bulk removal of annotations by keys.
Args:
keys (iter(str)):
An iterable of keys for the annotation to be removed.
"""
cur = self.con.cursor()
if self.auto_commit:
cur.execute("BEGIN")
for key in keys:
cur.execute(
"""
DELETE
FROM rtree
WHERE EXISTS
(SELECT 1
FROM annotations
WHERE rtree.id = annotations.id
AND annotations.key == ?);
""",
(key,),
)
cur.execute(
"DELETE FROM annotations WHERE [key] = ?",
(key,),
)
if self.auto_commit:
self.con.commit()
def __setitem__(self, key: str, annotation: Annotation) -> None:
if key in self:
self.patch(key, annotation.geometry, annotation.properties)
return
self.append(annotation, key)
def _get_table_columns(self):
"""Get a list of columns in the annotations table."""
cur = self.con.execute("PRAGMA table_info(annotations)")
return [row[1] for row in cur.fetchall()]
def add_area_column(self, mk_index=True):
"""Add a column to store the area of the geometry."""
cur = self.con.cursor()
cur.execute(
"""
ALTER TABLE annotations
ADD COLUMN area INTEGER NOT NULL DEFAULT 0
"""
)
cur.execute(
"""
UPDATE annotations
SET area = get_area(geometry, cx, cy)
"""
)
if mk_index:
self.create_index("area", '"area"')
self.con.commit()
self.table_columns.append("area")
def remove_area_column(self):
"""Remove the area column from the store."""
if "area" in self.indexes():
self.drop_index("area")
cur = self.con.cursor()
cur.execute(
"""
ALTER TABLE annotations
DROP COLUMN area
"""
)
self.con.commit()
self.table_columns.remove("area")
def to_dataframe(self) -> pd.DataFrame:
"""Converts AnnotationStore to :class:`pandas.DataFrame`."""
df = pd.DataFrame()
df_rows = (
{
"key": key,
"geometry": annotation.geometry,
"properties": annotation.properties,
}
for key, annotation in self.items()
)
df = pd.concat([df, pd.json_normalize(df_rows)])
return df.set_index("key")
def features(self) -> Generator[Dict[str, Any], None, None]:
"""Return annotations as a list of geoJSON features.
Returns:
list:
List of features as dictionaries.
"""
return (
{
"type": "Feature",
"geometry": geometry2feature(annotation.geometry),
"properties": annotation.properties,
}
for annotation in self.values()
)
def commit(self) -> None:
"""Commit any in-memory changes to disk."""
self.con.commit()
def dump(self, fp: Union[Path, str, IO]) -> None:
"""Serialise a copy of the whole store to a file-like object.
Args:
fp(Path or str or IO):
A file path or file handle object for output to disk.
"""
if hasattr(fp, "write"):
fp = fp.name
target = sqlite3.connect(fp)
self.con.backup(target)
def dumps(self) -> str:
"""Serialise and return a copy of store as a string or bytes.
Returns:
str or bytes:
The serialised store.
"""
return "\n".join(self.con.iterdump())
def clear(self) -> None:
"""Remove all annotations from the store."""
cur = self.con.cursor()
cur.execute("DELETE FROM rtree")
cur.execute("DELETE FROM annotations")
if self.auto_commit:
self.con.commit()
def create_index(
self, name: str, where: Union[str, bytes], analyze: bool = True
) -> None:
"""Create an SQLite expression index based on the provided predicate.
Note that an expression index will only be used if the query expression
(in the WHERE clause) exactly matches the expression used when creating
the index (excluding minor inconsequential changes such as
whitespace).
        SQLite expression indexes require SQLite version 3.9.0 or higher.
Args:
name (str):
Name of the index to create.
where:
The predicate used to create the index.
analyze (bool):
Whether to run the "ANALYZE" command after creating the
index.
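        Example (illustrative sketch; the predicate uses the same DSL as
        `pquery`, see :mod:`tiatoolbox.annotation.dsl`):
            >>> store.create_index("idx_class", "props['class']")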
"""
_, minor, _ = sqlite3.sqlite_version_info
if minor < 9:
raise EnvironmentError("Requires sqlite version 3.9.0 or higher.")
cur = self.con.cursor()
if not isinstance(where, str):
raise TypeError(f"Invalid type for `where` ({type(where)}).")
sql_predicate = eval(where, SQL_GLOBALS) # skipcq: PYL-W0123
cur.execute(f"CREATE INDEX {name} ON annotations({sql_predicate})")
if analyze:
cur.execute(f"ANALYZE {name}")
def indexes(self) -> List[str]:
"""Returns a list of the names of all indexes in the store.
Returns:
List[str]:
The list of index names.
"""
cur = self.con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE TYPE = 'index'")
return [row[0] for row in cur.fetchall()]
def drop_index(self, name: str) -> None:
"""Drop an index from the store.
Args:
name (str):
The name of the index to drop.
"""
cur = self.con.cursor()
cur.execute(f"DROP INDEX {name}")
def optimize(self, vacuum: bool = True, limit: int = 1000) -> None:
"""Optimize the database with VACUUM and ANALYZE.
Args:
vacuum (bool):
Whether to run VACUUM.
limit (int):
The approximate maximum number of rows to examine when
                running ANALYZE. If zero or negative, no limit will be
used. For more information see
https://www.sqlite.org/pragma.html#pragma_analysis_limit.
"""
if vacuum:
self.con.execute("VACUUM")
# Cannot use parameterized statements with PRAGMA!
self.con.execute(f"PRAGMA analysis_limit = {int(limit)}")
self.con.execute("PRAGMA optimize")
class DictionaryStore(AnnotationStore):
"""Pure python dictionary backed annotation store."""
def __init__(self, connection: Union[Path, str, IO] = ":memory:") -> None:
super().__init__()
self._rows = {}
self.connection = connection
self.path = self._connection_to_path(connection)
if self.connection not in [None, ":memory:"] and self.path.exists():
for line in self._load_cases(
fp=self.connection,
string_fn=lambda fp: fp.splitlines(),
file_fn=lambda fp: fp.readlines(),
):
dictionary = json.loads(line)
key = dictionary.get("key", uuid.uuid4().hex)
geometry = feature2geometry(dictionary["geometry"])
properties = dictionary["properties"]
self.append(Annotation(geometry, properties), key=key)
def append(
self,
annotation: Annotation,
key: Optional[str] = None,
) -> str:
"""Insert a new annotation, returning the key.
Args:
annotation (Annotation):
The shapely annotation to insert.
key (str):
Optional. The unique key used to identify the annotation in the
store. If not given a new UUID4 will be generated and returned
instead.
Returns:
str:
The unique key of the newly inserted annotation.
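        Example (illustrative sketch):
            >>> from shapely.geometry import Point
            >>> store = DictionaryStore()
            >>> key = store.append(Annotation(Point(0, 0), {"class": 42}))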
"""
if not isinstance(annotation.geometry, (Polygon, Point, LineString)):
raise TypeError("Invalid geometry type.")
key = key or str(uuid.uuid4())
self._rows[key] = {"annotation": annotation}
return key
def patch(
self,
key: str,
geometry: Optional[Geometry] = None,
properties: Optional[Dict[str, Any]] = None,
) -> None:
"""Patch an annotation at given key.
Partial update of an annotation. Providing only a geometry will update
the geometry and leave properties unchanged. Providing a properties
dictionary applies a patch operation to the properties. Only updating
the properties which are given and leaving the rest unchanged. To
completely replace an annotation use `__setitem__`.
Args:
key(str):
The key of the annotation to update.
geometry(Geometry):
The new geometry. If None, the geometry is not updated.
properties(dict):
A dictionary of properties to patch and their new values.
If None, the existing properties are not altered.
"""
if key not in self:
self.append(Annotation(geometry, properties), key)
return
existing = self[key]
geometry = geometry or existing.geometry
properties = properties or {}
new_properties = copy.deepcopy(existing.properties)
new_properties.update(properties)
self[key] = Annotation(geometry, new_properties)
def remove(self, key: str) -> None:
"""Remove annotation from the store with its unique key.
Args:
key (str):
The key of the annotation to be removed.
"""
del self._rows[key]
def __getitem__(self, key: str) -> Annotation:
return self._rows[key]["annotation"]
def __setitem__(self, key: str, annotation: Annotation) -> None:
        # assigning the key unconditionally replaces any existing entry
        self._rows[key] = {"annotation": annotation}
def __contains__(self, key: str) -> bool:
return key in self._rows
def items(self) -> Generator[Tuple[str, Annotation], None, None]:
"""Returns iterable (generator) over key and annotations."""
for key, row in self._rows.items():
yield key, row["annotation"]
def __len__(self) -> int:
return len(self._rows)
@classmethod # noqa: A003
def open(cls, fp: Union[Path, str, IO]) -> "DictionaryStore": # noqa: A003
"""Opens :class:`DictionaryStore` from file pointer or path."""
return cls.from_ndjson(fp)
def commit(self) -> None:
"""Commit any in-memory changes to disk."""
if str(self.connection) == ":memory:":
logger.warning("In-memory store. Nothing to commit.", stacklevel=2)
return
if not self.path.exists():
self.path.touch()
self.dump(self.connection)
def dump(self, fp: Union[Path, str, IO]) -> None:
"""Serialise a copy of the whole store to a file-like object.
Args:
fp(Path or str or IO):
A file path or file handle object for output to disk.
"""
return self.to_ndjson(fp)
def dumps(self) -> str:
"""Serialise and return a copy of store as a string or bytes.
Returns:
str or bytes:
The serialised store.
"""
return self.to_ndjson()
def close(self) -> None:
"""Closes :class:`DictionaryStore` from file pointer or path."""
duplicate_filter = DuplicateFilter()
logger.addFilter(duplicate_filter)
# Try to commit any changes if the file is still open.
with contextlib.suppress(ValueError):
self.commit()
logger.removeFilter(duplicate_filter)
| 124,137 | 36.289877 | 88 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/annotation/__init__.py | """Module initialisation."""
from tiatoolbox.annotation import dsl, storage
| 76 | 24.666667 | 46 | py |
tiatoolbox | tiatoolbox-master/tiatoolbox/annotation/dsl.py | """Domain specific language (DSL) for use in AnnotationStore queries and indexes.
This module facilitates conversion from a restricted subset of python
to another domain specific language, for example SQL. This is done using
`eval` and a set of provided globals and locals. Mainly used for
construction of predicate statements for AnnotationStore queries but
also used in statements for the creation of indexes to accelerate
queries.
This conversion should be assumed to be on a best-effort basis. Not
every expression valid in python can be evaluated to form a valid
matching SQL expression. However, for many common cases this will be
possible. For example, the simple python expression `props["class"] ==
42` can be converted to a valid SQL (SQLite flavour) predicate which
will access the properties JSON column and check that the value under
the key of "class" equals 42.
This predicate statement can be used as part of an SQL query and should
be faster than post-query filtering in python or filtering during the
query via a registered custom function callback.
An additional benefit is that the same input string can be used across
different backends. For example, the previous simple example predicate
string can be evaluated as both a valid python expression and can be
converted to an equivalent valid SQL expression simply by running `eval`
with a different set of globals from this module.
It is important to note that untrusted user input should not be
accepted, as arbitrary code can be run during the parsing of an input
string.
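Example (illustrative sketch using the globals defined below):
    >>> str(eval("props['class'] == 42", SQL_GLOBALS))
    '(json_extract(properties, "$.class") == 42)'
    >>> eval("props['class'] == 42", PY_GLOBALS, {"props": {"class": 42}})
    True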
Supported operators and functions:
- Property access: `props["key"]`
- Math operations (`+`, `-`, `*`, `/`, `//`, `**`, `%`):
`props["key"] + 1`
- Boolean operations (`and`, `or`, `not`): `props["key"] and
props["key"] == 1`
- Key checking: `"key" in props`
- List indexing: `props["key"][0]`
- List sum: `sum(props["key"])`
- List contains: `"value" in props["key"]`
- None check (with a provided function): `is_none(props["key"])`
`is_not_none(props["key"])`
- Regex (with a provided function): `regexp(pattern, props["key"])`
Unsupported operations:
- The `is` operator: `props["key"] is None`
- Imports: `import re`
- List length: `len(props["key"])` (support planned)
Compile options:
Some mathematical functions will not function if the compile option
`ENABLE_MATH_FUNCTIONS` is not set. These are:
- `//` (floor division)
"""
import json
import operator
import re
from dataclasses import dataclass
from numbers import Number
from typing import Any, Callable, Optional, Union
@dataclass
class SQLNone:
"""Sentinel object for SQL NULL within expressions."""
def __str__(self) -> str:
return "NULL"
def __repr__(self) -> str:
return str(self) # pragma: no cover
class SQLExpression:
"""SQL expression base class."""
__hash__ = None
def __repr__(self):
return str(self) # pragma: no cover
def __add__(self, other):
return SQLTriplet(self, operator.add, other)
def __radd__(self, other):
return SQLTriplet(other, operator.add, self)
def __mul__(self, other):
return SQLTriplet(self, operator.mul, other)
def __rmul__(self, other):
return SQLTriplet(other, operator.mul, self)
    def __sub__(self, other):
        return SQLTriplet(self, operator.sub, other)
    def __rsub__(self, other):
        return SQLTriplet(other, operator.sub, self)
def __truediv__(self, other):
return SQLTriplet(self, operator.truediv, other)
def __rtruediv__(self, other):
return SQLTriplet(other, operator.truediv, self)
def __floordiv__(self, other):
return SQLTriplet(self, operator.floordiv, other)
def __rfloordiv__(self, other):
return SQLTriplet(other, operator.floordiv, self)
def __mod__(self, other):
return SQLTriplet(self, operator.mod, other)
def __rmod__(self, other):
return SQLTriplet(other, operator.mod, self)
def __gt__(self, other):
return SQLTriplet(self, operator.gt, other)
def __ge__(self, other):
return SQLTriplet(self, operator.ge, other)
def __lt__(self, other):
return SQLTriplet(self, operator.lt, other)
def __le__(self, other):
return SQLTriplet(self, operator.le, other)
def __abs__(self):
return SQLTriplet(self, operator.abs)
def __eq__(self, other):
return SQLTriplet(self, operator.eq, other)
def __ne__(self, other: object):
return SQLTriplet(self, operator.ne, other)
def __neg__(self):
return SQLTriplet(self, operator.neg)
def __contains__(self, other):
return SQLTriplet(self, "contains", other)
def __pow__(self, x):
return SQLTriplet(self, operator.pow, x)
def __rpow__(self, x):
return SQLTriplet(x, operator.pow, self)
def __and__(self, other):
return SQLTriplet(self, operator.and_, other)
def __rand__(self, other):
return SQLTriplet(other, operator.and_, self)
def __or__(self, other):
return SQLTriplet(self, operator.or_, other)
def __ror__(self, other):
return SQLTriplet(other, operator.or_, self)
class SQLTriplet(SQLExpression):
"""Representation of an SQL triplet expression (LHS, operator, RHS).
Attributes:
lhs (SQLExpression): Left hand side of expression.
op (str): Operator string.
rhs (SQLExpression): Right hand side of expression.
"""
def __init__(
self,
lhs: Union["SQLTriplet", str],
op: Union[Callable, str] = None,
rhs: Union["SQLTriplet", str] = None,
):
self.lhs = lhs
self.op = op
self.rhs = rhs
self.formatters = {
operator.mul: lambda a, b: f"({a} * {b})",
operator.gt: lambda a, b: f"({a} > {b})",
operator.ge: lambda a, b: f"({a} >= {b})",
operator.lt: lambda a, b: f"({a} < {b})",
operator.le: lambda a, b: f"({a} <= {b})",
operator.add: lambda a, b: f"({a} + {b})",
operator.sub: lambda a, b: f"({a} - {b})",
operator.neg: lambda a, _: f"(-{a})",
operator.truediv: lambda a, b: f"({a} / {b})",
operator.floordiv: lambda a, b: f"FLOOR({a} / {b})",
operator.and_: lambda a, b: f"({a} AND {b})",
operator.or_: lambda a, b: f"({a} OR {b})",
operator.abs: lambda a, _: f"ABS({a})",
operator.not_: lambda a, _: f"NOT({a})",
operator.eq: lambda a, b: f"({a} == {b})",
operator.ne: lambda a, b: f"({a} != {b})",
operator.pow: lambda a, p: f"POWER({a}, {p})",
operator.mod: lambda a, b: f"({a} % {b})",
"is_none": lambda a, _: f"({a} IS NULL)",
"is_not_none": lambda a, _: f"({a} IS NOT NULL)",
"list_sum": lambda a, _: f"LISTSUM({a})",
"if_null": lambda x, d: f"IFNULL({x}, {d})",
"contains": lambda j, o: f"CONTAINS({j}, {o})",
"bool": lambda x, _: f"({x} != 0)",
}
def __str__(self) -> str:
lhs = self.lhs
rhs = self.rhs
if isinstance(rhs, str):
# is this ok? fixes categorical where predicate
rhs = f'"{rhs}"' # noqa: B028
if lhs and self.op:
return self.formatters[self.op](lhs, rhs)
raise ValueError("Invalid SQLTriplet.")
class SQLJSONDictionary(SQLExpression):
"""Representation of an SQL expression to access JSON properties."""
def __init__(self, acc: str = None) -> None:
self.acc = acc or ""
def __str__(self) -> str:
return f"json_extract(properties, {json.dumps(f'$.{self.acc}')})"
def __getitem__(self, key: str) -> "SQLJSONDictionary":
if isinstance(key, (int,)):
key_str = f"[{key}]"
else:
key_str = str(key)
joiner = "." if self.acc and not isinstance(key, int) else ""
return SQLJSONDictionary(acc=self.acc + joiner + f"{key_str}")
def get(self, key, default=None):
"""Returns SQLTriplet specified by key."""
return SQLTriplet(self[key], "if_null", default or SQLNone())
class SQLRegex(SQLExpression):
"""Representation of an SQL expression to match a string against a regex."""
def __init__(self, pattern: str, string: str, flags: int = 0) -> None:
self.pattern = pattern
self.string = string
self.flags = flags
def __str__(self) -> str:
string = self.string
pattern = self.pattern
flags = self.flags
if isinstance(string, (str, Number)):
string = json.dumps(string)
if isinstance(pattern, (str, Number)):
pattern = json.dumps(pattern)
if flags != 0:
return f"REGEXP({pattern}, {string}, {flags})"
return f"({string} REGEXP {pattern})"
@classmethod
def search(cls, pattern: str, string: str, flags: int = 0) -> "SQLRegex":
"""Returns an SQL expression to match a string against a pattern."""
return SQLRegex(pattern, string, int(flags))
def py_is_none(x: Any) -> bool:
"""Check if x is None."""
return x is None
def py_is_not_none(x: Any) -> bool:
"""Check if x is not None."""
return x is not None
def py_regexp(pattern: str, string: str, flags: int = 0) -> Optional[str]:
"""Check if string matches pattern."""
reg = re.compile(pattern, flags=flags)
match = reg.search(string)
if match:
return match[0]
return None
def json_list_sum(json_list: str) -> Number:
"""Return the sum of a list of numbers in a JSON string.
Args:
json_list: JSON string containing a list of numbers.
Returns:
Number:
The sum of the numbers in the list.
"""
return sum(json.loads(json_list))
def json_contains(json_str: str, x: object) -> bool:
"""Return True if a JSON string contains x.
Args:
json_str: JSON string.
x: Value to search for.
Returns:
bool:
True if x is in json_str.
"""
return x in json.loads(json_str)
def sql_is_none(x: Union[SQLExpression, Number, str, bool]) -> SQLTriplet:
"""Check if x is None.
Returns:
SQLTriplet:
SQLTriplet representing None check.
"""
return SQLTriplet(x, "is_none")
def sql_is_not_none(x: Union[SQLExpression, Number, str, bool]) -> SQLTriplet:
"""Check if x is not None.
Returns:
SQLTriplet:
SQLTriplet representing not None check.
"""
return SQLTriplet(x, "is_not_none")
def sql_list_sum(x: SQLJSONDictionary) -> SQLTriplet:
"""Return a representation of the sum of a list.
Args:
x (SQLJSONDictionary):
The list to sum.
Returns:
SQLTriplet:
SQLTriplet for a function call to sum the list.
"""
return SQLTriplet(x, "list_sum")
def sql_has_key(dictionary: SQLJSONDictionary, key: Union[str, int]) -> SQLTriplet:
"""Check if a dictionary has a key.
Args:
dictionary (SQLProperties):
SQLProperties object representing a JSON dict.
key(str or int):
Key to check for.
Returns:
SQLTriplet:
SQLTriplet representing key check.
"""
if not isinstance(dictionary, (SQLJSONDictionary,)):
raise TypeError("Unsupported type for has_key.")
return SQLTriplet(dictionary[key], "is_not_none")
# Constants defining the global variables for use in eval() when
# evaluating expressions.
_COMMON_GLOBALS = {
"__builtins__": {"abs": abs},
"re": re.RegexFlag,
}
SQL_GLOBALS = {
"__builtins__": {**_COMMON_GLOBALS["__builtins__"], "sum": sql_list_sum},
"props": SQLJSONDictionary(),
"is_none": sql_is_none,
"is_not_none": sql_is_not_none,
"regexp": SQLRegex.search,
"has_key": sql_has_key,
"re": _COMMON_GLOBALS["re"],
}
PY_GLOBALS = {
"__builtins__": {**_COMMON_GLOBALS["__builtins__"], "sum": sum},
"is_none": py_is_none,
"is_not_none": py_is_not_none,
"regexp": py_regexp,
"has_key": lambda a, b: b in a,
"re": _COMMON_GLOBALS["re"],
}
| 12,255 | 29.64 | 83 | py |
spinOS | spinOS-master/spinOS.py | """
Copyright 2020, 2021, 2022 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
Main script for launching spinOS
"""
import sys
try:
wd = sys.argv[1]
except IndexError:
wd = None
import modules.gui as gui
gui.run(wd)
| 820 | 26.366667 | 68 | py |
spinOS | spinOS-master/modules/constants.py | """
Copyright 2020, 2021, 2022 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
"""
import numpy as np
AU2KM = 1.495978707e8 # (km)
PC2KM = 3.085677581e13 # (km)
MSUN = 1.9885e30 # (kg)
RSUN = 6.957e5 # (km)
G = 6.67430e-20 # (km3 kg-1 s-2)
DEG2RAD = np.pi / 180
RAD2DEG = 180 / np.pi
MAS2RAD = 1e-3 / 3600 * DEG2RAD
DAY2SEC = 86400
RAD2MAS = RAD2DEG * 3600 * 1e3
TITLESIZE = 20
HCOLOR = '#3399ff'
BGCOLOR = '#d9d9d9'
FONTCOLOR = "#000000"
TIME_STR = r'time [day]'
PHASE_STR = r'orbital phase'
PARAM_LIST = ['p', 'e', 'i', 'omega', 'Omega', 't0', 'd', 'k1', 'k2', 'gamma1', 'gamma2', 'mt']
START_LIMS = [-50, 50, -0.15, 1.15, -10, 10, -10, 10]
LIM_STRINGS = ['RV y lower limit',
'RV y upper limit',
'RV x lower limit',
'RV x upper limit',
'Orbit y lower limit',
'Orbit y upper limit',
'Orbit x lower limit',
'Orbit x upper limit']
RV1COLORS = ['mediumblue', 'indigo', 'fuchsia', 'midnightblue']
RV2COLORS = ['indianred', 'maroon', 'orangered', 'goldenrod']
ASCOLORS = ['darkred', 'green', 'navy', 'mediumvioletred']
ASDISTCOLORS = ['lightcoral', 'limegreen', 'slateblue', 'orchid']
VERSION = "2.7.3"
| 1,802 | 34.352941 | 95 | py |
spinOS | spinOS-master/modules/plotting.py | """
Copyright 2020, 2021, 2022 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
"""
import tkinter as tk
import matplotlib as mpl
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import EllipseCollection
import modules.constants as cst
mpl.use("TkAgg") # set the backend
plt.style.use('rsc/spinOS.mplstyle') # load the style sheet
def move_figure(f, x, y):
"""
moves window f by x, y pixels
:param f: window
:param x: x offset
:param y: y offset
"""
f.canvas.manager.window.wm_geometry("+{}+{}".format(x, y))
class Plotting:
def __init__(self, gui):
self.gui = gui
# figure and line objects
self.rv_fig = None
self.as_fig = None
self.rv_ax = None
self.as_ax = None
self.rv1_dot = None
self.rv2_dot = None
self.as_dot = None
self.rv1_line = None
self.gamma1_line = None
self.rv2_line = None
self.gamma2_line = None
self.as_line = None
self.as_dist_lines = list()
self.rv1data_lines = list()
self.rv2data_lines = list()
self.asdata_lines = list()
self.peri_dot = None
self.node_line = None
self.semi_major = None
self.as_ellipses = list()
self.as_legend = None
self.rv_legend = None
# vars
self.do_phasedot = tk.BooleanVar()
self.do_datarv1 = tk.BooleanVar()
self.do_datarv2 = tk.BooleanVar()
self.do_dataas = tk.BooleanVar()
self.do_modelrv1 = tk.BooleanVar()
self.do_modelgamma1 = tk.BooleanVar()
self.do_modelrv2 = tk.BooleanVar()
self.do_modelgamma2 = tk.BooleanVar()
self.do_modelas = tk.BooleanVar()
self.do_nodeline = tk.BooleanVar()
self.do_semimajor = tk.BooleanVar()
self.do_peri = tk.BooleanVar()
self.do_as_dist = tk.BooleanVar()
self.do_grids = tk.BooleanVar(value=True)
self.rv_plot_boolvars = [self.do_datarv1, self.do_datarv2,
self.do_modelrv1,
self.do_modelrv2]
self.as_plot_boolvars = [self.do_dataas, self.do_modelas,
self.do_nodeline,
self.do_semimajor, self.do_peri]
self.phase = tk.DoubleVar()
self.do_legend = tk.BooleanVar()
self.axeslabelsize = tk.DoubleVar(value=20)
self.ticklabelsize = tk.DoubleVar(value=20)
self.limcontrol = tk.BooleanVar(value=True)
self.limits = []
for i in range(8):
self.limits.append(tk.DoubleVar(value=cst.START_LIMS[i]))
self.plot_vs_phase = tk.BooleanVar(value=True)
def matchLimits(self):
self.limits[0].set(np.round(self.rv_ax.get_ylim()[0], 1))
self.limits[1].set(np.round(self.rv_ax.get_ylim()[1], 1))
self.limits[2].set(np.round(self.rv_ax.get_xlim()[0], 1))
self.limits[3].set(np.round(self.rv_ax.get_xlim()[1], 1))
self.limits[4].set(np.round(self.as_ax.get_ylim()[0], 1))
self.limits[5].set(np.round(self.as_ax.get_ylim()[1], 1))
self.limits[6].set(
np.round(self.as_ax.get_xlim()[1], 1)) # east is to the left
self.limits[7].set(np.round(self.as_ax.get_xlim()[0], 1))
def update_plots(self):
# cannot find a way to condense this without messing up references
# to line objects
if self.do_dataas.get():
self.plot_as_data()
else:
for i in range(len(self.asdata_lines)):
if self.asdata_lines[i]:
self.asdata_lines[i].remove()
self.asdata_lines[i] = None
for i in range(len(self.as_ellipses)):
if self.as_ellipses[i]:
self.as_ellipses[i].remove()
self.as_ellipses[i] = None
if self.do_phasedot.get() and self.plot_vs_phase.get():
self.plot_dots()
else:
if self.as_dot:
self.as_dot.remove()
self.as_dot = None
if self.rv1_dot:
self.rv1_dot.remove()
self.rv1_dot = None
if self.rv2_dot:
self.rv2_dot.remove()
self.rv2_dot = None
if self.do_peri.get():
self.plot_periastron()
else:
if self.peri_dot:
self.peri_dot.remove()
self.peri_dot = None
if self.do_semimajor.get():
self.plot_semimajor_axis()
else:
if self.semi_major:
self.semi_major.remove()
self.semi_major = None
if self.do_nodeline.get():
self.plot_node_line()
else:
if self.node_line:
self.node_line.remove()
self.node_line = None
if self.do_modelas.get():
self.plot_relative_orbit()
else:
if self.as_line:
self.as_line.remove()
self.as_line = None
if self.do_modelrv2.get():
self.plot_rv2_curve()
else:
if self.rv2_line:
self.rv2_line.remove()
self.rv2_line = None
if self.do_modelgamma1.get():
self.plot_gamma1()
else:
if self.gamma1_line:
self.gamma1_line.remove()
self.gamma1_line = None
if self.do_modelgamma2.get():
self.plot_gamma2()
else:
if self.gamma2_line:
self.gamma2_line.remove()
self.gamma2_line = None
if self.do_modelrv1.get():
self.plot_rv1_curve()
else:
if self.rv1_line:
self.rv1_line.remove()
self.rv1_line = None
if self.do_datarv2.get():
self.plot_rv2_data()
else:
for i in range(len(self.rv2data_lines)):
if self.rv2data_lines[i]:
self.rv2data_lines[i].remove()
self.rv2data_lines[i] = None
if self.do_datarv1.get():
self.plot_rv1_data()
else:
for i in range(len(self.rv1data_lines)):
if self.rv1data_lines[i]:
self.rv1data_lines[i].remove()
self.rv1data_lines[i] = None
if self.do_as_dist.get():
self.plot_as_dist()
else:
for i in range(len(self.as_dist_lines)):
if self.as_dist_lines[i]:
for line in self.as_dist_lines[i]:
line.remove()
self.as_dist_lines[i] = None
self.plot_legends()
self.setup_rv_ax()
self.setup_as_ax()
if self.limcontrol.get():
self.relim_plots()
else:
self.rv_lims()
self.as_lims()
self.rv_fig.canvas.draw()
self.as_fig.canvas.draw()
def init_plots(self):
"""
sets up the plot windows
"""
if self.rv_fig is not None:
plt.close(self.rv_fig)
if self.as_fig is not None:
plt.close(self.as_fig)
self.rv_fig = plt.figure(num='RV curve')
move_figure(self.rv_fig, int(0.37 * self.gui.w) + 10, 0)
self.as_fig = plt.figure(num='Apparent orbit')
move_figure(self.as_fig, int(0.37 * self.gui.w) + 10,
int(0.5 * self.gui.h + 10))
self.rv_ax = self.rv_fig.add_subplot(111)
self.as_ax = self.as_fig.add_subplot(111, aspect=1)
self.rv_ax.grid(self.do_grids.get())
self.setup_rv_ax()
self.as_ax.axhline(linestyle=':', color='black')
self.as_ax.axvline(linestyle=':', color='black')
self.as_ax.grid(self.do_grids.get())
self.setup_as_ax()
self.rv_lims()
self.as_lims()
self.rv_fig.tight_layout()
self.as_fig.tight_layout()
plt.ion() # important: this lets mpl release the event loop to tk,
# ie plt.show() doesn't block app
plt.show()
def setup_rv_ax(self):
self.rv_ax.tick_params(axis='both', labelsize=self.ticklabelsize.get())
if self.plot_vs_phase.get():
self.rv_ax.set_xlabel(cst.PHASE_STR,
fontdict={'size': self.axeslabelsize.get()})
else:
self.rv_ax.set_xlabel(cst.TIME_STR,
fontdict={'size': self.axeslabelsize.get()})
self.rv_ax.set_ylabel(r'$RV$ (km s$^{-1}$)',
fontdict={'size': self.axeslabelsize.get()})
self.rv_ax.grid(self.do_grids.get())
def rv_lims(self):
self.rv_ax.set_xlim((self.limits[2].get(), self.limits[3].get()))
self.rv_ax.set_ylim((self.limits[0].get(), self.limits[1].get()))
def setup_as_ax(self):
self.as_ax.tick_params(axis='both', labelsize=self.ticklabelsize.get())
self.as_ax.set_xlabel(r'East (mas)',
fontdict={'size': self.axeslabelsize.get()})
self.as_ax.set_ylabel(r'North (mas)',
fontdict={'size': self.axeslabelsize.get()})
self.as_ax.grid(self.do_grids.get())
def as_lims(self):
self.as_ax.set_xlim((self.limits[7].get(), self.limits[6].get()))
self.as_ax.set_ylim((self.limits[4].get(), self.limits[5].get()))
def relim_plots(self):
"""
resizes the plots according to the data limits
"""
for plot_bool in self.rv_plot_boolvars:
if plot_bool.get():
self.rv_ax.relim()
self.rv_ax.axis('auto')
for plot_bool in self.as_plot_boolvars:
if plot_bool.get():
self.as_ax.relim()
self.as_ax.axis('image')
def plot_rv1_data(self):
"""
plot the rv1 data
"""
if not self.gui.datamanager.hasRV1():
return
for i in range(len(self.gui.datamanager.datasets['RV1'])):
data = self.gui.datamanager.datasets['RV1'][i].getData()
if data is not None:
if self.rv1data_lines[i] is not None:
self.rv1data_lines[i].remove()
self.rv1data_lines[i] = None
if self.plot_vs_phase.get():
phases, rv, err = self.gui.system.create_phase_extended_RV(
data, 0.15)
else:
phases, rv, err = data[:, 0], data[:, 1], data[:, 2]
self.rv1data_lines[i] = \
self.rv_ax.errorbar(phases, rv, yerr=err, ls='',
capsize=0.1, marker='o', ms=5,
color=cst.RV1COLORS[
i % len(
cst.RV1COLORS)],
label='primary RV')
def plot_rv2_data(self):
"""
plot the rv2 data
"""
if not self.gui.datamanager.hasRV2():
return
for i in range(len(self.gui.datamanager.datasets['RV2'])):
data = self.gui.datamanager.datasets['RV2'][i].getData()
if data is not None:
if self.rv2data_lines[i] is not None:
self.rv2data_lines[i].remove()
self.rv2data_lines[i] = None
if self.plot_vs_phase.get():
phases, rv, err = self.gui.system.create_phase_extended_RV(
data, 0.15)
else:
phases, rv, err = data[:, 0], data[:, 1], data[:, 2]
self.rv2data_lines[i] = \
self.rv_ax.errorbar(phases, rv, yerr=err, ls='',
capsize=0.1, marker='o', ms=5,
color=cst.RV2COLORS[
i % len(cst.RV2COLORS)],
label='secondary RV')
def plot_as_data(self):
"""
plot the as data
"""
if not self.gui.datamanager.hasAS():
return
for i in range(len(self.gui.datamanager.datasets['AS'])):
dtst = self.gui.datamanager.datasets['AS'][i]
data = dtst.getData()
if data is not None:
if self.asdata_lines[i] is None:
self.asdata_lines[i], = \
self.as_ax.plot(data[:, 1], data[:, 2], '.',
c=cst.ASCOLORS[i % len(
cst.ASCOLORS)],
ls='', label=dtst.name_var.get())
else:
self.asdata_lines[i].set_xdata(data[:, 1])
self.asdata_lines[i].set_ydata(data[:, 2])
if self.as_ellipses[i] is not None:
self.as_ellipses[i].remove()
self.as_ellipses[i] = \
EllipseCollection(2 * data[:, 5], 2 * data[:, 6],
data[:, 7] - 90,
offsets=np.column_stack((data[:, 1],
data[:, 2])),
transOffset=self.as_ax.transData,
units='x',
edgecolors=cst.ASCOLORS[i % len(
cst.ASCOLORS)],
facecolors=(
0, 0, 0, 0))
self.as_ax.add_collection(self.as_ellipses[i])
def plot_as_dist(self):
"""
plot the astrometric distances of each as point
"""
if not self.gui.datamanager.hasAS():
return
for i in range(len(self.gui.datamanager.datasets['AS'])):
data = self.gui.datamanager.datasets['AS'][i].getData()
if self.as_dist_lines[i] is not None:
for line in self.as_dist_lines[i]:
line.remove()
self.as_dist_lines[i] = None
self.as_dist_lines[i] = list()
for j in range(len(data[:, 0])):
self.as_dist_lines[i].append(self.as_ax.plot(
(data[j, 1],
self.gui.system.relative.east_of_hjd(data[j, 0])),
(data[j, 2],
self.gui.system.relative.north_of_hjd(data[j, 0])),
c=cst.ASDISTCOLORS[i % len(cst.ASDISTCOLORS)])[0])
def plot_rv1_curve(self):
"""
plot the rv1 model curve
"""
if self.plot_vs_phase.get():
phases = np.linspace(-0.15, 1.15, num=150)
vrads1 = self.gui.system.primary.radial_velocity_of_phases(phases)
if self.rv1_line is None:
self.rv1_line, = self.rv_ax.plot(phases, vrads1,
label=r'primary', color='b',
ls='--')
else:
self.rv1_line.set_xdata(phases)
self.rv1_line.set_ydata(vrads1)
else:
m, mm = self._determine_time_bounds()
times = np.linspace(m, m + self.gui.system.p, num=100)
rvs = self.gui.system.primary.radial_velocity_of_phases(
self.gui.system.phase_of_hjds(times))
times, rvs = self.gui.system.extend_rvs_until_time(times, rvs, mm)
if self.rv1_line is None:
self.rv1_line, = self.rv_ax.plot(times, rvs, label=r'primary',
color='b', ls='--')
else:
self.rv1_line.set_xdata(times)
self.rv1_line.set_ydata(rvs)
def _determine_time_bounds(self):
m = np.infty
mm = -np.infty
if self.gui.datamanager.hasRV1():
data = self.gui.datamanager.getBuiltRV1s()
m = min(m, min(data[:, 0]))
mm = max(mm, max(data[:, 0]))
if self.gui.datamanager.hasRV2():
data = self.gui.datamanager.getBuiltRV2s()
m = min(m, min(data[:, 0]))
mm = max(mm, max(data[:, 0]))
print('time bounds', m, mm)
return m, mm
def plot_gamma1(self):
"""
plot the rv1 gamma line
"""
c = 'k'
if self.do_modelgamma2.get() or self.do_modelrv2.get() or \
self.do_datarv2.get():
c = 'b'
if self.gamma1_line is None:
self.gamma1_line = self.rv_ax.axhline(
self.gui.system.primary.gamma, color=c, ls=':')
else:
self.gamma1_line.set_ydata(self.gui.system.primary.gamma)
self.gamma1_line.set(color=c)
def plot_rv2_curve(self):
"""
plot the rv2 model curve
"""
if self.plot_vs_phase.get():
phases = np.linspace(-0.15, 1.15, num=150)
vrads1 = self.gui.system.secondary.radial_velocity_of_phases(
phases)
if self.rv2_line is None:
self.rv2_line, = self.rv_ax.plot(phases, vrads1,
label=r'secondary', color='r',
ls='--')
else:
self.rv2_line.set_xdata(phases)
self.rv2_line.set_ydata(vrads1)
else:
m, mm = self._determine_time_bounds()
times = np.linspace(m, m + self.gui.system.p, num=100)
rvs = self.gui.system.secondary.radial_velocity_of_phases(
self.gui.system.phase_of_hjds(times))
times, rvs = self.gui.system.extend_rvs_until_time(times, rvs, mm)
if self.rv2_line is None:
self.rv2_line, = self.rv_ax.plot(times, rvs,
label=r'secondary', color='r',
ls='--')
else:
self.rv2_line.set_xdata(times)
self.rv2_line.set_ydata(rvs)
def plot_gamma2(self):
"""
plot the rv2 gamma line
"""
c = 'k'
if self.do_modelgamma1.get() or self.do_modelrv1.get() or \
self.do_datarv1.get():
c = 'r'
if self.gamma2_line is None:
self.gamma2_line = self.rv_ax.axhline(
self.gui.system.secondary.gamma, color=c, ls=':')
else:
self.gamma2_line.set_ydata(self.gui.system.secondary.gamma)
self.gamma2_line.set(color=c)
def plot_relative_orbit(self):
"""
(re)plot the relative astrometric orbit
"""
ecc_anoms = np.linspace(0, 2 * np.pi, 200)
norths = self.gui.system.relative.north_of_ecc(ecc_anoms)
easts = self.gui.system.relative.east_of_ecc(ecc_anoms)
if self.as_line is None:
self.as_line, = self.as_ax.plot(easts, norths, color='k')
else:
self.as_line.set_xdata(easts)
self.as_line.set_ydata(norths)
def plot_node_line(self):
"""
(re)plot the astrometric node line
"""
system = self.gui.system.relative
if self.node_line is None:
self.node_line, = self.as_ax.plot(
[system.east_of_true(-system.omega),
system.east_of_true(-system.omega + np.pi)],
[system.north_of_true(-system.omega),
system.north_of_true(-system.omega + np.pi)],
color='0.5', ls='--', label='Line of nodes')
else:
self.node_line.set_xdata([system.east_of_true(-system.omega),
system.east_of_true(
-system.omega + np.pi)])
self.node_line.set_ydata([system.north_of_true(-system.omega),
system.north_of_true(
-system.omega + np.pi)])
def plot_periastron(self):
"""
(re)plot the astrometric periastron point
"""
system = self.gui.system.relative
if self.peri_dot is None:
self.peri_dot, = self.as_ax.plot([system.east_of_ecc(0)],
[system.north_of_ecc(0)],
color='b', marker='s',
ls='', fillstyle='full',
label='Periastron',
markersize=8)
else:
self.peri_dot.set_xdata(system.east_of_ecc(0))
self.peri_dot.set_ydata(system.north_of_ecc(0))
def plot_semimajor_axis(self):
"""
(re)plot the astrometric semimajor axis
"""
system = self.gui.system.relative
if self.semi_major is None:
self.semi_major, = self.as_ax.plot(
[system.east_of_true(0), system.east_of_true(np.pi)],
[system.north_of_true(0),
system.north_of_true(np.pi)],
color='0.3', ls='dashdot', label='Semi-major axis')
else:
self.semi_major.set_xdata(
[system.east_of_true(0), system.east_of_true(np.pi)])
self.semi_major.set_ydata(
[system.north_of_true(0), system.north_of_true(np.pi)])
def plot_dots(self):
"""
(re)plot diamond shapes at the specified phase
"""
if self.rv1_dot is not None:
self.rv1_dot.remove()
self.rv1_dot = None
if self.do_modelrv1.get() or self.do_datarv1.get():
rv1 = self.gui.system.primary.radial_velocity_of_phase(
self.phase.get())
self.rv1_dot = self.rv_ax.scatter(self.phase.get(), rv1, s=100,
color='b', marker='D',
label=np.round(rv1, 2))
if self.rv2_dot is not None:
self.rv2_dot.remove()
self.rv2_dot = None
if self.do_modelrv2.get() or self.do_datarv2.get():
rv2 = self.gui.system.secondary.radial_velocity_of_phase(
self.phase.get())
self.rv2_dot = self.rv_ax.scatter(self.phase.get(), rv2, s=100,
color='r', marker='D',
label=np.round(rv2, 2))
if self.as_dot is not None:
self.as_dot.remove()
self.as_dot = None
if self.do_modelas.get() or self.do_dataas.get():
N = self.gui.system.relative.north_of_ph(self.phase.get())
E = self.gui.system.relative.east_of_ph(self.phase.get())
self.as_dot = self.as_ax.scatter(E, N, s=100, color='r',
marker='x',
label='{}E/{}N'.format(
np.round(E, 2),
np.round(N, 2)))
def plot_legends(self):
"""
(re)plot the legends
"""
try:
self.rv_ax.get_legend().remove()
except AttributeError:
pass
try:
self.as_ax.get_legend().remove()
except AttributeError:
pass
if self.do_legend.get():
if len(self.rv_ax.get_lines()) > 1:
self.rv_ax.legend(prop={'size': self.axeslabelsize.get()})
if len(self.as_ax.get_lines()) > 1:
self.as_ax.legend(prop={'size': self.axeslabelsize.get()})
def make_corner_diagram(self):
"""
plot a corner diagram of an MCMC run
"""
if self.gui.didmcmc:
import corner
labels = []
            truths = []
for key in self.gui.minresult.var_names:
if key == 'e':
labels.append(r'$e$')
elif key == 'i':
labels.append(r'$i$ (deg)')
elif key == 'omega':
labels.append(r'$\omega$ (deg)')
elif key == 'Omega':
labels.append(r'$\Omega$ (deg)')
elif key == 't0':
labels.append(r'$T_0$ (MJD)')
elif key == 'k1':
labels.append(r'$K_1$ (km/s)')
elif key == 'k2':
labels.append(r'$K_2$ (km/s)')
elif key == 'p':
labels.append(r'$P$ (day)')
elif key == 'gamma1':
labels.append(r'$\gamma_1$ (km/s)')
elif key == 'gamma2':
labels.append(r'$\gamma_2$ (km/s)')
elif key == 'd':
labels.append(r'$d$ (pc)')
elif key == 'mt':
labels.append(r'$M_{\textrm{total}}$ (M$\odot$)')
if self.gui.minresult.params[key].vary:
                    truths.append(self.gui.minresult.params.valuesdict()[key])
corner.corner(self.gui.minresult.flatchain, labels=labels,
                          truths=truths)
else:
print('do an mcmc minimization first!')
| 26,311 | 38.927162 | 79 | py |
spinOS | spinOS-master/modules/binary_system.py | """
Copyright 2020, 2021, 2022 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
Module that defines the BinarySystem class, the Orbit class and its subclasses.
"""
import numpy as np
import scipy.optimize as spopt
import modules.constants as const
class BinarySystem:
"""
The class that represents a binary system with its respective
components. It is assumed
that this binary has a
distance to the observer that is way larger than the orbital separation.
"""
def __init__(self, parameters: dict):
"""
        Creates a BinarySystem object, defining a binary system with the 11
        parameters supplied that fully determine the orbits:
            - e         the eccentricity
            - i         the inclination (deg)
            - omega     the longitude of the periastron with respect to
                        the ascending node (deg)
            - Omega     the longitude of the ascending node of the
                        secondary, measured east of north (deg)
            - t0        the time of periastron passage (hjd)
            - p         the period of the binary (days)
            - d         the distance to the system (pc)
            - k1        the semiamplitude of the radial velocity curve of
                        the primary (km/s)
            - k2        the semiamplitude of the radial velocity curve of
                        the secondary (km/s)
            - gamma1    the (apparent) systemic velocity of the primary
                        (km/s)
            - gamma2    the (apparent) systemic velocity of the secondary
                        (km/s)
            - mt        the total dynamical mass of the system (Msun)
:param parameters: dictionary containing the aforementioned parameters
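        Example (illustrative sketch; values are arbitrary):
            >>> params = {'e': 0.5, 'i': 45., 'omega': 30., 'Omega': 60.,
            ...           't0': 2455000., 'p': 500., 'd': 1000., 'k1': 30.,
            ...           'k2': 50., 'gamma1': 0., 'gamma2': 0., 'mt': 30.}
            >>> system = BinarySystem(params)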
"""
if parameters['p'] == 0.0:
raise ValueError(
'a binary system cannot have a period of zero days')
self.e = parameters['e']
self.i = parameters['i'] * const.DEG2RAD
self.sini = np.sin(self.i)
self.cosi = np.cos(self.i)
self.Omega = parameters['Omega'] * const.DEG2RAD
self.sinO = np.sin(self.Omega)
self.cosO = np.cos(self.Omega)
self.t0 = parameters['t0']
self.p = parameters['p'] # day
self.d = parameters['d'] * const.PC2KM # km
self.primary = AbsoluteOrbit(self, abs(parameters['k1']),
parameters['omega'] - 180,
parameters['gamma1'])
self.secondary = AbsoluteOrbit(self, abs(parameters['k2']),
parameters['omega'],
parameters['gamma2'])
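        # semi-major axis of the relative orbit from the RV semi-amplitudes:
        # a = P * (K1 + K2) * sqrt(1 - e^2) / (2 * pi * |sin i|)  (km)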
self.ap_kk = (self.p * const.DAY2SEC) * (
abs(parameters['k1']) + abs(parameters['k2'])) * np.sqrt(
1 - self.e ** 2) / (2 * np.pi * abs(self.sini))
self.mt = parameters['mt']
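        # semi-major axis from Kepler's third law:
        # a^3 = G * M_total * P^2 / (4 * pi^2)  (km)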
self.ap_mt = np.cbrt(
parameters['mt'] * const.MSUN * const.G * (
self.p * const.DAY2SEC) ** 2 / (
4 * np.pi ** 2))
self.relative = RelativeOrbit(self,
self.ap_mt / self.d * const.RAD2MAS,
parameters['omega'])
    def extend_rvs_until_time(self, times, rvs, maxtime):
        """
        Periodically extends a single-period RV curve in time until maxtime
        is reached.
        :param times: time points covering one period of the orbit (day)
        :param rvs: radial velocities at those time points (km/s)
        :param maxtime: time up to which to extend the curve (day)
        :return: extended times and radial velocities
        """
out = np.copy(times)
n = int((maxtime - times[0]) // self.p)
for i in range(n):
times += self.p
out = np.concatenate((out, times))
rvs = np.tile(rvs, n + 1)
return out, rvs
def semimajor_axis_from_RV(self):
"""
Calculates the physical semi-major axis of the relative orbit.
:return: semi-major axis (AU)
"""
return self.ap_kk / const.AU2KM
def semimajor_axis_from_distance(self):
"""
Calculates the physical semi-major axis of the relative orbit from
the distance and
apparent size.
:return: semi-major axis (AU)
"""
return self.ap_mt / const.AU2KM
def primary_mass(self):
"""
Calculates the mass of the primary body of the system
:return: mass of the primary (Solar Mass)
"""
return np.power(1 - self.e ** 2, 1.5) * (
self.primary.k + self.secondary.k) ** 2 * self.secondary.k * (
self.p * const.DAY2SEC) / (
2 * np.pi * const.G * abs(self.sini) ** 3) / const.MSUN
def secondary_mass(self):
"""
Calculates the mass of the secondary body of the system
:return: mass of the secondary (in Solar Mass)
"""
return np.power(1 - self.e ** 2, 1.5) * (
self.primary.k + self.secondary.k) ** 2 * self.primary.k * (
self.p * const.DAY2SEC) / (
2 * np.pi * const.G * abs(self.sini) ** 3) / const.MSUN
def total_mass(self):
"""
Calculates the total mass of the system
:return: mass of the system (in Solar Mass)
"""
return np.power(1 - self.e ** 2, 1.5) * (
self.primary.k + self.secondary.k) ** 3 * (
self.p * const.DAY2SEC) / (
2 * np.pi * const.G * abs(self.sini) ** 3) / const.MSUN
def total_mass_from_distance(self):
"""
Calculates the total dynamical mass of the system using the size of
the apparent orbit.
:return: m_total (Msun)
"""
return 4 * np.pi ** 2 * np.power(
self.d * self.relative.a * const.MAS2RAD, 3) / (
const.G * (self.p * const.DAY2SEC) ** 2) / const.MSUN
def phase_of_hjds(self, hjds):
"""
Calculates the phase in the orbit given a Julian Date.
:param hjds: Julian Date (day)
:return: phase (rad)
"""
return np.remainder((hjds - self.t0) / self.p, 1)
def phase_of_ecc_anom(self, ecc_anoms):
"""
Calculates the phase in the orbit given an eccentric anomaly.
:param ecc_anoms: eccentric anomaly (rad)
:return: phase (rad)
"""
return (ecc_anoms - self.e * np.sin(ecc_anoms)) / (2 * np.pi)
def ecc_anom_of_true_anom(self, theta):
"""
Calculates the eccentric anomaly given a true anomaly
:param theta: true anomaly (rad)
:return: eccentric anomaly (rad)
"""
return 2 * np.arctan(
np.sqrt((1 - self.e) / (1 + self.e)) * np.tan(theta / 2))
def true_anomaly_of_ecc_anom(self, E):
"""
Calculates the true anomaly given an eccentric anomaly
:param E: eccentric anomaly (rad)
:return: true anomaly (rad)
"""
return 2 * np.arctan(
np.sqrt((1 + self.e) / (1 - self.e)) * np.tan(E / 2))
def ecc_anom_of_phase(self, phase):
"""
        Calculates the eccentric anomaly given a phase. This is the hardest
        function to resolve, as it requires solving a transcendental
        equation: Kepler's equation.
:param phase: phase (rad)
:return: eccentric anomaly (rad)
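        Sanity check (sketch): for a circular orbit (e = 0), Kepler's
        equation reduces to E = 2*pi*phase, so ecc_anom_of_phase(0.25)
        should return approximately pi/2.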
"""
# define keplers equation as function of a phase
def keplers_eq(ph):
"""
wrapper that returns keplers equation of a phase ph as a
function object
:param ph: phase
:return: python function to find root of
"""
# build a function object that should be zero for a certain
# eccentric anomaly
def kepler(ecc_an):
"""
                defines keplers equation as a function of an eccentric
                anomaly and phase; if zero, the given eccentric anomaly
                corresponds to the phase ph of this orbit
:param ecc_an: eccentric anomaly
:return: float
"""
return ecc_an - self.e * np.sin(ecc_an) - 2 * np.pi * ph
return kepler
        # find the root of keplers_eq(phase), which by construction returns
        # a function for which the eccentric anomaly is the independent
        # variable. The current root finding algorithm is toms748, as it
        # has the best convergence (2.7 bits per function evaluation)
phase = np.remainder(phase, 1)
return spopt.root_scalar(keplers_eq(phase), method='toms748',
bracket=(0, 2 * np.pi)).root
def create_phase_extended_RV(self, rvdata, extension_range):
"""
        Creates a new RV dataset, where the phase folding of the data is
        extended outside of the (0, 1) interval by a given amount
        :param rvdata: an ndarray containing the hjds, RV measurements and
            errors
        :param extension_range: the phase amount to extend the folding with.
        :return: same dataset as supplied, only folded to phases
            (-extension_range, 1 + extension_range)
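        Example (sketch): create_phase_extended_RV(rvdata, 0.15) folds the
        data onto phases (-0.15, 1.15); the plotting module uses exactly
        this extension amount.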
"""
phases = self.phase_of_hjds(rvdata[:, 0])
data = rvdata[:, 1]
errors = rvdata[:, 2]
left_extended_phases = phases[phases > (1 - extension_range)] - 1
right_extended_phases = phases[phases < extension_range] + 1
left_extended_data = data[phases > (1 - extension_range)]
right_extended_data = data[phases < extension_range]
left_extended_errors = errors[phases > (1 - extension_range)]
right_extended_errors = errors[phases < extension_range]
extended_phases = np.concatenate(
(left_extended_phases, phases, right_extended_phases))
extended_data = np.concatenate(
(left_extended_data, data, right_extended_data))
extended_errors = np.concatenate(
(left_extended_errors, errors, right_extended_errors))
return extended_phases, extended_data, extended_errors
class Orbit:
"""
    Creates a general orbit object, storing all the orbital elements as
    well as its period and systemic velocity.
    Within binary orbital solution finding however, use the subclasses
    instead to differentiate the absolute and the relative orbits.
The motion of a star in a (Newtonian) binary is completely determined by
the quantities:
-) e, the eccentricity
-) i (rad), the inclination with respect to the plane of the sky
-) omega (rad), the argument of periastron
-) Omega (rad), the longitude of the ascending node
-) t0 (day), the epoch of periastron passage
-) k (km/s), the semiamplitude of the observed radial velocity curve
-) p (day), the period of the orbit
-) gamma (km/s), the systemic velocity of the binary pair
        -) d (pc), the distance to the focus of the orbit (assumed way
           larger than the dimensions of the orbit)
An orbit inherits directly the quantities e, i, Omega, t0, p, d from the
system it resides in.
"""
def __init__(self, system, omega):
self.system: BinarySystem = system
self.omega = omega * const.DEG2RAD
self.sino = np.sin(self.omega)
self.coso = np.cos(self.omega)
class AbsoluteOrbit(Orbit):
"""
    An absolute orbit represents the orbit of either of the component
    masses separately. It is these orbits that determine the observed RV
    measurements.
"""
def __init__(self, system, k, omega, gamma):
"""
An absolute orbit of a component body is an orbit with the systemic
parameters as well as k, omega, gamma.
:param system: system the component belongs to
:param k: semiamplitude of its RV curve
:param omega: its argument of periastron
:param gamma: its peculiar velocity
"""
self.k = k
self.gamma = gamma
super().__init__(system, omega)
def radial_velocity_of_phase(self, phase, getAngles: bool = False):
"""
        Calculates the radial velocity of a component body given a single
        phase
        :param phase: phase (rad) (cannot be an iterable)
        :param getAngles: Boolean (default=False) indicating to additionally
            return the corresponding true and eccentric anomalies
        :return: radial velocity (km/s) [optionally: true anomaly (rad) and
            eccentric anomaly (rad)]
"""
E = self.system.ecc_anom_of_phase(phase)
return self.radial_velocity_of_ecc_anom(E, getAngles)
def radial_velocity_of_phases(self, phases, getAngles: bool = False):
"""
        Calculates the radial velocities of a component body given a list of
        phases
        :param phases: list of phases (rads) (must be an iterable)
        :param getAngles: Boolean (default=False) indicating to additionally
            return the corresponding true and eccentric anomalies
        :return: list of radial velocities (km/s) [optionally: list of true
            anomalies (rad) and list of eccentric anomalies (rad)]
"""
Es = np.zeros(phases.size)
for i in range(phases.size):
Es[i] = self.system.ecc_anom_of_phase(phases[i])
return self.radial_velocity_of_ecc_anom(Es, getAngles)
def radial_velocity_of_ecc_anom(self, ecc_anom, getAngles: bool = False):
"""
Calculates the radial velocity of a component body given an
eccentric anomaly
:param ecc_anom: eccentric anomaly (rad)
        :param getAngles: Boolean (default=False) indicating to additionally
            return the eccentric anomaly
:return: radial velocity (km/s) [optionally: eccentric anomaly (rad)]
"""
if getAngles:
return self.radial_velocity_of_true_anom(
self.system.true_anomaly_of_ecc_anom(ecc_anom),
getAngles), ecc_anom
return self.radial_velocity_of_true_anom(
self.system.true_anomaly_of_ecc_anom(ecc_anom))
def radial_velocity_of_true_anom(self, theta, getAngles: bool = False):
"""
Calculates the radial velocity of a component body given a true anomaly
:param theta: true anomaly (rad)
        :param getAngles: Boolean (default=False) indicating to additionally
            return the true anomaly
:return: radial velocity (km/s) [optionally: true anomaly (rad)]
"""
if getAngles:
return self.k * (
np.cos(
theta + self.omega) + self.system.e * self.coso) + \
self.gamma, theta
return self.k * (np.cos(
theta + self.omega) + self.system.e * self.coso) + self.gamma
def radial_velocity_of_hjds(self, hjds, getAngles: bool = False):
"""
Calculates the radial velocity of a component body given Julian dates
:param hjds: julian dates
:param getAngles: Boolean (default=False) indicating to additionally
return the true anomaly
:return: radial velocity (km/s) [optionally: true anomaly (rad)]
"""
return self.radial_velocity_of_phases(self.system.phase_of_hjds(hjds),
getAngles=getAngles)
class RelativeOrbit(Orbit):
"""
    A relative orbit represents the orbit of the secondary with
    respect to the primary. It is this orbit that determines the
    observed AS measurements.
"""
def __init__(self, system, a, omega):
super().__init__(system, omega)
self.a = a
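        # the Thiele-Innes constants computed below encode a, omega, Omega
        # and i; combined with the elliptical rectangular coordinates X, Y
        # (defined at the bottom of this class) they give the on-sky
        # position as north = A*X + F*Y and east = B*X + G*Y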
self.thiele_A = self.a * (
system.cosO * self.coso - system.sinO * self.sino *
system.cosi)
self.thiele_B = self.a * (
system.sinO * self.coso + system.cosO * self.sino *
system.cosi)
self.thiele_F = self.a * (
-system.cosO * self.sino - system.sinO * self.coso *
system.cosi)
self.thiele_G = self.a * (
-system.sinO * self.sino + system.cosO * self.coso *
system.cosi)
def north_of_ph(self, ph):
"""
        Calculates the northward separation given a phase
        :param ph: phase
        :return: northward separation
"""
return self.north_of_ecc(self.system.ecc_anom_of_phase(ph))
def east_of_ph(self, ph):
"""
        Calculates the eastward separation given a phase
        :param ph: phase
        :return: eastward separation
"""
return self.east_of_ecc(self.system.ecc_anom_of_phase(ph))
def north_of_ecc(self, E):
"""
        Calculates the northward separations given eccentric anomalies
        :param E: eccentric anomalies (rad)
        :return: northward separations
"""
return self.thiele_A * self.X(E) + self.thiele_F * self.Y(E)
def east_of_ecc(self, E):
"""
        Calculates the eastward separation given an eccentric anomaly
        :param E: eccentric anomaly (rad)
        :return: eastward separation
"""
return self.thiele_B * self.X(E) + self.thiele_G * self.Y(E)
def north_of_true(self, theta):
"""
Calculates the northward separation given a true anomaly
:param theta: true anomaly (rad)
:return: northward separation
"""
return self.north_of_ecc(self.system.ecc_anom_of_true_anom(theta))
def east_of_true(self, theta):
"""
Calculates the eastward separation given a true anomaly
        :param theta: true anomaly (rad)
:return: eastward separation
"""
return self.east_of_ecc(self.system.ecc_anom_of_true_anom(theta))
def north_of_hjd(self, hjd):
"""
        Calculates the northward separation given a Julian date
        :param hjd: Julian date (days)
        :return: northward separation
"""
return self.north_of_ecc(
self.system.ecc_anom_of_phase(self.system.phase_of_hjds(hjd)))
def north_of_hjds(self, hjds):
"""
        Calculates the northward separations given Julian dates
        :param hjds: Julian dates (days, must be iterable)
        :return: list of northward separations
"""
Es = np.zeros(hjds.size)
for i in range(len(Es)):
Es[i] = self.system.ecc_anom_of_phase(
self.system.phase_of_hjds(hjds[i]))
return self.north_of_ecc(Es)
def east_of_hjd(self, hjd):
"""
        Calculates the eastward separation given a Julian date
        :param hjd: Julian date (days)
        :return: eastward separation
"""
return self.east_of_ecc(
self.system.ecc_anom_of_phase(self.system.phase_of_hjds(hjd)))
def east_of_hjds(self, hjds):
"""
        Calculates the eastward separations given Julian dates
        :param hjds: Julian dates (days, must be iterable)
        :return: list of eastward separations
"""
Es = np.zeros(hjds.size)
for i in range(len(Es)):
Es[i] = self.system.ecc_anom_of_phase(
self.system.phase_of_hjds(hjds[i]))
return self.east_of_ecc(Es)
def X(self, E):
"""
Calculates the elliptical rectangular coordinate X given an eccentric
anomaly
:param E: eccentric anomaly (rad)
:return: elliptical rectangular coordinate X
"""
return np.cos(E) - self.system.e
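    # together, X and Y trace the orbit in its own plane (in units of a),
    # with the focus at the origin and the major axis along X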
def Y(self, E):
"""
Calculates the elliptical rectangular coordinate Y given an eccentric
anomaly
:param E: eccentric anomaly (rad)
:return: elliptical rectangular coordinate Y
"""
return np.sqrt(1 - self.system.e ** 2) * np.sin(E)
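# A minimal usage sketch (a sketch only: it assumes the BinarySystem class
# referenced above exposes the component orbits as system.primary
# (an AbsoluteOrbit) and system.relative (a RelativeOrbit); those attribute
# names, like all numbers below, are illustrative assumptions):
#
#     params = {'p': 100., 'e': 0.5, 'i': 60., 'omega': 90., 'Omega': 0.,
#               't0': 0., 'd': 1000., 'k1': 30., 'k2': 60., 'gamma1': 0.,
#               'gamma2': 0., 'mt': 20.}
#     system = BinarySystem(params)
#     phases = np.linspace(0., 1., 200)
#     rv1 = system.primary.radial_velocity_of_phases(phases)  # km/s
#     north = system.relative.north_of_ph(0.25)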
| 20,662 | 37.986792 | 79 | py |
spinOS | spinOS-master/modules/gui.py | """
Copyright 2020, 2021, 2022 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
"""
import pathlib
import tkinter as tk
from tkinter import ttk
import lmfit as lm
import numpy as np
import modules.binary_system as bsys
import modules.constants as cst
import modules.data_manager as dmgr
import modules.minimizer as spm
import modules.spinOS_io as spl
import modules.splash as splash
import modules.utils as util
from modules.plotting import Plotting
class SpinOSGUI:
"""
class specifying the main spinOS tk implementation
"""
# IMPROVEMENT: fix fonts to some standard one?
def __init__(self, master, wwd, width, height):
        # set homogeneous style
s = ttk.Style()
# s.theme_use('default')
s.map("TNotebook",
foreground=[('disabled', 'gray'), ('!disabled', 'black')])
s.map("TFrame",
foreground=[('disabled', 'gray'), ('!disabled', 'black')])
s.map("TEntry",
foreground=[('disabled', 'gray'), ('!disabled', 'black')])
s.map("TLabel",
foreground=[('disabled', 'gray'), ('!disabled', 'black')])
s.map("TButton",
foreground=[('disabled', 'gray'), ('!disabled', 'black')])
s.map("TRadiobutton",
foreground=[('disabled', 'gray'), ('!disabled', 'black')])
# FRAME STRUCTURE #
# set the root frame
tabs = ttk.Notebook(master)
# set the data frame
data_frame_tab = ttk.Frame(tabs)
data_frame = util.VerticalScrolledFrame(data_frame_tab)
data_frame.pack(expand=1, fill=tk.BOTH, anchor=tk.N)
# set the guess frame
guess_infer_tab = ttk.Frame(tabs)
guess_infer_top = util.VerticalScrolledFrame(guess_infer_tab)
infer_frame = ttk.Frame(guess_infer_top)
guess_frame = ttk.Frame(guess_infer_top)
guess_frame.pack()
refreshframe1 = ttk.Frame(guess_infer_top)
refreshframe1.pack(pady=10)
infer_frame.pack()
guess_infer_top.pack(expand=1, fill=tk.BOTH, anchor=tk.N)
# set the minimization frame
min_frame_tab = ttk.Frame(tabs)
min_frame = util.VerticalScrolledFrame(min_frame_tab)
min_frame.pack(expand=1, fill=tk.BOTH, anchor=tk.N)
# set the plot window controls frame
plt_frame_tab = ttk.Frame(tabs)
plt_frame_top = util.VerticalScrolledFrame(plt_frame_tab)
plt_frame = ttk.Frame(plt_frame_top)
plt_frame_top.pack(expand=1, fill=tk.BOTH, anchor=tk.N)
# GLOBAL GUI VARS #
self.w, self.h = width, height
self.wd = wwd
# the plotter instance
self.plotter = Plotting(self)
# DATA FRAME #
self.datamanager = dmgr.DataManager(self)
filesframe = ttk.Frame(data_frame)
firstlabel = ttk.Label(filesframe, text='DATA',
font=('', cst.TITLESIZE, 'underline'),
state=tk.ACTIVE)
firstlabel.grid(columnspan=5, sticky=tk.N)
        # define inclusion variables
self.load_rv1 = tk.BooleanVar()
self.load_rv2 = tk.BooleanVar()
self.load_as = tk.BooleanVar()
# assign to checkbuttons
rv1check = ttk.Checkbutton(filesframe, var=self.load_rv1,
command=self.toggle_rv1)
rv2check = ttk.Checkbutton(filesframe, var=self.load_rv2,
command=self.toggle_rv2)
ascheck = ttk.Checkbutton(filesframe, var=self.load_as,
command=self.toggle_as)
# put them in a nice grid
rv1check.grid(row=2, sticky=tk.E)
rv2check.grid(row=3, sticky=tk.E)
ascheck.grid(row=4, sticky=tk.E)
# define labels
ttk.Label(filesframe, text='load?').grid(row=1, column=0, sticky=tk.E)
ttk.Label(filesframe, text='Working directory').grid(row=1, column=1,
sticky=tk.E)
self.rv1_label = ttk.Label(filesframe, text='Primary RV file',
state=tk.DISABLED)
self.rv1_label.grid(row=2, column=1, sticky=tk.E)
self.rv2_label = ttk.Label(filesframe, text='Secondary RV file',
state=tk.DISABLED)
self.rv2_label.grid(row=3, column=1, sticky=tk.E)
self.as_label = ttk.Label(filesframe, text='Astrometric data file',
state=tk.DISABLED)
self.as_label.grid(row=4, column=1, sticky=tk.E)
# define entries
self.wd = ttk.Entry(filesframe)
self.rv1_file = ttk.Entry(filesframe)
self.rv2_file = ttk.Entry(filesframe)
self.as_file = ttk.Entry(filesframe)
# put some mock values
if wwd:
self.wd.insert(0, wwd)
self.rv1_file.insert(0, 'primary_vels.txt')
self.rv2_file.insert(0, 'secondary_vels.txt')
self.as_file.insert(0, 'relative_astrometry.txt')
# disable them after inserting stuff
self.rv1_file.config(state=tk.DISABLED)
self.rv2_file.config(state=tk.DISABLED)
self.as_file.config(state=tk.DISABLED)
# put in a nice grid
self.wd.grid(row=1, column=2)
self.rv1_file.grid(row=2, column=2)
self.rv2_file.grid(row=3, column=2)
self.as_file.grid(row=4, column=2)
self.seppa = tk.BooleanVar(value=True)
self.seppa_but = ttk.Radiobutton(filesframe, text='Sep/PA',
variable=self.seppa, value=True,
state=tk.DISABLED)
self.seppa_but.grid(row=4, column=3)
self.en_but = ttk.Radiobutton(filesframe, text='E/N',
variable=self.seppa, value=False,
state=tk.DISABLED)
self.en_but.grid(row=4, column=4, sticky=tk.W)
ttk.Button(filesframe, text='Load Data',
command=self.datamanager.loaddataintoSets,
).grid(row=5, columnspan=5, pady=10)
filesframe.grid_columnconfigure(0, weight=1)
filesframe.grid_columnconfigure(4, weight=1)
filesframe.pack(expand=1, fill=tk.BOTH, anchor=tk.N)
data_manager_frame = ttk.Frame(data_frame)
ttk.Label(data_manager_frame, text='Data Manager',
font=('', cst.TITLESIZE, 'underline')).pack()
managernotebook = ttk.Notebook(data_manager_frame)
self.rv1_tab = ttk.Frame(managernotebook)
self.rv1book = ttk.Notebook(self.rv1_tab)
self.rv1book.pack(expand=1, fill=tk.BOTH)
ttk.Button(self.rv1_tab, text='Add Dataset',
command=self.datamanager.emptyrv1DataSet).pack()
managernotebook.add(self.rv1_tab, text='Primary RVs')
self.rv2_tab = ttk.Frame(managernotebook)
self.rv2book = ttk.Notebook(self.rv2_tab)
self.rv2book.pack(expand=1, fill=tk.BOTH)
ttk.Button(self.rv2_tab, text='Add Dataset',
command=self.datamanager.emptyrv2DataSet).pack()
managernotebook.add(self.rv2_tab, text='Secondary RVs')
self.as_tab = ttk.Frame(managernotebook)
self.asbook = ttk.Notebook(self.as_tab)
self.asbook.pack(expand=1, fill=tk.BOTH)
self.seppa_dtmgr = tk.BooleanVar(value=True)
self.seppa_but_dtmgr = ttk.Radiobutton(self.as_tab, text='Sep/PA',
                                               variable=self.seppa_dtmgr,
value=True)
self.seppa_but_dtmgr.pack(side=tk.LEFT, expand=1)
self.en_but_dtmgr = ttk.Radiobutton(self.as_tab, text='E/N',
                                            variable=self.seppa_dtmgr,
value=False)
self.en_but_dtmgr.pack(side=tk.LEFT, expand=1)
ttk.Button(self.as_tab, text='Add Dataset',
command=lambda: self.datamanager.emptyasDataSet(
seppa=self.seppa_dtmgr.get())) \
.pack(side=tk.LEFT, expand=1)
managernotebook.add(self.as_tab, text='Astrometry')
managernotebook.pack(expand=1, fill=tk.BOTH, side=tk.TOP)
data_manager_frame.pack(expand=1, fill=tk.BOTH, side=tk.TOP)
# GUESS FRAME #
self.system = None
self.param_dict = None
self.guess_dict = None
columns = 7
labelcolumn = 1
entrycolumn = 2
varycheckcolumn = 3
transfercolumn = 4
minresultcolumn = 5
errorcolumn = 6
numofparams = 12
rparams = range(numofparams)
titlesrow = 2
paramgridrow = titlesrow + 1
buttonrow = paramgridrow + numofparams
# print the labels in the guess frame
ttk.Label(guess_frame, text='SYSTEM PARAMETERS',
font=('', cst.TITLESIZE, 'underline')).grid(
columnspan=columns)
ttk.Label(guess_frame, text='Guess file').grid(row=1,
column=labelcolumn,
sticky=tk.E)
self.guess_file = ttk.Entry(guess_frame, width=15)
self.guess_file.insert(0, 'guesses.txt')
self.guess_file.grid(row=1, column=entrycolumn, sticky=tk.S,
columnspan=2)
ttk.Label(guess_frame, text='Guesses') \
.grid(row=titlesrow, column=entrycolumn)
ttk.Label(guess_frame, text='Vary?') \
.grid(row=titlesrow, column=varycheckcolumn)
ttk.Label(guess_frame, text='Transfer') \
.grid(row=titlesrow, column=transfercolumn)
ttk.Label(guess_frame, text='Result') \
.grid(row=titlesrow, column=minresultcolumn)
ttk.Label(guess_frame, text='Error') \
.grid(row=titlesrow, column=errorcolumn)
        self.lock_gs = tk.BooleanVar(value=False)
self.locked_image = tk.PhotoImage(
file=pathlib.Path(__file__).parent.parent.joinpath('rsc/lock.png'))
self.unlocked_image = tk.PhotoImage(
file=pathlib.Path(__file__).parent.parent.joinpath(
'rsc/unlock.png'))
self.lock_gs_button = ttk.Button(guess_frame, width=1,
image=self.locked_image,
command=self.toggle_lock)
self.lock_gs_button.grid(row=paramgridrow + 10)
        self.q_mode = tk.BooleanVar(value=False)
self.lock_q_button = ttk.Button(guess_frame, width=1, text='q',
command=self.toggle_q)
self.lock_q_button.grid(row=paramgridrow + 8)
self.param_var_list = [tk.StringVar() for _ in rparams]
self.param_var_list[0].set('p (days) =')
self.param_var_list[1].set('e =')
self.param_var_list[2].set('i (deg) =')
self.param_var_list[3].set('omega (deg) =')
self.param_var_list[4].set('Omega (deg) =')
self.param_var_list[5].set('t0 (JD) =')
self.param_var_list[6].set('d (pc) =')
self.param_var_list[7].set('k1 (km/s) =')
self.param_var_list[8].set('k2 (km/s) =')
self.param_var_list[9].set('gamma1 (km/s) =')
self.param_var_list[10].set('gamma2 (km/s) =')
self.param_var_list[11].set('M_tot (Msun) =')
self.param_label_list = [
ttk.Label(guess_frame, textvariable=self.param_var_list[i]) for i
in
rparams]
for i in rparams:
self.param_label_list[i].grid(row=paramgridrow + i,
column=labelcolumn, sticky=tk.E)
# initialize the entry variables
self.guess_var_list = [tk.StringVar(value='0') for _ in rparams]
# define entry boxes
self.guess_entry_list = [
ttk.Entry(guess_frame, textvariable=self.guess_var_list[i],
width=10) for i in rparams]
# put in a nice grid
for i in rparams:
self.guess_entry_list[i].grid(row=paramgridrow + i,
column=entrycolumn)
# define the vary state variables
self.vary_var_list = [tk.BooleanVar() for _ in rparams]
# define checkbuttons for vary states
self.vary_button_list = [
ttk.Checkbutton(guess_frame, var=self.vary_var_list[i]) for i in
rparams]
# put the checkbuttons in a nice grid
for i in rparams:
self.vary_button_list[i].grid(row=paramgridrow + i,
column=varycheckcolumn)
# define the transfer buttons
        # for this to work, we need to wrap the lambda function in
        # another one, so that each command references its own number
        # 'y', rather than the outer 'i' of the list comprehension
self.transfer_button_list = [ttk.Button(guess_frame, text='<-',
command=(lambda y: (
lambda: self.transfer(y)))(
i)).grid(
row=paramgridrow + i, column=transfercolumn) for i in rparams]
# define the minimized parameter variables
self.mininimzed_var_list = [tk.StringVar() for _ in rparams]
# define the labels the minimized parameters will go in
self.min_label_list = [
ttk.Label(guess_frame, textvariable=self.mininimzed_var_list[i],
width=8) for i in
rparams]
for i in rparams:
self.min_label_list[i].grid(row=paramgridrow + i,
column=minresultcolumn)
# define the error variables
self.error_var_list = [tk.StringVar() for _ in rparams]
# define the labels the errors will go in
self.error_label_list = [
ttk.Label(guess_frame, textvariable=self.error_var_list[i],
width=8)
for i in rparams]
for i in rparams:
self.error_label_list[i].grid(row=paramgridrow + i,
column=errorcolumn)
# define the buttons in this frame
ttk.Button(guess_frame, text='Load guesses',
command=self.load_guesses).grid(row=buttonrow,
column=labelcolumn)
ttk.Button(guess_frame, text='Save guesses', command=self.save_guesses,
).grid(row=buttonrow, column=entrycolumn)
ttk.Button(guess_frame, text='Save parameters',
command=self.save_params,
).grid(row=buttonrow, column=minresultcolumn,
columnspan=2)
ttk.Button(refreshframe1, text='Refresh Plots & Inferred Parameters',
command=self.update).pack()
# INFER FRAME #
self.mprimary = tk.StringVar()
self.msecondary = tk.StringVar()
self.semimajord = tk.StringVar()
self.semimajork1k2 = tk.StringVar()
self.totalmass = tk.StringVar()
# define labels
ttk.Label(infer_frame, text='INFERRED PARAMETERS (from guesses)',
font=('', cst.TITLESIZE, 'underline')).grid(columnspan=4,
sticky=tk.N)
ttk.Label(infer_frame, text='From k1/k2',
font=('', 13, 'underline')).grid(row=1,
columnspan=2)
ttk.Label(infer_frame, text='M1 (M_sun) =').grid(row=3, sticky=tk.E)
ttk.Label(infer_frame, text='M2 (M_sun) =').grid(row=4, sticky=tk.E)
ttk.Label(infer_frame, text='M (M_sun) =').grid(row=5, sticky=tk.E)
ttk.Label(infer_frame, text='a (AU) =').grid(row=2, sticky=tk.E)
ttk.Label(infer_frame, textvariable=self.mprimary).grid(row=3,
column=1)
ttk.Label(infer_frame, textvariable=self.msecondary).grid(row=4,
column=1)
ttk.Label(infer_frame, textvariable=self.semimajork1k2).grid(row=2,
column=1)
ttk.Label(infer_frame, textvariable=self.totalmass).grid(row=5,
column=1)
ttk.Separator(infer_frame).grid(column=2, row=2, rowspan=5,
sticky=tk.NS)
ttk.Label(infer_frame, text='From d/M_tot:',
font=('', 13, 'underline')).grid(row=1,
column=3,
columnspan=2)
ttk.Label(infer_frame, text='a (AU) =').grid(row=2, column=3,
sticky=tk.E)
ttk.Label(infer_frame, textvariable=self.semimajord).grid(row=2,
column=4)
# MINIMIZATION FRAME #
self.minimization_run_number = 0
self.minresult = None
self.didmcmc = False
self.method = tk.StringVar(value='leastsq')
self.redchisq = tk.DoubleVar()
self.dof = tk.IntVar()
self.steps = tk.IntVar(value=1000)
self.walkers = tk.IntVar(value=100)
self.burn = tk.IntVar(value=100)
self.thin = tk.IntVar(value=1)
self.rms_rv1 = tk.DoubleVar()
self.rms_rv2 = tk.DoubleVar()
self.rms_as = tk.DoubleVar()
self.do_custom_weight = tk.BooleanVar(value=False)
self.def_as_weight = tk.DoubleVar()
self.custom_as_weight = tk.DoubleVar()
self.hops = tk.IntVar()
# define labels and buttons in a grid
methodframe = ttk.Frame(min_frame)
ttk.Label(methodframe, text='MINIMIZATION',
font=('', cst.TITLESIZE, 'underline')).grid(
columnspan=4)
ttk.Label(methodframe, text='Method:').grid(sticky=tk.E)
ttk.Radiobutton(methodframe, text='Levenberg-Marquardt',
variable=self.method,
value='leastsq',
command=self.toggle_method). \
grid(row=1, column=1, sticky=tk.W)
ttk.Radiobutton(methodframe, text='Basinhopping', variable=self.method,
value='basinhopping',
command=self.toggle_method).grid(row=2, column=1,
sticky=tk.W)
ttk.Radiobutton(methodframe, text='LM+MCMC', variable=self.method,
value='emcee',
command=self.toggle_method).grid(row=3, column=1,
sticky=tk.W)
self.hops_label = ttk.Label(methodframe, text='# of hops:',
state=tk.DISABLED)
self.hops_label.grid(row=2, column=2)
self.hops_entry = ttk.Entry(methodframe, textvariable=self.hops,
width=5, state=tk.DISABLED)
self.hops_entry.grid(row=2, column=3)
methodframe.pack()
mcframe = ttk.Frame(min_frame)
ttk.Label(mcframe, text='MCMC params: ').grid(row=0, column=1)
self.steps_label = ttk.Label(mcframe, text='# of steps:',
state=tk.DISABLED)
self.steps_label.grid(row=0, column=2, sticky=tk.E)
self.steps_entry = ttk.Entry(mcframe, textvariable=self.steps, width=5,
state=tk.DISABLED)
self.steps_entry.grid(row=0, column=3)
self.walkers_label = ttk.Label(mcframe, text='# of walkers:',
state=tk.DISABLED)
self.walkers_label.grid(row=0, column=4, sticky=tk.E)
self.walkers_entry = ttk.Entry(mcframe, textvariable=self.walkers,
width=5,
state=tk.DISABLED)
self.walkers_entry.grid(row=0, column=5)
self.burn_label = ttk.Label(mcframe, text='Burn:', state=tk.DISABLED)
self.burn_label.grid(row=0, column=6, sticky=tk.E)
self.burn_entry = ttk.Entry(mcframe, textvariable=self.burn, width=5,
state=tk.DISABLED)
self.burn_entry.grid(row=0, column=7)
self.thin_label = ttk.Label(mcframe, text='Thin:', state=tk.DISABLED)
self.thin_label.grid(row=0, column=8, sticky=tk.E)
self.thin_entry = ttk.Entry(mcframe, textvariable=self.thin, width=5,
state=tk.DISABLED)
self.thin_entry.grid(row=0, column=9)
mcframe.pack()
self.mc_widg = {self.steps_label, self.steps_entry, self.walkers_entry,
self.walkers_label,
self.burn_entry, self.burn_label, self.thin_label,
self.thin_entry}
otherminframe = ttk.Frame(min_frame)
ttk.Label(otherminframe, text='astrometric weight from data = ').grid(
row=2, column=1,
sticky=tk.E)
self.def_weight_label = ttk.Label(otherminframe,
textvariable=self.def_as_weight)
self.def_weight_label.grid(row=2, column=2, sticky=tk.W)
self.as_weight_button = ttk.Checkbutton(otherminframe,
var=self.do_custom_weight,
command=self.toggle_weights)
self.as_weight_button.grid(row=3, sticky=tk.E)
self.weight_label = ttk.Label(otherminframe,
text='Custom astrometric weight =',
state=tk.DISABLED)
self.weight_label.grid(row=3, column=1, sticky=tk.E)
self.weight_slider = tk.Scale(otherminframe,
variable=self.custom_as_weight,
from_=0, to=1, resolution=0.01,
orient=tk.HORIZONTAL, fg=cst.FONTCOLOR,
state=tk.DISABLED, length=180)
self.weight_slider.grid(row=3, column=2, columnspan=2, sticky=tk.W)
ttk.Button(otherminframe, text='Minimize!',
command=self.minimize).grid(row=4, columnspan=4)
ttk.Label(otherminframe, text='Results',
font=('', cst.TITLESIZE, 'underline')) \
.grid(row=5, columnspan=4)
ttk.Label(otherminframe, text='Red. Chi Sqrd =') \
.grid(row=6, sticky=tk.E)
ttk.Label(otherminframe, text='Deg. of frdm =') \
.grid(row=7, sticky=tk.E)
ttk.Label(otherminframe, textvariable=self.redchisq) \
.grid(row=6, column=1, sticky=tk.W)
ttk.Label(otherminframe, textvariable=self.dof) \
.grid(row=7, column=1, sticky=tk.W)
ttk.Label(otherminframe, text='RMS Primary (km/s) =') \
.grid(row=6, column=2, sticky=tk.E)
ttk.Label(otherminframe, text='RMS Secondary (km/s) ='). \
grid(row=7, column=2, sticky=tk.E)
ttk.Label(otherminframe, text='RMS Rel. Orbit (mas) =') \
.grid(row=8, column=2, sticky=tk.E)
ttk.Label(otherminframe, textvariable=self.rms_rv1) \
.grid(row=6, column=3, sticky=tk.W)
ttk.Label(otherminframe, textvariable=self.rms_rv2) \
.grid(row=7, column=3, sticky=tk.W)
ttk.Label(otherminframe, textvariable=self.rms_as) \
.grid(row=8, column=3, sticky=tk.W)
self.min_save_button = ttk.Button(otherminframe,
text='Save minimization result',
command=self.save_params,
state=tk.DISABLED)
self.min_save_button.grid(row=9, columnspan=4)
self.mcplotbutton = \
ttk.Button(otherminframe,
text='Make MCMC scatterplot matrix',
command=self.plotter.make_corner_diagram,
state=tk.DISABLED)
self.mcplotbutton.grid(row=10, columnspan=4)
otherminframe.pack()
# PLOT CONTROLS #
ttk.Label(plt_frame, text='PLOT CONTROLS',
font=('', cst.TITLESIZE, 'underline')).grid(
columnspan=6)
# UI elements
self.phase_label = ttk.Label(plt_frame, text='phase =',
state=tk.DISABLED)
self.phase_label.grid(row=1, column=1, sticky=tk.E)
self.phase_slider = tk.Scale(plt_frame, variable=self.plotter.phase,
from_=0, to=1, resolution=0.01,
orient=tk.HORIZONTAL, length=300,
state=tk.DISABLED, fg=cst.FONTCOLOR)
self.phase_slider.grid(row=1, column=2, columnspan=4)
self.phase_button = ttk.Checkbutton(plt_frame,
var=self.plotter.do_phasedot,
command=self.toggle_dot,
state=tk.DISABLED)
self.phase_button.grid(row=1)
self.plot_rv1data_label = ttk.Label(plt_frame, text='Primary RV data',
state=tk.DISABLED)
self.plot_rv1data_label.grid(row=2, column=1)
self.plot_rv1data_button = ttk.Checkbutton(plt_frame,
var=self.plotter.do_datarv1,
state=tk.DISABLED)
self.plot_rv1data_button.grid(row=2)
self.plot_rv2data_label = ttk.Label(plt_frame,
text='Secondary RV data',
state=tk.DISABLED)
self.plot_rv2data_label.grid(row=3, column=1)
self.plot_rv2data_button = ttk.Checkbutton(plt_frame,
var=self.plotter.do_datarv2,
state=tk.DISABLED)
self.plot_rv2data_button.grid(row=3)
self.plot_asdata_label = ttk.Label(plt_frame, text='Astrometric data',
state=tk.DISABLED)
self.plot_asdata_label.grid(row=4, column=1)
self.plot_asdata_button = ttk.Checkbutton(plt_frame,
var=self.plotter.do_dataas,
state=tk.DISABLED)
self.plot_asdata_button.grid(row=4)
self.plot_rv1model_label = ttk.Label(plt_frame,
text='Primary RV model',
state=tk.DISABLED)
self.plot_rv1model_label.grid(row=2, column=3)
self.plot_rv1model_button = \
ttk.Checkbutton(plt_frame,
var=self.plotter.do_modelrv1,
state=tk.DISABLED)
self.plot_rv1model_button.grid(row=2, column=2)
self.plot_gamma1_label = ttk.Label(plt_frame, text=r'Gamma 1',
state=tk.DISABLED)
self.plot_gamma1_label.grid(row=3, column=3)
self.plot_gamma1_button = \
ttk.Checkbutton(plt_frame,
var=self.plotter.do_modelgamma1,
state=tk.DISABLED)
self.plot_gamma1_button.grid(row=3, column=2)
self.plot_rv2model_label = ttk.Label(plt_frame,
text='Secondary RV model',
state=tk.DISABLED)
self.plot_rv2model_label.grid(row=4, column=3)
self.plot_rv2model_button = \
ttk.Checkbutton(plt_frame,
var=self.plotter.do_modelrv2,
state=tk.DISABLED)
self.plot_rv2model_button.grid(row=4, column=2)
self.plot_gamma2_label = ttk.Label(plt_frame, text='Gamma 2',
state=tk.DISABLED)
self.plot_gamma2_label.grid(row=5, column=3)
self.plot_gamma2_button = \
ttk.Checkbutton(plt_frame,
var=self.plotter.do_modelgamma2,
state=tk.DISABLED)
self.plot_gamma2_button.grid(row=5, column=2)
self.plot_asmodel_label = ttk.Label(plt_frame, text='Model Orbit',
state=tk.DISABLED)
self.plot_asmodel_label.grid(row=2, column=5)
self.plot_asmodel_button = ttk.Checkbutton(plt_frame,
var=self.plotter.do_modelas,
state=tk.DISABLED)
self.plot_asmodel_button.grid(row=2, column=4)
self.plot_nodeline_label = ttk.Label(plt_frame, text='Line of nodes',
state=tk.DISABLED)
self.plot_nodeline_label.grid(row=3, column=5)
self.plot_nodeline_button = \
ttk.Checkbutton(plt_frame,
var=self.plotter.do_nodeline,
state=tk.DISABLED)
self.plot_nodeline_button.grid(row=3, column=4)
self.plot_semimajor_label = ttk.Label(plt_frame,
text='Semi-major axis',
state=tk.DISABLED)
self.plot_semimajor_label.grid(row=4, column=5)
self.plot_semimajor_button = \
ttk.Checkbutton(plt_frame,
var=self.plotter.do_semimajor,
state=tk.DISABLED)
self.plot_semimajor_button.grid(row=4, column=4)
self.plot_peri_label = ttk.Label(plt_frame, text='Periastron',
state=tk.DISABLED)
self.plot_peri_label.grid(row=5, column=5)
self.plot_peri_button = ttk.Checkbutton(plt_frame,
var=self.plotter.do_peri,
state=tk.DISABLED)
self.plot_peri_button.grid(row=5, column=4)
self.as_dist_label = ttk.Label(plt_frame, text='Astrometric errors',
state=tk.DISABLED)
self.as_dist_label.grid(row=6, column=5)
self.as_dist_button = ttk.Checkbutton(plt_frame,
var=self.plotter.do_as_dist,
state=tk.DISABLED)
self.as_dist_button.grid(row=6, column=4)
legend_button = ttk.Checkbutton(plt_frame, var=self.plotter.do_legend)
legend_button.grid(row=6)
ttk.Label(plt_frame, text='Legend').grid(row=6, column=1)
self.pphase_but = ttk.Radiobutton(plt_frame, text='phase',
command=self.toggle_phase_time,
variable=self.plotter.plot_vs_phase,
value=True, state=tk.DISABLED)
self.ptime_but = ttk.Radiobutton(plt_frame, text='time',
command=self.toggle_phase_time,
variable=self.plotter.plot_vs_phase,
value=False, state=tk.DISABLED)
self.pphase_but.grid(row=6, column=2)
self.ptime_but.grid(row=6, column=3)
self.modelwidgets = {self.plot_asmodel_label, self.plot_asmodel_button,
self.plot_rv1model_button,
self.plot_rv2model_button,
self.plot_rv1model_label,
self.plot_rv2model_label,
self.plot_semimajor_button,
self.plot_semimajor_label,
self.plot_nodeline_button,
self.plot_nodeline_label,
self.plot_peri_label,
self.plot_peri_button, self.as_dist_button,
self.as_dist_label,
self.pphase_but, self.ptime_but,
self.plot_gamma1_button,
self.plot_gamma1_label, self.plot_gamma2_button,
self.plot_gamma2_label}
plt_frame.pack()
ttk.Button(plt_frame_top, text='Refresh Plots', command=self.update,
).pack()
settings_frame = ttk.Frame(plt_frame_top)
entrycol = 1
ttk.Label(settings_frame, text='PLOT SETTINGS',
font=('', cst.TITLESIZE, 'underline')).grid(
columnspan=3)
ttk.Label(settings_frame, text='Axis label size').grid(row=1,
sticky=tk.E)
ttk.Entry(settings_frame, textvariable=self.plotter.axeslabelsize,
width=15) \
.grid(row=1, column=entrycol, columnspan=2)
ttk.Label(settings_frame, text='Tick label size').grid(row=2,
sticky=tk.E)
ttk.Entry(settings_frame, textvariable=self.plotter.ticklabelsize,
width=15) \
.grid(row=2, column=entrycol, columnspan=2)
ttk.Label(settings_frame, text='Grid visibility').grid(row=3,
sticky=tk.E)
self.grid_button = ttk.Checkbutton(settings_frame,
var=self.plotter.do_grids)
self.grid_button.grid(row=3, column=1)
ttk.Label(settings_frame, text='Axis limits').grid(row=4)
ttk.Radiobutton(settings_frame, text='Auto',
var=self.plotter.limcontrol, value=True,
command=self.togglelimcontrol).grid(row=4, column=1)
ttk.Radiobutton(settings_frame, text='Manual',
var=self.plotter.limcontrol, value=False,
command=self.togglelimcontrol).grid(row=4, column=2)
limitrow = 5
self.limit_labels = []
for i in range(8):
self.limit_labels.append(
ttk.Label(settings_frame, text=cst.LIM_STRINGS[i],
state=tk.DISABLED))
for i in range(8):
self.limit_labels[i].grid(row=limitrow + i, sticky=tk.E)
self.limit_entries = []
for i in range(8):
self.limit_entries.append(
ttk.Entry(settings_frame, textvariable=self.plotter.limits[i],
state=tk.DISABLED))
for i in range(8):
self.limit_entries[i].grid(row=limitrow + i, column=entrycol,
columnspan=2)
settings_frame.pack(pady=10)
ttk.Button(plt_frame_top, text='Refresh Plots',
command=self.plotter.update_plots).pack(pady=10)
ttk.Button(plt_frame_top, text='New plot windows',
command=self.plotter.init_plots).pack(pady=10)
# setup the plotting windows
self.plotter.init_plots()
# display in the root frame
tabs.add(data_frame_tab, text='Data Files', state=tk.NORMAL)
tabs.add(guess_infer_tab, text='System/Parameters')
tabs.add(min_frame_tab, text='Minimization')
tabs.add(plt_frame_tab, text='Plot Controls')
tabs.pack(expand=1, fill=tk.BOTH)
@staticmethod
def toggle(widg, boolvalue):
"""
toggles widget widg to be disabled or not given a boolvalue
:param widg: widget to toggle
:param boolvalue: bool
"""
if boolvalue:
widg.config(state=tk.NORMAL)
else:
widg.config(state=tk.DISABLED)
def toggle_phase_time(self):
if not self.plotter.plot_vs_phase.get() and \
self.plotter.do_phasedot.get():
self.plotter.do_phasedot.set(False)
self.toggle(self.phase_button, self.plotter.plot_vs_phase.get())
def togglelimcontrol(self):
for widg in self.limit_labels:
self.toggle(widg, not self.plotter.limcontrol.get())
for widg in self.limit_entries:
self.toggle(widg, not self.plotter.limcontrol.get())
if not self.plotter.limcontrol.get():
self.plotter.matchLimits()
def toggle_rv1(self):
"""
toggles the RV1 widgets
"""
for widg in self.rv1_file, self.rv1_label:
self.toggle(widg, self.load_rv1.get())
for widg in self.plot_rv1data_label, self.plot_rv1data_button:
self.toggle(widg, self.datamanager.hasRV1())
def toggle_rv2(self):
"""
toggles the RV2 widgets
"""
for widg in self.rv2_file, self.rv2_label:
self.toggle(widg, self.load_rv2.get())
for widg in self.plot_rv2data_button, self.plot_rv2data_label:
self.toggle(widg, self.datamanager.hasRV2())
def toggle_as(self):
"""
toggles the AS widgets
"""
for widg in self.as_file, self.as_label, self.seppa_but, self.en_but:
self.toggle(widg, self.load_as.get())
for widg in self.plot_asdata_label, self.plot_asdata_button:
self.toggle(widg, self.datamanager.hasAS())
def toggle_dot(self):
for widg in self.phase_slider, self.phase_label:
self.toggle(widg, self.plotter.do_phasedot.get())
def toggle_q(self):
self.q_mode.set(not self.q_mode.get())
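        # q = k1/k2 and k2 = k1/q are the same inversion, which is why both
        # branches below convert the entry with k1 / <current entry value>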
if self.q_mode.get():
self.param_var_list[8].set('q = k1/k2 =')
self.toggle(self.vary_button_list[8], False)
self.lock_q_button.config(text='k')
if float(self.guess_var_list[8].get()) != 0:
self.guess_var_list[8].set(np.round(
float(self.guess_var_list[7].get()) / float(
self.guess_var_list[8].get()), 3))
else:
self.param_var_list[8].set('k2 (km/s) =')
self.toggle(self.vary_button_list[8], True)
self.lock_q_button.config(text='q')
if float(self.guess_var_list[8].get()) != 0:
self.guess_var_list[8].set(np.round(
float(self.guess_var_list[7].get()) / float(
self.guess_var_list[8].get()), 3))
def toggle_lock(self):
self.lock_gs.set(not self.lock_gs.get())
if self.lock_gs.get():
self.lock_gs_button.config(image=self.unlocked_image)
for widgset in self.param_label_list, self.vary_button_list, \
self.guess_entry_list:
self.toggle(widgset[10], False)
else:
self.lock_gs_button.config(image=self.locked_image)
for widgset in self.param_label_list, self.vary_button_list, \
self.guess_entry_list:
self.toggle(widgset[10], True)
self.guess_var_list[10].set(str(self.guess_var_list[9].get()))
def toggle_method(self):
"""
toggles the appropriate method widgets
"""
for widg in self.mc_widg:
if self.method.get() == 'emcee':
self.toggle(widg, True)
else:
self.toggle(widg, False)
for widg in self.hops_label, self.hops_entry:
if self.method.get() == 'basinhopping':
self.toggle(widg, True)
else:
self.toggle(widg, False)
def toggle_weights(self):
"""
toggles the weight widgets
"""
if not (self.datamanager.hasAS() and (
self.datamanager.hasRV1() or self.datamanager.hasRV2())):
self.do_custom_weight.set(False)
self.toggle(self.weight_label, self.do_custom_weight.get())
self.toggle(self.weight_slider, self.do_custom_weight.get())
def set_RV_or_AS_mode(self):
"""
sets the parameters in the correct inference mode
"""
for lst in self.param_label_list, self.vary_button_list:
if self.datamanager.hasRV1() and self.datamanager.hasRV2() and \
self.datamanager.hasAS():
for i in {2, 4, 6, 7, 8, 9, 10, 11}:
lst[i].config(state=tk.NORMAL)
elif self.datamanager.hasRV1() and self.datamanager.hasRV2():
for i in {7, 8, 9, 10}:
lst[i].config(state=tk.NORMAL)
for i in {2, 4, 6, 11}:
lst[i].config(state=tk.DISABLED)
elif self.datamanager.hasRV1() and self.datamanager.hasAS():
for i in {2, 4, 6, 7, 9, 11}:
lst[i].config(state=tk.NORMAL)
for i in {8, 10}:
lst[i].config(state=tk.DISABLED)
elif self.datamanager.hasRV1():
                for i in {7, 9}:
lst[i].config(state=tk.NORMAL)
for i in {2, 4, 6, 8, 10, 11}:
lst[i].config(state=tk.DISABLED)
elif self.datamanager.hasAS():
for i in {2, 4, 6, 11}:
lst[i].config(state=tk.NORMAL)
for i in {7, 8, 9, 10}:
lst[i].config(state=tk.DISABLED)
else:
for i in {2, 4, 6, 7, 8, 9, 10}:
lst[i].config(state=tk.NORMAL)
lst[11].config(state=tk.DISABLED)
def transfer(self, varno):
"""
pushes a minimization result to the parameter column
:param varno: number in the parameter list
"""
self.guess_var_list[varno].set(self.mininimzed_var_list[varno].get())
def load_guesses(self):
"""
load guesses from a file to the guess column
"""
try:
self.guess_dict = spl.guess_loader(self.wd.get(),
self.guess_file.get())
except IOError:
print('cannot find your guess file!')
self.guess_dict = None
return
except ValueError as e:
            print('your guess file seems to have badly formatted data')
print(e)
self.guess_dict = None
return
try:
for i in range(len(cst.PARAM_LIST)):
self.guess_var_list[i].set(
self.guess_dict[cst.PARAM_LIST[i]][0])
self.vary_var_list[i].set(
str(self.guess_dict[cst.PARAM_LIST[i]][1]))
except (ValueError, TypeError) as e:
print('some parameter has not been set properly:', e)
self.guess_dict = None
return
self.set_system()
def set_guess_dict_from_entries(self):
"""
builds the guess dict from the guess column
"""
self.guess_dict = {}
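        # entry 8 holds k2, except in q mode where it holds q = k1/k2; in
        # that case convert back to k2 = k1/q before filling the guess dict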
for i in range(len(cst.PARAM_LIST)):
if i == 8:
self.guess_dict[cst.PARAM_LIST[i]] = (
float(self.guess_var_list[
8].get()) if not self.q_mode.get() else float(
self.guess_var_list[7].get()) / float(
self.guess_var_list[8].get()),
self.vary_var_list[8].get())
else:
self.guess_dict[cst.PARAM_LIST[i]] = (
float(self.guess_var_list[i].get()),
self.vary_var_list[i].get())
self.param_dict = dict()
for param, value in self.guess_dict.items():
self.param_dict[param] = value[0]
def set_system(self):
"""
sets the system from the current guess column
"""
try:
self.set_guess_dict_from_entries()
self.system = bsys.BinarySystem(self.param_dict)
except ValueError:
print('invalid model!')
self.guess_dict = None
self.system = None
self.plotter.plot_vs_phase.set(False)
self.toggle_phase_time()
for widg in self.modelwidgets:
self.toggle(widg, False)
return False
else:
self.toggle_phase_time()
for widg in self.modelwidgets:
self.toggle(widg, True)
return True
def minimize(self):
"""
launches a minimization run
"""
self.set_guess_dict_from_entries()
self.datamanager.buildSets()
data_dict = self.datamanager.get_all_data()
if self.guess_dict is not None and len(data_dict) > 0:
# calculate best parameters
try:
if self.do_custom_weight.get():
w = self.custom_as_weight.get()
else:
w = None
self.minresult, rms_rv1, rms_rv2, rms_as \
= spm.LMminimizer(self.guess_dict, data_dict,
self.method.get(),
self.hops.get(), self.steps.get(),
self.walkers.get(),
self.burn.get(), self.thin.get(), w,
self.lock_gs.get(),
self.q_mode.get())
if self.method.get() == 'emcee':
self.didmcmc = True
self.toggle(self.mcplotbutton, True)
else:
self.didmcmc = False
self.minimization_run_number += 1
self.toggle(self.min_save_button, True)
pars = self.minresult.params
# fill in the entries
self.mininimzed_var_list[0].set(np.round(pars['p'].value, 3))
if pars['p'].vary:
self.error_var_list[0].set(
np.round(0 if pars['p'].stderr is None else pars[
'p'].stderr, 3))
self.mininimzed_var_list[1].set(np.round(pars['e'].value, 3))
if pars['e'].vary:
self.error_var_list[1].set(
np.round(0 if pars['e'].stderr is None else pars[
'e'].stderr, 3))
self.mininimzed_var_list[2].set(
np.round(pars['i'].value % 360, 3))
if pars['i'].vary:
self.error_var_list[2].set(
np.round(0 if pars['i'].stderr is None else pars[
'i'].stderr, 3))
self.mininimzed_var_list[3].set(
np.round(pars['omega'].value % 360, 3))
if pars['omega'].vary:
self.error_var_list[3].set(
np.round(0 if pars['omega'].stderr is None else pars[
'omega'].stderr, 3))
self.mininimzed_var_list[4].set(
np.round(pars['Omega'].value % 360, 3))
if pars['Omega'].vary:
self.error_var_list[4].set(
np.round(0 if pars['Omega'].stderr is None else pars[
'Omega'].stderr, 3))
self.mininimzed_var_list[5].set(np.round(pars['t0'].value, 3))
if pars['t0'].vary:
self.error_var_list[5].set(
np.round(0 if pars['t0'].stderr is None else pars[
't0'].stderr, 3))
self.mininimzed_var_list[6].set(np.round(pars['d'].value, 3))
if pars['d'].vary:
self.error_var_list[6].set(
np.round(0 if pars['d'].stderr is None else pars[
'd'].stderr, 3))
self.mininimzed_var_list[7].set(np.round(pars['k1'].value, 3))
if pars['k1'].vary:
self.error_var_list[7].set(
np.round(0 if pars['k1'].stderr is None else pars[
'k1'].stderr, 3))
if self.q_mode.get():
self.mininimzed_var_list[8].set(
np.round(pars['q'].value, 3))
else:
self.mininimzed_var_list[8].set(
np.round(pars['k2'].value, 3))
if pars['k2'].vary:
self.error_var_list[8].set(
np.round(0 if pars['k2'].stderr is None else pars[
'k2'].stderr, 3))
self.mininimzed_var_list[9].set(
np.round(pars['gamma1'].value, 3))
if pars['gamma1'].vary:
self.error_var_list[9].set(
np.round(0 if pars['gamma1'].stderr is None else pars[
'gamma1'].stderr, 3))
self.mininimzed_var_list[10].set(
np.round(pars['gamma2'].value, 3))
if pars['gamma2'].vary:
self.error_var_list[10].set(
np.round(0 if pars['gamma2'].stderr is None else pars[
'gamma2'].stderr, 3))
self.mininimzed_var_list[11].set(np.round(pars['mt'].value, 3))
if pars['mt'].vary:
self.error_var_list[11].set(
np.round(0 if pars['mt'].stderr is None else pars[
'mt'].stderr, 3))
self.redchisq.set(np.round(self.minresult.redchi, 4))
self.dof.set(self.minresult.nfree)
self.rms_rv1.set(np.round(rms_rv1, 4))
self.rms_rv2.set(np.round(rms_rv2, 4))
self.rms_as.set(np.round(rms_as, 4))
self.minimization_run_number += 1
except ValueError as e:
print(e)
def set_inferred_params(self):
self.mprimary.set(np.round(self.system.primary_mass(), 2))
self.msecondary.set(np.round(self.system.secondary_mass(), 2))
self.totalmass.set(np.round(self.system.total_mass(), 2))
self.semimajork1k2.set(
np.round(self.system.semimajor_axis_from_RV(), 2))
self.semimajord.set(
np.round(self.system.semimajor_axis_from_distance(), 2))
def update(self):
"""
        updates the GUI by replotting everything that is selected
"""
self.datamanager.buildSets()
if not self.set_system():
return
if self.system is not None:
self.set_inferred_params()
self.plotter.update_plots()
def save_params(self):
"""
save minimized parameters to a file
"""
out = util.getString('name your parameters file',
default='fitted_params')
wd = spl.check_slash(self.wd.get())
with open(wd + out + '{}.txt'.format(self.minimization_run_number),
'w') as f:
if self.minresult is not None:
f.write(lm.fit_report(self.minresult))
f.write('\n')
#
# f.write('reduced chisq = {} \n'.format(self.redchisq.get()))
# f.write('dof = {} \n'.format(self.dof.get()))
if self.didmcmc:
np.savetxt(wd + out + '{}_flatchain.txt'.format(
self.minimization_run_number),
self.minresult.flatchain,
header='param order: {}'.format(
self.minresult.var_names))
else:
np.savetxt(
wd + out + '_covar{}.txt'.format(self.minimization_run_number),
self.minresult.covar,
header='param order in this covar matrix: {}'.format(
self.minresult.var_names))
def save_guesses(self):
"""
        save the guessed parameters to a file
"""
out = util.getString('name your guesses file', default='guess_save')
self.set_guess_dict_from_entries()
spl.guess_saver(self.wd.get(), out, self.guess_dict)
def run(wd):
"""
Run the main spinOS app tk loop.
:param wd: working directory to set as root.
"""
root = tk.Tk()
wdir = pathlib.PurePath(__file__).parent.parent
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
with splash.Splash(root, wdir.joinpath('rsc/spinos100.png'), 2.1, w, h):
root.geometry("{}x{}+0+0".format(int(0.37 * w),
int(0.95 * h))) # TODO: on linux
# this might not scale properly
root.title('spinOS v{}'.format(cst.VERSION))
SpinOSGUI(root, wd, w, h)
root.mainloop()
| 54,565 | 44.700168 | 79 | py |
spinOS | spinOS-master/modules/utils.py | """
Copyright 2020, 2021, 2022 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
"""
import tkinter as tk
import tkinter.simpledialog
import tkinter.ttk as ttk
def getString(msg, default=None):
name = tk.simpledialog.askstring('name', msg)
    # askstring returns None on cancel and '' on empty input; fall back to
    # the default in both cases
    if not name and default is not None:
name = default
return name
class VerticalScrolledFrame:
"""
    A vertically scrolled Frame that can be treated like any other Frame,
    i.e. it needs a master and layout, and it can be a master.
    :width:, :height:, :bg: are passed to the underlying Canvas
    :bg: and all other keyword arguments are passed to the inner Frame
    note that a widget laid out in this frame will have a self.master 3
    layers deep (outer Frame, Canvas, inner Frame), so if you subclass
    this there is no built-in way for the children to access it. You
    need to provide the controller separately.
    """
def __init__(self, master, **kwargs):
width = kwargs.pop('width', None)
height = kwargs.pop('height', None)
bg = kwargs.pop('bg', kwargs.pop('background', None))
self.outer = ttk.Frame(master, **kwargs)
self.vsb = tk.Scrollbar(self.outer, orient=tk.VERTICAL)
self.vsb.pack(fill=tk.Y, side=tk.RIGHT)
self.canvas = tk.Canvas(self.outer, highlightthickness=0, width=width,
height=height, bg=bg)
self.canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.canvas['yscrollcommand'] = self.vsb.set
# mouse scroll does not seem to work with just "bind"; You have
# to use "bind_all". Therefore to use multiple windows you have
# to bind_all in the current widget
# works for me tho
self.canvas.bind("<Configure>", self._on_canvas_configure)
self.canvas.bind("<Enter>", self._bind_mouse)
self.canvas.bind("<Leave>", self._unbind_mouse)
self.vsb['command'] = self.canvas.yview
self.inner = ttk.Frame(self.canvas, bg=bg)
# pack the inner Frame into the Canvas with the topleft corner 4
# pixels offset
self.canvas_window = self.canvas.create_window((4, 4),
window=self.inner,
anchor='nw',
tags='self.inner')
self.inner.bind("<Configure>", self._on_frame_configure)
self.outer_attr = set(dir(tk.Widget))
def __getattr__(self, item):
if item in self.outer_attr:
# geometry attributes etc (eg pack, destroy, tkraise) are passed
# on to self.outer
return getattr(self.outer, item)
else:
# all other attributes (_w, children, etc) are passed to self.inner
return getattr(self.inner, item)
def _on_canvas_configure(self, event):
self.canvas.itemconfig(self.canvas_window, width=event.width)
def _on_frame_configure(self, event=None):
x1, y1, x2, y2 = self.canvas.bbox("all")
height = self.canvas.winfo_height()
self.canvas.config(scrollregion=(0, 0, x2, max(y2, height)))
def _bind_mouse(self, event=None):
self.canvas.bind_all("<4>", self._on_mousewheel)
self.canvas.bind_all("<5>", self._on_mousewheel)
self.canvas.bind_all("<MouseWheel>", self._on_mousewheel)
def _unbind_mouse(self, event=None):
self.canvas.unbind_all("<4>")
self.canvas.unbind_all("<5>")
self.canvas.unbind_all("<MouseWheel>")
def _on_mousewheel(self, event):
# Linux uses event.num; Windows / Mac uses event.delta
if event.num == 4 or event.delta > 0:
self.canvas.yview_scroll(-1, "units")
elif event.num == 5 or event.delta < 0:
self.canvas.yview_scroll(1, "units")
def __str__(self):
return str(self.outer)
| 4,526 | 40.53211 | 79 | py |
spinOS | spinOS-master/modules/spinOS_io.py | """
Copyright 2020, 2021 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
Module that handles the loading of the relevant data for the solver.
"""
import numpy as np
def guess_loader(wd: str, guessfile: str) -> dict:
"""
parses the guess file and determines values and flags for each guess
:param wd: the working directory
:param guessfile: pathname (relative to wd) pointing to the file
containing guesses
:return: dictionary containing the guesses and flags for each parameter
"""
wd = check_slash(wd)
guesses = np.genfromtxt(wd + guessfile, dtype=None, filling_values=np.nan,
usecols=(0, 1, 2), encoding='utf-8')
guessdict = dict()
for i in range(12):
guessdict[guesses[i][0]] = (guesses[i][1], guesses[i][2])
return guessdict
def guess_saver(wd: str, name: str, guess_dict: dict) -> None:
"""
saves guesses to a file
:param wd: working directory
:param name: file name
:param guess_dict: guesses to save
"""
wd = check_slash(wd)
with open(wd + name + '.txt', 'w') as guessfile:
for param, guess in guess_dict.items():
guessfile.write(param + ' {} {}\n'.format(guess[0], str(guess[1])))
def data_loader(wd: str, filetypes: list, filenames: list) -> dict:
"""
loads data from files into a dictionary
:param wd: working directory where the files are
:param filetypes: data types to load, must be 'RV1file', 'RV2file',
or 'ASfile'
:param filenames: names of the files in question
:return: data in a dictionary
"""
wd = check_slash(wd)
data_dict = dict()
for i in range(len(filetypes)):
if filetypes[i] == 'RV1file':
data = np.loadtxt(wd + filenames[i])
data_dict['RV1'] = dict()
data_dict['RV1']['hjds'] = data[:, 0]
data_dict['RV1']['RVs'] = data[:, 1]
try:
data_dict['RV1']['errors'] = data[:, 2]
except IndexError:
# put dummy error if none found in data
data_dict['RV1']['errors'] = data[:, 1] * 0.05
elif filetypes[i] == 'RV2file':
data = np.loadtxt(wd + filenames[i])
data_dict['RV2'] = dict()
data_dict['RV2']['hjds'] = data[:, 0]
data_dict['RV2']['RVs'] = data[:, 1]
try:
data_dict['RV2']['errors'] = data[:, 2]
except IndexError:
# put dummy error if none found in data
data_dict['RV2']['errors'] = data[:, 1] * 0.05
elif filetypes[i] == 'ASfile':
data = np.loadtxt(wd + filenames[i])
data_dict['AS'] = dict()
data_dict['AS']['hjds'] = data[:, 0]
data_dict['AS']['majors'] = data[:, 3]
data_dict['AS']['minors'] = data[:, 4]
data_dict['AS']['pas'] = data[:, 5]
data_dict['AS']['eastsorsep'] = data[:, 1]
data_dict['AS']['northsorpa'] = data[:, 2]
return data_dict
def convert_error_ellipse(major, minor, angle):
"""
    Converts error ellipses to actual east and north errors by sampling
    the error ellipse Monte-Carlo style and then taking the standard
    deviation in the east and north directions.
:param major: length of the major axis of the error ellipse
:param minor: length of the minor axis of the error ellipse
:param angle: position angle east of north of the major axis
:return: east and north error
"""
num = 1000
cosa = np.cos(angle)
sina = np.sin(angle)
temp_major = np.random.randn(num) * major
temp_minor = np.random.randn(num) * minor
rotated_temp = np.matmul(np.array([[cosa, sina], [-sina, cosa]]),
[temp_major, temp_minor])
east_error = np.std(rotated_temp[0])
north_error = np.std(rotated_temp[1])
return east_error, north_error
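# A quick sanity check (a sketch; exact values vary with the random draws):
#     east, north = convert_error_ellipse(1.0, 1.0, 0.0)
#     # a circular error ellipse should give east ~ north ~ 1.0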
def check_slash(wd):
if len(wd) == 0:
return wd
if wd[-1] != '/':
wd += '/'
return wd
| 4,606 | 35.563492 | 79 | py |
spinOS | spinOS-master/modules/data_manager.py | """
Copyright 2020, 2021, 2022 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
"""
import tkinter as tk
import tkinter.ttk as ttk
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
import numpy as np
import modules.constants as cst
import modules.spinOS_io as spl
import modules.utils as util
if TYPE_CHECKING:
import modules.gui as gui
class DataManager:
def __init__(self, ggui: 'gui.SpinOSGUI'):
self.gui = ggui
self.datasets = {'RV1': [], 'RV2': [], 'AS': []}
self.defWeight = None
def buildSets(self):
for dataset in self.datasets['RV1']:
dataset.setData()
for dataset in self.datasets['RV2']:
dataset.setData()
for dataset in self.datasets['AS']:
dataset.setData()
self.setDefWeight()
self.gui.def_as_weight.set(np.round(self.defWeight, 4))
def getBuiltRV1s(self):
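        # stack the included rows of every primary-RV dataset into one
        # (n, 3) array of (HJD, RV, error)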
data = None
for dataset in self.datasets['RV1']:
if data is None and dataset.getData() is not None:
data = dataset.getData()
elif dataset.getData() is not None:
data = np.vstack((data, dataset.getData()))
return data
def getBuiltRV2s(self):
data = None
for dataset in self.datasets['RV2']:
if data is None and dataset.getData() is not None:
data = dataset.getData()
elif dataset.getData() is not None:
data = np.vstack((data, dataset.getData()))
return data
def getBuiltASs(self):
data = None
for dataset in self.datasets['AS']:
if data is None and dataset.getData() is not None:
data = dataset.getData()
elif dataset.getData() is not None:
data = np.vstack((data, dataset.getData()))
return data
def get_all_data(self):
datadict = {}
if self.hasRV1():
datadict['RV1'] = self.getBuiltRV1s()
if self.hasRV2():
datadict['RV2'] = self.getBuiltRV2s()
if self.hasAS():
datadict['AS'] = self.getBuiltASs()
return datadict
def hasRV1(self):
return len(self.datasets['RV1']) > 0
def hasRV2(self):
return len(self.datasets['RV2']) > 0
def hasAS(self):
return len(self.datasets['AS']) > 0
def loaddataintoSets(self):
"""
loads the data from the currently selected files into datasets
"""
filetypes = list()
filenames = list()
if self.gui.rv1_file.get() != '' and self.gui.load_rv1.get():
filetypes.append('RV1file')
filenames.append(self.gui.rv1_file.get())
if self.gui.rv2_file.get() != '' and self.gui.load_rv2.get():
filetypes.append('RV2file')
filenames.append(self.gui.rv2_file.get())
if self.gui.as_file.get() != '' and self.gui.load_as.get():
filetypes.append('ASfile')
filenames.append(self.gui.as_file.get())
try:
datahere = spl.data_loader(self.gui.wd.get(), filetypes, filenames)
except (OSError, KeyError) as e:
print(e)
return None
if self.gui.rv1_file.get() != '' and self.gui.load_rv1.get():
name = util.getString(
'Name of dataset from {}'.format(self.gui.rv1_file.get()),
default=self.gui.rv1_file.get())
if name is not None:
newset = RVDataSet(self, self.gui.rv1_tab, self.gui.rv1book,
self.gui.rv1_file.get(),
name, tpe='RV1')
newset.setentriesfromfile(datahere['RV1'])
self.datasets['RV1'].append(newset)
self.gui.plotter.rv1data_lines.append(None)
self.gui.load_rv1.set(False)
self.gui.toggle_rv1()
if self.gui.rv2_file.get() != '' and self.gui.load_rv2.get():
name = util.getString(
'Name of dataset from {}'.format(self.gui.rv2_file.get()),
default=self.gui.rv2_file.get())
if name is not None:
newset = RVDataSet(self, self.gui.rv2_tab, self.gui.rv2book,
self.gui.rv2_file.get(),
name, tpe='RV2')
newset.setentriesfromfile(datahere['RV2'])
self.datasets['RV2'].append(newset)
self.gui.load_rv2.set(False)
self.gui.toggle_rv2()
if self.gui.as_file.get() != '' and self.gui.load_as.get():
name = util.getString(
'Name of dataset from {}'.format(self.gui.as_file.get()),
default=self.gui.as_file.get())
if name is not None:
newset = ASDataSet(self, self.gui.as_tab, self.gui.asbook,
self.gui.as_file.get(),
name, seppa=self.gui.seppa.get())
newset.setentriesfromfile(datahere['AS'])
self.datasets['AS'].append(newset)
self.gui.load_as.set(False)
self.gui.toggle_as()
self.gui.set_RV_or_AS_mode()
self.buildSets()
def emptyrv1DataSet(self):
newset = RVDataSet(self, self.gui.rv1_tab, self.gui.rv1book,
util.getString('Name of new data set',
default='rv1'), tpe='RV1')
if newset is not None:
self.datasets['RV1'].append(newset)
self.gui.toggle_rv1()
def emptyrv2DataSet(self):
newset = RVDataSet(self, self.gui.rv2_tab, self.gui.rv2book,
util.getString('Name of new data set',
default='rv2'), tpe='RV2')
if newset is not None:
self.datasets['RV2'].append(newset)
self.gui.toggle_rv2()
def emptyasDataSet(self, seppa=False):
newset = ASDataSet(self, self.gui.as_tab, self.gui.asbook,
util.getString('Name of new data set',
default='astrometry'), seppa=seppa)
if newset is not None:
self.datasets['AS'].append(newset)
self.gui.toggle_as()
def setDefWeight(self):
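        # the default astrometric weight is the fraction of all loaded data
        # points that are astrometric: numas / (numrv1 + numrv2 + numas)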
if not self.hasAS():
self.defWeight = 0
else:
numas = sum(
(len(dataset.getData()) if dataset.getData() is not None else 0
for dataset in
self.datasets['AS']))
if not self.hasRV1():
numrv1 = 0
else:
numrv1 = sum(
(
len(dataset.getData()) if dataset.getData() is not None
else 0
for dataset in
self.datasets['RV1']))
if not self.hasRV2():
numrv2 = 0
else:
numrv2 = sum(
(
len(dataset.getData()) if dataset.getData() is not None
else 0
for dataset in
self.datasets['RV2']))
self.defWeight = numas / (numrv1 + numrv2 + numas)
class DataSet(ABC):
# TODO: add support for deletion of datasets
def __init__(self, dataman, tab, book, filename, name=None, tpe='AS'):
if name is None:
name = filename
self.dataman: DataManager = dataman
self.gui = self.dataman.gui
self.fname = filename
self.tpe = tpe
newset = ttk.Frame(tab)
self.name_var = tk.StringVar()
self.name_var.set(name)
self.name_label = ttk.Label(newset, textvariable=self.name_var,
font=('', 13, 'bold'))
self.name_label.pack()
self.datagrid = util.VerticalScrolledFrame(newset)
self.datagrid.pack(fill=tk.BOTH, expand=1)
self.selall = tk.BooleanVar(value=True)
tk.Checkbutton(self.datagrid, var=self.selall, bg=cst.BGCOLOR,
command=self.selAll).grid()
but = ttk.Frame(newset)
ttk.Button(but, text='+', command=self.addentry).grid()
ttk.Button(but, text='Rename', command=self.setName) \
.grid(row=0, column=1)
but.pack()
self.data = None
self.book = book
self.book.add(newset, text=self.fname)
self.id = len(book.tabs()) - 1
self.entries = []
        # I add line objects to the plotter but don't bind them to these
        # dataset objects, which makes deletion hard
if self.tpe == 'RV1':
self.gui.plotter.rv1data_lines.append(None)
elif self.tpe == 'RV2':
self.gui.plotter.rv2data_lines.append(None)
else:
self.gui.plotter.asdata_lines.append(None)
self.gui.plotter.as_ellipses.append(None)
self.gui.plotter.as_dist_lines.append(None)
def setName(self) -> None:
self.name_var.set(util.getString('new name for this dataset'))
def setData(self) -> None:
self.data = None
for entry in self.entries:
if self.data is None and entry.toInclude():
self.data = entry.getData()
elif entry.toInclude():
self.data = np.vstack((self.data, entry.getData()))
if self.data is not None and self.data.ndim == 1:
self.data = self.data.reshape((1, self.data.size))
@abstractmethod
def setentriesfromfile(self, data) -> None:
raise NotImplementedError
@abstractmethod
def addentry(self) -> None:
raise NotImplementedError
def getData(self) -> np.ndarray:
return self.data
def selAll(self):
for entry in self.entries:
entry.include.set(self.selall.get())
self.dataman.buildSets()
class RVDataSet(DataSet):
def __init__(self, dataman, tab, book, filename, name=None, **kwargs):
super().__init__(dataman, tab, book, filename, name, **kwargs)
ttk.Label(self.datagrid, text='date').grid(row=0, column=1)
ttk.Label(self.datagrid, text='RV').grid(row=0, column=2)
ttk.Label(self.datagrid, text='error').grid(row=0, column=3)
def addentry(self):
self.entries.append(RVEntry(self.datagrid, len(self.entries) + 1))
def setentriesfromfile(self, datadict):
self.entries = [] # delete all present entries here
for i in range(len(datadict['hjds'])):
self.entries.append(
RVEntry(self.datagrid, i + 1, hjdin=datadict['hjds'][i],
rvin=datadict['RVs'][i],
errorin=datadict['errors'][i]))
class ASDataSet(DataSet):
def __init__(self, dataman, tab, book, filename, name=None, seppa=False,
**kwargs):
super().__init__(dataman, tab, book, filename, name, **kwargs)
self.seppa = seppa
ttk.Label(self.datagrid, text='date').grid(row=0, column=1)
ttk.Label(self.datagrid, text='Sep' if self.seppa else 'East').grid(
row=0, column=2)
ttk.Label(self.datagrid, text='PA' if self.seppa else 'North').grid(
row=0, column=3)
ttk.Label(self.datagrid, text='major').grid(row=0, column=4)
ttk.Label(self.datagrid, text='minor').grid(row=0, column=5)
ttk.Label(self.datagrid, text='error PA').grid(row=0, column=6)
    def addentry(self):
        # grid new entries into self.datagrid, the same parent used by the
        # header labels and by setentriesfromfile, so rows line up
        self.entries.append(
            ASEntry(self.datagrid, len(self.entries) + 1))
def setentriesfromfile(self, datadict):
self.entries = [] # delete all present entries here
for i in range(len(datadict['hjds'])):
self.entries.append(
ASEntry(self.datagrid, i + 1, hjdin=datadict['hjds'][i],
eastorsepin=datadict['eastsorsep'][i],
northorpain=datadict['northsorpa'][i],
majorin=datadict['majors'][i],
minorin=datadict['minors'][i],
pain=datadict['pas'][i], seppa=self.seppa))
class Entry(ABC):
def __init__(self, datagrid, i, hjdin=None):
self.include = tk.BooleanVar()
check = tk.Checkbutton(datagrid, bg=cst.BGCOLOR, var=self.include)
check.grid(row=i, sticky=tk.E)
self.hjdvar = tk.DoubleVar()
if hjdin is not None:
self.hjdvar.set(hjdin)
self.include.set(True)
hjd = ttk.Entry(datagrid, textvariable=self.hjdvar, width=10)
hjd.grid(row=i, column=1)
def toInclude(self):
return self.include.get()
def getHjd(self):
return self.hjdvar.get()
class RVEntry(Entry):
def __init__(self, datagrid, i, rvin=None, errorin=None, hjdin=None):
super().__init__(datagrid, i, hjdin)
self.rvvar = tk.DoubleVar()
if rvin is not None:
self.rvvar.set(rvin)
rv = ttk.Entry(datagrid, textvariable=self.rvvar, width=10)
rv.grid(row=i, column=2)
self.errorvar = tk.DoubleVar()
if errorin is not None:
self.errorvar.set(errorin)
error = ttk.Entry(datagrid, textvariable=self.errorvar, width=10)
error.grid(row=i, column=3)
def getData(self):
return np.array([self.getHjd(), self.getRV(), self.getError()])
def getRV(self):
return self.rvvar.get()
def getError(self):
return self.errorvar.get()
class ASEntry(Entry):
def __init__(self, datagrid, i, seppa=False,
hjdin=None, eastorsepin=None, northorpain=None, majorin=None,
minorin=None,
pain=None):
super().__init__(datagrid, i, hjdin)
self.seppa = seppa
self.eastorsepvar = tk.DoubleVar()
if eastorsepin is not None:
self.eastorsepvar.set(eastorsepin)
eastorsep = ttk.Entry(datagrid, textvariable=self.eastorsepvar,
width=5)
eastorsep.grid(row=i, column=2)
self.northorpavar = tk.DoubleVar()
if northorpain is not None:
self.northorpavar.set(northorpain)
northorpa = ttk.Entry(datagrid, textvariable=self.northorpavar,
width=5)
northorpa.grid(row=i, column=3)
self.majorvar = tk.DoubleVar()
if majorin is not None:
self.majorvar.set(majorin)
major = ttk.Entry(datagrid, textvariable=self.majorvar, width=5)
major.grid(row=i, column=4)
self.minorvar = tk.DoubleVar()
if minorin is not None:
self.minorvar.set(minorin)
minor = ttk.Entry(datagrid, textvariable=self.minorvar, width=5)
minor.grid(row=i, column=5)
self.pavar = tk.DoubleVar()
if pain is not None:
self.pavar.set(pain)
pa = ttk.Entry(datagrid, textvariable=self.pavar, width=5)
pa.grid(row=i, column=6)
def getData(self):
if self.seppa:
east = self.getEastorsep() * np.sin(
self.getNorthorpa() * cst.DEG2RAD)
north = self.getEastorsep() * np.cos(
self.getNorthorpa() * cst.DEG2RAD)
else:
east = self.getEastorsep()
north = self.getNorthorpa()
easterror, northerror = spl.convert_error_ellipse(self.getMajor(),
self.getMinor(),
self.getPA())
return np.array([self.getHjd(), east, north, easterror, northerror,
self.getMajor(), self.getMinor(), self.getPA()])
def getEastorsep(self):
return self.eastorsepvar.get()
def getNorthorpa(self):
return self.northorpavar.get()
def getMajor(self):
return self.majorvar.get()
def getMinor(self):
return self.minorvar.get()
def getPA(self):
return self.pavar.get()
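
# Hedged worked example of the sep/PA conversion implemented in
# ASEntry.getData above (this demo is not part of the original file; it only
# uses numpy and the cst.DEG2RAD constant the code above already relies on).
if __name__ == '__main__':
    _sep, _pa = 10.0, 30.0  # separation and position angle (North through East)
    _east = _sep * np.sin(_pa * cst.DEG2RAD)   # = 5.0
    _north = _sep * np.cos(_pa * cst.DEG2RAD)  # ~ 8.66
    print('east = {:.2f}, north = {:.2f}'.format(_east, _north))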
| 16,837 | 37.009029 | 79 | py |
spinOS | spinOS-master/modules/minimizer.py | """
Copyright 2020, 2021 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
Module that performs a non-linear least-squares minimization of the
spectroscopic and/or astrometric data using the lmfit package.
"""
import time
import lmfit as lm
import numpy as np
from modules.binary_system import BinarySystem
RV1 = RV2 = AS = False
LAS = LRV = 0
def LMminimizer(guess_dict: dict, data_dict: dict, method: str = 'leastsq',
hops: int = 10,
steps: int = 1000, walkers: int = 100, burn: int = 100,
thin: int = 1,
as_weight: float = None,
lock_g: bool = None, lock_q: bool = None):
"""
    Minimizes the provided data to a binary star model, starting from the
    provided initial guesses.
:param as_weight: weight to give to the astrometric data, optional.
:param hops: int designating the number of hops if basinhopping is selected
    :param method: string to indicate which method to use: 'leastsq',
        'basinhopping' or 'emcee'
:param guess_dict: dictionary containing guesses and 'to-vary' flags for
the 11 parameters
:param data_dict: dictionary containing observational data of RV and/or
separations
:param steps: integer giving the number of steps each walker in the MCMC
should perform
:param walkers: integer giving the number of independent walkers to be
running
:param burn: integer giving the number of samples to be discarded (
"burned") at the start
:param thin: integer indicating to accept only 1 every thin samples
:param lock_g: boolean to indicate whether to lock gamma1 to gamma2
:param lock_q: boolean to indicate whether to lock k2 to k1/q, and that
q is supplied rather
than k2 in that field
:return: result from the lmfit minimization routine. It is a
MinimizerResult object.
"""
# protect users
if method == 'emcee' and burn >= steps:
print(
            'You are burning all steps of the MCMC chain! Please set '
            'burn < steps.')
return
# setup data for the solver
rv1s = None
rv2s = None
aas = None
# we need to store this on module level so the function to minimize
# knows quickly which data is
# included or not
global RV1, RV2, AS
RV1 = RV2 = AS = False
global LAS, LRV
LAS = LRV = 0
if 'RV1' in data_dict and data_dict['RV1'] is not None:
rv1s = data_dict['RV1']
RV1 = True
LRV = len(data_dict['RV1'])
if 'RV2' in data_dict and data_dict['RV2'] is not None:
rv2s = data_dict['RV2']
RV2 = True
LRV += len(data_dict['RV2'])
if 'AS' in data_dict and data_dict['AS'] is not None:
aas = data_dict['AS']
AS = True
LAS = 2 * len(data_dict['AS'])
# setup Parameters object for the solver
params = lm.Parameters()
# populate with parameter data
params.add_many(
('e', guess_dict['e'][0], guess_dict['e'][1], 0, 1 - 1e-5),
('i', guess_dict['i'][0], guess_dict['i'][1]),
('omega', guess_dict['omega'][0], guess_dict['omega'][1]),
('Omega', guess_dict['Omega'][0], guess_dict['Omega'][1]),
('t0', guess_dict['t0'][0], guess_dict['t0'][1]),
('p', guess_dict['p'][0], guess_dict['p'][1], 0),
('mt', guess_dict['mt'][0], guess_dict['mt'][1], 0),
('d', guess_dict['d'][0], guess_dict['d'][1], 0),
('k1', guess_dict['k1'][0], guess_dict['k1'][1], 0),
('gamma1', guess_dict['gamma1'][0], guess_dict['gamma1'][1]),
('k2', guess_dict['k2'][0], guess_dict['k2'][1], 0),
('gamma2', guess_dict['gamma2'][0], guess_dict['gamma2'][1])
)
if lock_g:
params['gamma2'].set(expr='gamma1')
if lock_q:
params.add('q', value=params['k1'] / params['k2'], vary=False)
params['k2'].set(expr='k1/q')
# put e to a non zero value to avoid conditioning problems in MCMC
if params['e'].value < 1e-8:
print(
'Warning: eccentricity is put to 1e-8 to avoid conditioning '
'issues!')
params['e'].set(value=1e-8)
if RV1 and RV2:
if not AS:
for key in 'd', 'i', 'Omega', 'mt':
params[key].set(vary=False)
elif RV1:
for key in 'k2', 'gamma2', 'd':
params[key].set(vary=False)
if not AS:
for key in 'i', 'Omega', 'mt':
params[key].set(vary=False)
elif AS and ('q' in params.valuesdict().keys()) \
and params.valuesdict()['q'] != 0:
params['i'] \
.set(expr='180-180/pi*asin(sqrt(1-e**2)*k1*(q+1)/q*'
'(p*86400/(2*pi*6.67430e-20*mt*1.9885e30))**(1/3))')
elif AS:
for key in 'k1', 'gamma1', 'k2', 'gamma2':
params[key].set(vary=False)
else:
raise ValueError('No data supplied! Cannot minimize.\n')
# build a minimizer object
minimizer = lm.Minimizer(fcn2min, params,
fcn_args=(rv1s, rv2s, aas, as_weight))
print('Starting Minimization with {}{}{}...'.format(
'primary RV data, ' if RV1 else '',
'secondary RV data, ' if RV2 else '',
'astrometric data' if AS else ''))
tic = time.time()
if method == 'leastsq':
result = minimizer.minimize()
elif method == 'basinhopping':
result = minimizer.minimize(method=method, disp=True, niter=hops, T=5,
minimizer_kwargs={'method': 'Nelder-Mead'})
elif method == 'emcee':
localresult = minimizer.minimize()
mcminimizer = lm.Minimizer(fcn2min, params=localresult.params,
fcn_args=(rv1s, rv2s, aas, as_weight))
print('Starting MCMC sampling using the minimized parameters...')
result = mcminimizer.emcee(steps=steps, nwalkers=walkers, burn=burn,
thin=thin)
else:
print('this minimization method not implemented')
return
toc = time.time()
print('Minimization Complete in {} s!\n'.format(np.round(toc - tic, 3)))
lm.report_fit(result.params)
rms_rv1, rms_rv2, rms_as = 0, 0, 0
system = BinarySystem(result.params.valuesdict())
if RV1:
# weigh with number of points for RV1 data
rms_rv1 = np.sqrt(np.sum(
(system.primary.radial_velocity_of_hjds(
rv1s[:, 0]) - rv1s[:, 1]) ** 2) / len(rv1s[:, 1]))
if RV2:
# Same for RV2
rms_rv2 = np.sqrt(np.sum(
(system.secondary.radial_velocity_of_hjds(
rv2s[:, 0]) - rv2s[:, 1]) ** 2) / len(rv2s[:, 1]))
if AS:
# same for AS
omc2E = np.sum(
(system.relative.east_of_hjds(aas[:, 0]) - aas[:, 1]) ** 2)
omc2N = np.sum(
(system.relative.north_of_hjds(aas[:, 0]) - aas[:, 2]) ** 2)
rms_as = np.sqrt((omc2E + omc2N) / LAS)
print('Minimization complete, check parameters tab for resulting orbit!\n')
return result, rms_rv1, rms_rv2, rms_as
def fcn2min(params, rv1s, rv2s, aas, weight=None):
"""
    Define the function to be minimized by the minimizer. It is simply the
    array of weighted distances from the model to the data, schematically:
fun = array((data[hjd]-model[hjd])/error_on_data(hjd))
The function will find out which data is omitted.
:param weight: multiplicative weight to give to the astrometric points,
optional. If None, no
additional weight is applied
:param params: Parameters object from the package lmfit, containing the
11 parameters to fit.
:param rv1s: list rv1 data, as formatted by dataManager.DataSet.setData()
:param rv2s: list rv2 data, as formatted by dataManager.DataSet.setData()
:param aas: list astrometric data, as formatted by
dataManager.DataSet.setData()
:return: array with the weighted errors of the data to the model defined
by the parameters
"""
# create the system belonging to the parameters
system = BinarySystem(params.valuesdict())
if RV1:
# Get weighted distance for RV1 data
chisq_rv1 = ((system.primary.radial_velocity_of_hjds(
rv1s[:, 0]) - rv1s[:, 1]) / rv1s[:, 2])
if weight:
chisq_rv1 *= (1 - weight) * (LAS + LRV) / LRV
else:
        # if RV1 data is not there, use an empty array for this part
chisq_rv1 = np.asarray(list())
if RV2:
# Same for RV2
chisq_rv2 = ((system.secondary.radial_velocity_of_hjds(
rv2s[:, 0]) - rv2s[:, 1]) / rv2s[:, 2])
if weight:
chisq_rv2 *= (1 - weight) * (LAS + LRV) / LRV
else:
chisq_rv2 = np.asarray(list())
if AS:
# same for AS
chisq_east = ((system.relative.east_of_hjds(
aas[:, 0]) - aas[:, 1]) / aas[:, 3])
chisq_north = ((system.relative.north_of_hjds(
aas[:, 0]) - aas[:, 2]) / aas[:, 4])
if weight:
chisq_east *= weight * (LAS + LRV) / LAS
chisq_north *= weight * (LAS + LRV) / LAS
else:
chisq_east = np.asarray(list())
chisq_north = np.asarray(list())
    # concatenate the four parts (RV1, RV2, AS east, AS north)
res = np.concatenate((chisq_rv1, chisq_rv2, chisq_east, chisq_north))
return res
| 9,981 | 38.299213 | 79 | py |
spinOS | spinOS-master/modules/splash.py | """
Copyright 2020, 2021 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
Defines the splash screen class
"""
import tkinter as tk
import time
from sys import platform
class Splash:
def __init__(self, rroot, file, wait, width, height):
self.__root = rroot
self.__file = file
self.__wait = wait + time.time()
self.__scrW, self.__scrH = width, height
def __enter__(self):
# hide main window
self.__root.withdraw()
self.__window = tk.Toplevel(self.__root)
self.__window.lift()
self.__splash = tk.PhotoImage(master=self.__window, file=self.__file)
if platform == 'darwin':
# noinspection PyProtectedMember
self.__window.tk.call("::tk::unsupported::MacWindowStyle", "style", self.__window._w, "plain", "none")
# geometry
imgW = self.__splash.width()
imgH = self.__splash.height()
Xpos = (self.__scrW - imgW) // 2
Ypos = (self.__scrH - imgH) // 2
self.__window.geometry('+{}+{}'.format(Xpos, Ypos))
# put image
tk.Label(self.__window, image=self.__splash).grid()
# display before .mainloop()
self.__window.update_idletasks()
self.__window.overrideredirect(True)
self.__window.update()
def __exit__(self, *args):
if time.time() < self.__wait:
time.sleep(self.__wait - time.time())
del self.__splash
self.__window.destroy()
self.__root.update_idletasks()
self.__root.deiconify()
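
# Hedged usage sketch (not part of the original module): Splash is written as
# a context manager, so a hypothetical caller could wrap slow GUI start-up as
# below. The image path and the 800x600 screen size are made-up values.
if __name__ == '__main__':
    root = tk.Tk()
    with Splash(root, 'rsc/splash.png', 1, 800, 600):
        time.sleep(0.5)  # stands in for expensive GUI construction
    root.mainloop()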
| 2,135 | 31.861538 | 114 | py |
Keras-FCN | Keras-FCN-master/inference.py | import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import os
import sys
import cv2
from PIL import Image
from keras.preprocessing.image import *
from keras.models import load_model
import keras.backend as K
from keras.applications.imagenet_utils import preprocess_input
import tensorflow as tf  # make the tf used for the session config explicit
from models import *
def inference(model_name, weight_file, image_size, image_list, data_dir, label_dir, return_results=True, save_dir=None,
label_suffix='.png',
data_suffix='.jpg'):
current_dir = os.path.dirname(os.path.realpath(__file__))
# mean_value = np.array([104.00699, 116.66877, 122.67892])
batch_shape = (1, ) + image_size + (3, )
save_path = os.path.join(current_dir, 'Models/'+model_name)
model_path = os.path.join(save_path, "model.json")
checkpoint_path = os.path.join(save_path, weight_file)
# model_path = os.path.join(current_dir, 'model_weights/fcn_atrous/model_change.hdf5')
# model = FCN_Resnet50_32s((480,480,3))
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)
K.set_session(session)
model = globals()[model_name](batch_shape=batch_shape, input_shape=(512, 512, 3))
model.load_weights(checkpoint_path, by_name=True)
model.summary()
results = []
total = 0
for img_num in image_list:
img_num = img_num.strip('\n')
total += 1
print('#%d: %s' % (total,img_num))
image = Image.open('%s/%s%s' % (data_dir, img_num, data_suffix))
image = img_to_array(image) # , data_format='default')
label = Image.open('%s/%s%s' % (label_dir, img_num, label_suffix))
label_size = label.size
img_h, img_w = image.shape[0:2]
# long_side = max(img_h, img_w, image_size[0], image_size[1])
pad_w = max(image_size[1] - img_w, 0)
pad_h = max(image_size[0] - img_h, 0)
        image = np.lib.pad(image, ((pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2), (0, 0)), 'constant', constant_values=0.)
# image -= mean_value
'''img = array_to_img(image, 'channels_last', scale=False)
img.show()
exit()'''
# image = cv2.resize(image, image_size)
image = np.expand_dims(image, axis=0)
image = preprocess_input(image)
result = model.predict(image, batch_size=1)
result = np.argmax(np.squeeze(result), axis=-1).astype(np.uint8)
result_img = Image.fromarray(result, mode='P')
result_img.palette = label.palette
# result_img = result_img.resize(label_size, resample=Image.BILINEAR)
        result_img = result_img.crop((pad_w // 2, pad_h // 2, pad_w // 2 + img_w, pad_h // 2 + img_h))
# result_img.show(title='result')
if return_results:
results.append(result_img)
if save_dir:
result_img.save(os.path.join(save_dir, img_num + '.png'))
return results
if __name__ == '__main__':
# model_name = 'AtrousFCN_Resnet50_16s'
# model_name = 'Atrous_DenseNet'
model_name = 'DenseNet_FCN'
weight_file = 'checkpoint_weights.hdf5'
image_size = (512, 512)
data_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages')
label_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/SegmentationClass')
image_list = sys.argv[1:]#'2007_000491'
results = inference(model_name, weight_file, image_size, image_list, data_dir, label_dir)
for result in results:
result.show(title='result', command=None)
| 3,546 | 37.978022 | 131 | py |
Keras-FCN | Keras-FCN-master/evaluate.py | import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import os
import sys
import time
import cv2
from PIL import Image
from keras.preprocessing.image import *
from keras.utils.np_utils import to_categorical
from keras.models import load_model
import keras.backend as K
from models import *
from inference import inference
def calculate_iou(model_name, nb_classes, res_dir, label_dir, image_list):
conf_m = zeros((nb_classes, nb_classes), dtype=float)
total = 0
# mean_acc = 0.
for img_num in image_list:
img_num = img_num.strip('\n')
total += 1
print('#%d: %s' % (total, img_num))
pred = img_to_array(Image.open('%s/%s.png' % (res_dir, img_num))).astype(int)
label = img_to_array(Image.open('%s/%s.png' % (label_dir, img_num))).astype(int)
flat_pred = np.ravel(pred)
flat_label = np.ravel(label)
# acc = 0.
for p, l in zip(flat_pred, flat_label):
if l == 255:
continue
if l < nb_classes and p < nb_classes:
conf_m[l, p] += 1
else:
print('Invalid entry encountered, skipping! Label: ', l,
' Prediction: ', p, ' Img_num: ', img_num)
# if l==p:
# acc+=1
#acc /= flat_pred.shape[0]
#mean_acc += acc
#mean_acc /= total
#print 'mean acc: %f'%mean_acc
I = np.diag(conf_m)
U = np.sum(conf_m, axis=0) + np.sum(conf_m, axis=1) - I
IOU = I/U
meanIOU = np.mean(IOU)
return conf_m, IOU, meanIOU
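
# Hedged worked example of the IoU computation above (not part of the
# original file): for a 2-class confusion matrix
#     conf_m = [[50, 10],
#               [ 5, 35]]
# I = diag(conf_m) = [50, 35] and U = colsums + rowsums - I = [65, 50], so
# IOU = [50/65, 35/50] ~ [0.77, 0.70] and meanIOU ~ 0.73.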
def evaluate(model_name, weight_file, image_size, nb_classes, batch_size, val_file_path, data_dir, label_dir,
label_suffix='.png',
data_suffix='.jpg'):
current_dir = os.path.dirname(os.path.realpath(__file__))
save_dir = os.path.join(current_dir, 'Models/'+model_name+'/res/')
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
fp = open(val_file_path)
image_list = fp.readlines()
fp.close()
start_time = time.time()
inference(model_name, weight_file, image_size, image_list, data_dir, label_dir, return_results=False, save_dir=save_dir,
label_suffix=label_suffix, data_suffix=data_suffix)
duration = time.time() - start_time
print('{}s used to make predictions.\n'.format(duration))
start_time = time.time()
conf_m, IOU, meanIOU = calculate_iou(model_name, nb_classes, save_dir, label_dir, image_list)
print('IOU: ')
print(IOU)
print('meanIOU: %f' % meanIOU)
print('pixel acc: %f' % (np.sum(np.diag(conf_m))/np.sum(conf_m)))
duration = time.time() - start_time
print('{}s used to calculate IOU.\n'.format(duration))
if __name__ == '__main__':
# model_name = 'Atrous_DenseNet'
model_name = 'AtrousFCN_Resnet50_16s'
# model_name = 'DenseNet_FCN'
weight_file = 'checkpoint_weights.hdf5'
# weight_file = 'model.hdf5'
image_size = (512, 512)
nb_classes = 21
batch_size = 1
dataset = 'VOC2012_BERKELEY'
if dataset == 'VOC2012_BERKELEY':
# pascal voc + berkeley semantic contours annotations
train_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/combined_imageset_train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
# train_file_path = os.path.expanduser('~/.keras/datasets/oneimage/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
val_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/combined_imageset_val.txt')
data_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages')
label_dir = os.path.expanduser('~/.keras/datasets/VOC2012/combined_annotations')
        label_suffix = '.png'
        data_suffix = '.jpg'  # was otherwise undefined when passed to evaluate() below
if dataset == 'COCO':
train_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/ImageSets/Segmentation/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
# train_file_path = os.path.expanduser('~/.keras/datasets/oneimage/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
val_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt')
data_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages')
label_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/SegmentationClass')
        label_suffix = '.npy'
        data_suffix = '.jpg'  # was otherwise undefined when passed to evaluate() below
evaluate(model_name, weight_file, image_size, nb_classes, batch_size, val_file_path, data_dir, label_dir,
label_suffix=label_suffix, data_suffix=data_suffix)
| 4,607 | 42.471698 | 170 | py |
Keras-FCN | Keras-FCN-master/train_coco.py | import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import os
import sys
import pickle
import time
from keras.optimizers import SGD, Adam
from keras.callbacks import *
from keras.objectives import *
from keras.models import load_model
import keras.backend as K
#import keras.utils.visualize_util as vis_util
from models import *
from train import *
from utils.loss_function import *
from utils.metrics import *
from utils.SegDataGenerator import *
# from tf_image_segmentation.recipes.mscoco import data_coco
if __name__ == '__main__':
# model_name = 'AtrousFCN_Resnet50_16s'
#model_name = 'Atrous_DenseNet'
model_name = 'DenseNet_FCN'
batch_size = 2
batchnorm_momentum = 0.95
epochs = 450
lr_base = 0.2 * (float(batch_size) / 4)
lr_power = float(1)/float(30)
resume_training=False
weight_decay = 0.0001/2
target_size = (320, 320)
dataset = 'COCO'
if dataset == 'VOC2012_BERKELEY':
# pascal voc + berkeley semantic contours annotations
train_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/combined_imageset_train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
# train_file_path = os.path.expanduser('~/.keras/datasets/oneimage/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
val_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/combined_imageset_val.txt')
data_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages')
        label_dir = os.path.expanduser('~/.keras/datasets/VOC2012/combined_annotations')
        classes = 21  # 'classes' was otherwise undefined in this branch
        class_weight = None
    if dataset == 'VOC2012':
train_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/ImageSets/Segmentation/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
# train_file_path = os.path.expanduser('~/.keras/datasets/oneimage/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
val_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt')
data_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages')
label_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/SegmentationClass')
classes = 21
class_weight = None
    elif dataset == 'COCO':
train_file_path = os.path.expanduser('~/.keras/datasets/coco/annotations/train2014.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
# train_file_path = os.path.expanduser('~/.keras/datasets/oneimage/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
val_file_path = os.path.expanduser('~/.keras/datasets/coco/annotations/test2014.txt')
data_dir = os.path.expanduser('~/.keras/datasets/coco/train2014')
label_dir = os.path.expanduser('~/.keras/datasets/coco/seg_mask/train2014')
stats_file = os.path.expanduser('~/.keras/datasets/coco/seg_mask/train2014/image_segmentation_class_stats.json')
classes = 91
# class_weight = data_coco.class_weight(image_segmentation_stats_file=stats_file)
class_weight = None
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)
K.set_session(session)
train(batch_size, epochs, lr_base, lr_power, weight_decay, classes, model_name, train_file_path, val_file_path,
data_dir, label_dir, target_size=target_size, batchnorm_momentum=batchnorm_momentum, resume_training=resume_training,
class_weight=class_weight, dataset=dataset)
| 3,586 | 51.75 | 170 | py |
Keras-FCN | Keras-FCN-master/models.py | import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import os
import sys
from keras_contrib.applications import densenet
from keras.models import Model
from keras.regularizers import l2
from keras.layers import *
from keras.engine import Layer
from keras.applications.vgg16 import *
from keras.models import *
from keras.applications.imagenet_utils import _obtain_input_shape
import keras.backend as K
import tensorflow as tf
from utils.get_weights_path import *
from utils.basics import *
from utils.resnet_helpers import *
from utils.BilinearUpSampling import *
def top(x, input_shape, classes, activation, weight_decay):
x = Conv2D(classes, (1, 1), activation='linear',
padding='same', kernel_regularizer=l2(weight_decay),
use_bias=False)(x)
if K.image_data_format() == 'channels_first':
channel, row, col = input_shape
else:
row, col, channel = input_shape
# TODO(ahundt) this is modified for the sigmoid case! also use loss_shape
    if activation == 'sigmoid':
x = Reshape((row * col * classes,))(x)
return x
def FCN_Vgg16_32s(input_shape=None, weight_decay=0., batch_momentum=0.9, batch_shape=None, classes=21):
if batch_shape:
img_input = Input(batch_shape=batch_shape)
image_size = batch_shape[1:3]
else:
img_input = Input(shape=input_shape)
image_size = input_shape[0:2]
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', kernel_regularizer=l2(weight_decay))(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
# Convolutional layers transfered from fully-connected layers
x = Conv2D(4096, (7, 7), activation='relu', padding='same', name='fc1', kernel_regularizer=l2(weight_decay))(x)
x = Dropout(0.5)(x)
x = Conv2D(4096, (1, 1), activation='relu', padding='same', name='fc2', kernel_regularizer=l2(weight_decay))(x)
x = Dropout(0.5)(x)
#classifying layer
x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = BilinearUpSampling2D(size=(32, 32))(x)
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_vgg16_weights_tf_dim_ordering_tf_kernels.h5'))
model.load_weights(weights_path, by_name=True)
return model
def AtrousFCN_Vgg16_16s(input_shape=None, weight_decay=0., batch_momentum=0.9, batch_shape=None, classes=21):
if batch_shape:
img_input = Input(batch_shape=batch_shape)
image_size = batch_shape[1:3]
else:
img_input = Input(shape=input_shape)
image_size = input_shape[0:2]
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', kernel_regularizer=l2(weight_decay))(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2', kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3', kernel_regularizer=l2(weight_decay))(x)
# Convolutional layers transfered from fully-connected layers
x = Conv2D(4096, (7, 7), activation='relu', padding='same', dilation_rate=(2, 2),
name='fc1', kernel_regularizer=l2(weight_decay))(x)
x = Dropout(0.5)(x)
x = Conv2D(4096, (1, 1), activation='relu', padding='same', name='fc2', kernel_regularizer=l2(weight_decay))(x)
x = Dropout(0.5)(x)
#classifying layer
x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = BilinearUpSampling2D(target_size=tuple(image_size))(x)
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_vgg16_weights_tf_dim_ordering_tf_kernels.h5'))
model.load_weights(weights_path, by_name=True)
return model
def FCN_Resnet50_32s(input_shape = None, weight_decay=0., batch_momentum=0.9, batch_shape=None, classes=21):
if batch_shape:
img_input = Input(batch_shape=batch_shape)
image_size = batch_shape[1:3]
else:
img_input = Input(shape=input_shape)
image_size = input_shape[0:2]
bn_axis = 3
x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', kernel_regularizer=l2(weight_decay))(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(x)
x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)
x = conv_block(3, [128, 128, 512], stage=3, block='a')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='b')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='c')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='d')(x)
x = conv_block(3, [256, 256, 1024], stage=4, block='a')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='b')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='c')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='d')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='e')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='f')(x)
x = conv_block(3, [512, 512, 2048], stage=5, block='a')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='b')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='c')(x)
#classifying layer
x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = BilinearUpSampling2D(size=(32, 32))(x)
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_resnet50_weights_tf_dim_ordering_tf_kernels.h5'))
model.load_weights(weights_path, by_name=True)
return model
def AtrousFCN_Resnet50_16s(input_shape = None, weight_decay=0., batch_momentum=0.9, batch_shape=None, classes=21):
if batch_shape:
img_input = Input(batch_shape=batch_shape)
image_size = batch_shape[1:3]
else:
img_input = Input(shape=input_shape)
image_size = input_shape[0:2]
bn_axis = 3
x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', kernel_regularizer=l2(weight_decay))(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(3, [64, 64, 256], stage=2, block='a', weight_decay=weight_decay, strides=(1, 1), batch_momentum=batch_momentum)(x)
x = identity_block(3, [64, 64, 256], stage=2, block='b', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [64, 64, 256], stage=2, block='c', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = conv_block(3, [128, 128, 512], stage=3, block='a', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [128, 128, 512], stage=3, block='b', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [128, 128, 512], stage=3, block='c', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [128, 128, 512], stage=3, block='d', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = conv_block(3, [256, 256, 1024], stage=4, block='a', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='b', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='c', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='d', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='e', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='f', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)
x = atrous_conv_block(3, [512, 512, 2048], stage=5, block='a', weight_decay=weight_decay, atrous_rate=(2, 2), batch_momentum=batch_momentum)(x)
x = atrous_identity_block(3, [512, 512, 2048], stage=5, block='b', weight_decay=weight_decay, atrous_rate=(2, 2), batch_momentum=batch_momentum)(x)
x = atrous_identity_block(3, [512, 512, 2048], stage=5, block='c', weight_decay=weight_decay, atrous_rate=(2, 2), batch_momentum=batch_momentum)(x)
#classifying layer
#x = Conv2D(classes, (3, 3), dilation_rate=(2, 2), kernel_initializer='normal', activation='linear', padding='same', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='linear', padding='same', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
x = BilinearUpSampling2D(target_size=tuple(image_size))(x)
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_resnet50_weights_tf_dim_ordering_tf_kernels.h5'))
model.load_weights(weights_path, by_name=True)
return model
def Atrous_DenseNet(input_shape=None, weight_decay=1E-4,
batch_momentum=0.9, batch_shape=None, classes=21,
include_top=False, activation='sigmoid'):
# TODO(ahundt) pass the parameters but use defaults for now
if include_top is True:
# TODO(ahundt) Softmax is pre-applied, so need different train, inference, evaluate.
# TODO(ahundt) for multi-label try per class sigmoid top as follows:
# x = Reshape((row * col * classes))(x)
# x = Activation('sigmoid')(x)
# x = Reshape((row, col, classes))(x)
return densenet.DenseNet(depth=None, nb_dense_block=3, growth_rate=32,
nb_filter=-1, nb_layers_per_block=[6, 12, 24, 16],
bottleneck=True, reduction=0.5, dropout_rate=0.2,
weight_decay=1E-4,
include_top=True, top='segmentation',
weights=None, input_tensor=None,
input_shape=input_shape,
classes=classes, transition_dilation_rate=2,
transition_kernel_size=(1, 1),
transition_pooling=None)
# if batch_shape:
# img_input = Input(batch_shape=batch_shape)
# image_size = batch_shape[1:3]
# else:
# img_input = Input(shape=input_shape)
# image_size = input_shape[0:2]
input_shape = _obtain_input_shape(input_shape,
default_size=32,
min_size=16,
data_format=K.image_data_format(),
include_top=False)
img_input = Input(shape=input_shape)
x = densenet.__create_dense_net(classes, img_input,
depth=None, nb_dense_block=3, growth_rate=32,
nb_filter=-1, nb_layers_per_block=[6, 12, 24, 16],
bottleneck=True, reduction=0.5, dropout_rate=0.2,
weight_decay=1E-4, top='segmentation',
input_shape=input_shape,
transition_dilation_rate=2,
transition_kernel_size=(1, 1),
transition_pooling=None,
include_top=include_top)
x = top(x, input_shape, classes, activation, weight_decay)
model = Model(img_input, x, name='Atrous_DenseNet')
# TODO(ahundt) add weight loading
return model
def DenseNet_FCN(input_shape=None, weight_decay=1E-4,
batch_momentum=0.9, batch_shape=None, classes=21,
include_top=False, activation='sigmoid'):
if include_top is True:
# TODO(ahundt) Softmax is pre-applied, so need different train, inference, evaluate.
# TODO(ahundt) for multi-label try per class sigmoid top as follows:
# x = Reshape((row * col * classes))(x)
# x = Activation('sigmoid')(x)
# x = Reshape((row, col, classes))(x)
return densenet.DenseNetFCN(input_shape=input_shape,
weights=None, classes=classes,
nb_layers_per_block=[4, 5, 7, 10, 12, 15],
growth_rate=16,
dropout_rate=0.2)
# if batch_shape:
# img_input = Input(batch_shape=batch_shape)
# image_size = batch_shape[1:3]
# else:
# img_input = Input(shape=input_shape)
# image_size = input_shape[0:2]
input_shape = _obtain_input_shape(input_shape,
default_size=32,
min_size=16,
data_format=K.image_data_format(),
include_top=False)
img_input = Input(shape=input_shape)
x = densenet.__create_fcn_dense_net(classes, img_input,
input_shape=input_shape,
nb_layers_per_block=[4, 5, 7, 10, 12, 15],
growth_rate=16,
dropout_rate=0.2,
include_top=include_top)
x = top(x, input_shape, classes, activation, weight_decay)
# TODO(ahundt) add weight loading
model = Model(img_input, x, name='DenseNet_FCN')
return model
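
# Hedged usage sketch (not part of the original file): every builder above is
# called the same way; the 320x320 input and 21 classes are the PASCAL VOC
# settings used elsewhere in this repository, and the ResNet variant assumes
# the weight file produced by utils/transfer_FCN.py is already present.
if __name__ == '__main__':
    model = AtrousFCN_Resnet50_16s(input_shape=(320, 320, 3), classes=21)
    model.summary()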
| 17,472 | 52.271341 | 176 | py |
Keras-FCN | Keras-FCN-master/train.py | import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import os
import sys
import pickle
from keras.optimizers import SGD, Adam, Nadam
from keras.callbacks import *
from keras.objectives import *
from keras.metrics import binary_accuracy
from keras.models import load_model
import keras.backend as K
#import keras.utils.visualize_util as vis_util
from models import *
from utils.loss_function import *
from utils.metrics import *
from utils.SegDataGenerator import *
import time
def train(batch_size, epochs, lr_base, lr_power, weight_decay, classes,
model_name, train_file_path, val_file_path,
data_dir, label_dir, target_size=None, batchnorm_momentum=0.9,
resume_training=False, class_weight=None, dataset='VOC2012',
loss_fn = softmax_sparse_crossentropy_ignoring_last_label,
metrics = [sparse_accuracy_ignoring_last_label],
loss_shape=None,
label_suffix='.png',
data_suffix='.jpg',
ignore_label=255,
label_cval=255):
if target_size:
input_shape = target_size + (3,)
else:
input_shape = (None, None, 3)
batch_shape = (batch_size,) + input_shape
###########################################################
current_dir = os.path.dirname(os.path.realpath(__file__))
save_path = os.path.join(current_dir, 'Models/' + model_name)
    if not os.path.exists(save_path):
os.mkdir(save_path)
# ###############learning rate scheduler####################
def lr_scheduler(epoch, mode='power_decay'):
'''if lr_dict.has_key(epoch):
lr = lr_dict[epoch]
print 'lr: %f' % lr'''
        if mode == 'power_decay':
            # original lr scheduler
            lr = lr_base * ((1 - float(epoch) / epochs) ** lr_power)
        if mode == 'exp_decay':
            # exponential decay
            lr = (float(lr_base) ** float(lr_power)) ** float(epoch + 1)
        # adam default lr
        if mode == 'adam':
            lr = 0.001
        if mode == 'progressive_drops':
# drops as progression proceeds, good for sgd
if epoch > 0.9 * epochs:
lr = 0.0001
elif epoch > 0.75 * epochs:
lr = 0.001
elif epoch > 0.5 * epochs:
lr = 0.01
else:
lr = 0.1
print('lr: %f' % lr)
return lr
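    # Hedged numeric example of the default 'power_decay' mode (not in the
    # original): with lr_base = 0.01, lr_power = 0.9 and epochs = 250, epoch 0
    # gives lr = 0.01 and epoch 125 gives 0.01 * 0.5**0.9 ~ 0.0054.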
scheduler = LearningRateScheduler(lr_scheduler)
# ###################### make model ########################
checkpoint_path = os.path.join(save_path, 'checkpoint_weights.hdf5')
model = globals()[model_name](weight_decay=weight_decay,
input_shape=input_shape,
batch_momentum=batchnorm_momentum,
classes=classes)
# ###################### optimizer ########################
optimizer = SGD(lr=lr_base, momentum=0.9)
# optimizer = Nadam(lr=lr_base, beta_1 = 0.825, beta_2 = 0.99685)
model.compile(loss=loss_fn,
optimizer=optimizer,
metrics=metrics)
if resume_training:
model.load_weights(checkpoint_path, by_name=True)
model_path = os.path.join(save_path, "model.json")
# save model structure
    with open(model_path, 'w') as f:
        # 'with' guarantees the file is closed; the original bare 'f.close'
        # (missing parentheses) never actually called close()
        f.write(model.to_json())
img_path = os.path.join(save_path, "model.png")
# #vis_util.plot(model, to_file=img_path, show_shapes=True)
model.summary()
# lr_reducer = ReduceLROnPlateau(monitor=softmax_sparse_crossentropy_ignoring_last_label, factor=np.sqrt(0.1),
# cooldown=0, patience=15, min_lr=0.5e-6)
# early_stopper = EarlyStopping(monitor=sparse_accuracy_ignoring_last_label, min_delta=0.0001, patience=70)
# callbacks = [early_stopper, lr_reducer]
callbacks = [scheduler]
# ####################### tfboard ###########################
if K.backend() == 'tensorflow':
tensorboard = TensorBoard(log_dir=os.path.join(save_path, 'logs'), histogram_freq=10, write_graph=True)
callbacks.append(tensorboard)
# ################### checkpoint saver#######################
checkpoint = ModelCheckpoint(filepath=os.path.join(save_path, 'checkpoint_weights.hdf5'), save_weights_only=True)#.{epoch:d}
callbacks.append(checkpoint)
# set data generator and train
train_datagen = SegDataGenerator(zoom_range=[0.5, 2.0],
zoom_maintain_shape=True,
crop_mode='random',
crop_size=target_size,
# pad_size=(505, 505),
rotation_range=0.,
shear_range=0,
horizontal_flip=True,
channel_shift_range=20.,
fill_mode='constant',
label_cval=label_cval)
val_datagen = SegDataGenerator()
def get_file_len(file_path):
fp = open(file_path)
lines = fp.readlines()
fp.close()
return len(lines)
# from Keras documentation: Total number of steps (batches of samples) to yield from generator before declaring one epoch finished
# and starting the next epoch. It should typically be equal to the number of unique samples of your dataset divided by the batch size.
steps_per_epoch = int(np.ceil(get_file_len(train_file_path) / float(batch_size)))
history = model.fit_generator(
generator=train_datagen.flow_from_directory(
file_path=train_file_path,
data_dir=data_dir, data_suffix=data_suffix,
label_dir=label_dir, label_suffix=label_suffix,
classes=classes,
target_size=target_size, color_mode='rgb',
batch_size=batch_size, shuffle=True,
loss_shape=loss_shape,
ignore_label=ignore_label,
# save_to_dir='Images/'
),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=callbacks,
workers=4,
# validation_data=val_datagen.flow_from_directory(
# file_path=val_file_path, data_dir=data_dir, data_suffix='.jpg',
# label_dir=label_dir, label_suffix='.png',classes=classes,
# target_size=target_size, color_mode='rgb',
# batch_size=batch_size, shuffle=False
# ),
# nb_val_samples = 64
class_weight=class_weight
)
model.save_weights(save_path+'/model.hdf5')
if __name__ == '__main__':
model_name = 'AtrousFCN_Resnet50_16s'
#model_name = 'Atrous_DenseNet'
#model_name = 'DenseNet_FCN'
batch_size = 16
batchnorm_momentum = 0.95
epochs = 250
lr_base = 0.01 * (float(batch_size) / 16)
lr_power = 0.9
resume_training = False
    if model_name == 'AtrousFCN_Resnet50_16s':
weight_decay = 0.0001/2
else:
weight_decay = 1e-4
target_size = (320, 320)
dataset = 'VOC2012_BERKELEY'
if dataset == 'VOC2012_BERKELEY':
# pascal voc + berkeley semantic contours annotations
train_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/combined_imageset_train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
# train_file_path = os.path.expanduser('~/.keras/datasets/oneimage/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
val_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/combined_imageset_val.txt')
data_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages')
label_dir = os.path.expanduser('~/.keras/datasets/VOC2012/combined_annotations')
data_suffix='.jpg'
label_suffix='.png'
classes = 21
if dataset == 'COCO':
# ###################### loss function & metric ########################
train_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/ImageSets/Segmentation/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
# train_file_path = os.path.expanduser('~/.keras/datasets/oneimage/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation
val_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt')
data_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages')
        label_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/SegmentationClass')
        classes = 91  # COCO category count; 'classes' was otherwise undefined in this branch
loss_fn = binary_crossentropy_with_logits
metrics = [binary_accuracy]
loss_shape = (target_size[0] * target_size[1] * classes,)
label_suffix = '.npy'
data_suffix='.jpg'
ignore_label = None
label_cval = 0
# ###################### loss function & metric ########################
if dataset == 'VOC2012' or dataset == 'VOC2012_BERKELEY':
loss_fn = softmax_sparse_crossentropy_ignoring_last_label
metrics = [sparse_accuracy_ignoring_last_label]
loss_shape = None
ignore_label = 255
label_cval = 255
# Class weight is not yet supported for 3+ dimensional targets
# class_weight = {i: 1 for i in range(classes)}
# # The background class is much more common than all
# # others, so give it less weight!
# class_weight[0] = 0.1
class_weight = None
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)
K.set_session(session)
train(batch_size, epochs, lr_base, lr_power, weight_decay, classes, model_name, train_file_path, val_file_path,
data_dir, label_dir, target_size=target_size, batchnorm_momentum=batchnorm_momentum, resume_training=resume_training,
class_weight=class_weight, loss_fn=loss_fn, metrics=metrics, loss_shape=loss_shape, data_suffix=data_suffix,
label_suffix=label_suffix, ignore_label=ignore_label, label_cval=label_cval)
| 10,131 | 42.672414 | 170 | py |
Keras-FCN | Keras-FCN-master/test/test_preprocessing.py | from keras.preprocessing.image import img_to_array, array_to_img
from utils import SegDataGenerator
from PIL import Image as PILImage
import numpy as np
def test_crop(crop_function):
    # np.random.random expects the shape as a single tuple
    arr = np.random.random((500, 800))
    img = PILImage.fromarray(arr)
    # integer division so the crop size is a valid pixel count
    crop_width = img.width // 5
    crop_height = img.height // 5
    result = crop_function(img_to_array(img), (crop_height, crop_width), 'channels_last')
    result = array_to_img(result)
    assert result.width == crop_width
    assert result.height == crop_height
def test_pair_crop(crop_function):
    arr1 = np.random.random((500, 800))
    arr2 = np.random.random((500, 800))
    img1 = PILImage.fromarray(arr1)
    img2 = PILImage.fromarray(arr2)
    crop_width = img1.width // 5
    crop_height = img1.height // 5
    result1, result2 = crop_function(img_to_array(img1),
                                     img_to_array(img2),
                                     (crop_height, crop_width),
                                     'channels_last')
    result1 = array_to_img(result1)
    result2 = array_to_img(result2)
    assert result1.width == crop_width == result2.width
    assert result1.height == crop_height == result2.height
test_center_crop = lambda: test_crop(SegDataGenerator.center_crop)
test_random_crop = lambda: test_crop(SegDataGenerator.random_crop)
test_pair_center_crop = lambda: test_pair_crop(SegDataGenerator.pair_center_crop)
test_pair_random_crop = lambda: test_pair_crop(SegDataGenerator.pair_random_crop)
| 1,415 | 27.897959 | 89 | py |
Keras-FCN | Keras-FCN-master/utils/BilinearUpSampling.py | import keras.backend as K
import tensorflow as tf
import numpy as np  # np.array is used in resize_images_bilinear below
from keras.layers import *
def resize_images_bilinear(X, height_factor=1, width_factor=1, target_height=None, target_width=None, data_format='default'):
'''Resizes the images contained in a 4D tensor of shape
- [batch, channels, height, width] (for 'channels_first' data_format)
- [batch, height, width, channels] (for 'channels_last' data_format)
by a factor of (height_factor, width_factor). Both factors should be
positive integers.
'''
if data_format == 'default':
data_format = K.image_data_format()
if data_format == 'channels_first':
original_shape = K.int_shape(X)
if target_height and target_width:
new_shape = tf.constant(np.array((target_height, target_width)).astype('int32'))
else:
new_shape = tf.shape(X)[2:]
new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
        X = K.permute_dimensions(X, [0, 2, 3, 1])
        X = tf.image.resize_bilinear(X, new_shape)
        X = K.permute_dimensions(X, [0, 3, 1, 2])
if target_height and target_width:
X.set_shape((None, None, target_height, target_width))
else:
X.set_shape((None, None, original_shape[2] * height_factor, original_shape[3] * width_factor))
return X
elif data_format == 'channels_last':
original_shape = K.int_shape(X)
if target_height and target_width:
new_shape = tf.constant(np.array((target_height, target_width)).astype('int32'))
else:
new_shape = tf.shape(X)[1:3]
new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
X = tf.image.resize_bilinear(X, new_shape)
if target_height and target_width:
X.set_shape((None, target_height, target_width, None))
else:
X.set_shape((None, original_shape[1] * height_factor, original_shape[2] * width_factor, None))
return X
else:
raise Exception('Invalid data_format: ' + data_format)
class BilinearUpSampling2D(Layer):
def __init__(self, size=(1, 1), target_size=None, data_format='default', **kwargs):
if data_format == 'default':
data_format = K.image_data_format()
self.size = tuple(size)
if target_size is not None:
self.target_size = tuple(target_size)
else:
self.target_size = None
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
self.data_format = data_format
self.input_spec = [InputSpec(ndim=4)]
super(BilinearUpSampling2D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
width = int(self.size[0] * input_shape[2] if input_shape[2] is not None else None)
height = int(self.size[1] * input_shape[3] if input_shape[3] is not None else None)
if self.target_size is not None:
width = self.target_size[0]
height = self.target_size[1]
return (input_shape[0],
input_shape[1],
width,
height)
elif self.data_format == 'channels_last':
width = int(self.size[0] * input_shape[1] if input_shape[1] is not None else None)
height = int(self.size[1] * input_shape[2] if input_shape[2] is not None else None)
if self.target_size is not None:
width = self.target_size[0]
height = self.target_size[1]
return (input_shape[0],
width,
height,
input_shape[3])
else:
raise Exception('Invalid data_format: ' + self.data_format)
def call(self, x, mask=None):
if self.target_size is not None:
return resize_images_bilinear(x, target_height=self.target_size[0], target_width=self.target_size[1], data_format=self.data_format)
else:
return resize_images_bilinear(x, height_factor=self.size[0], width_factor=self.size[1], data_format=self.data_format)
def get_config(self):
config = {'size': self.size, 'target_size': self.target_size}
base_config = super(BilinearUpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
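
# Hedged usage sketch (not part of the original module): the layer either
# scales by integer factors or resizes to a fixed target, as in models.py.
if __name__ == '__main__':
    from keras.models import Sequential
    demo = Sequential()
    demo.add(BilinearUpSampling2D(target_size=(224, 224),
                                  input_shape=(112, 112, 21)))
    demo.summary()  # output shape should be (None, 224, 224, 21)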
| 4,452 | 46.88172 | 143 | py |
Keras-FCN | Keras-FCN-master/utils/transfer_FCN.py | import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import os
import sys
from keras.models import Model
from keras.regularizers import l2
from keras.layers import *
from keras.models import model_from_json
from keras.utils import np_utils
from keras.applications.vgg16 import *
from keras.applications.resnet50 import *
import keras.backend as K
import tensorflow as tf
from get_weights_path import *
from resnet_helpers import *
def transfer_FCN_Vgg16():
input_shape = (224, 224, 3)
img_input = Input(shape=input_shape)
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
# Convolutional layers transfered from fully-connected layers
x = Conv2D(4096, (7, 7), activation='relu', padding='same', name='fc1')(x)
x = Conv2D(4096, (1, 1), activation='relu', padding='same', name='fc2')(x)
x = Conv2D(1000, (1, 1), activation='linear', name='predictions_1000')(x)
#x = Reshape((7,7))(x)
# Create model
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_vgg16_weights_tf_dim_ordering_tf_kernels.h5'))
#transfer if weights have not been created
    if not os.path.isfile(weights_path):
flattened_layers = model.layers
index = {}
for layer in flattened_layers:
if layer.name:
index[layer.name]=layer
vgg16 = VGG16()
for layer in vgg16.layers:
weights = layer.get_weights()
if layer.name=='fc1':
weights[0] = np.reshape(weights[0], (7,7,512,4096))
elif layer.name=='fc2':
weights[0] = np.reshape(weights[0], (1,1,4096,4096))
elif layer.name=='predictions':
layer.name='predictions_1000'
weights[0] = np.reshape(weights[0], (1,1,4096,1000))
            if layer.name in index:
                index[layer.name].set_weights(weights)
        model.save_weights(weights_path)
        print('Successfully transformed!')
#else load weights
else:
model.load_weights(weights_path, by_name=True)
        print('Already transformed!')
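# Illustrative sketch (not from the original repo): the weight transfer above
# hinges on reshaping a Dense kernel into an equivalent convolution kernel.
# A downsized numpy version of the fc1 case (2x2x3 inputs -> 5 units):
def _example_dense_to_conv_kernel():
    dense_kernel = np.arange(2 * 2 * 3 * 5, dtype=float).reshape((2 * 2 * 3, 5))
    conv_kernel = np.reshape(dense_kernel, (2, 2, 3, 5))  # (kh, kw, in_ch, out_ch)
    assert conv_kernel.shape == (2, 2, 3, 5)
    return conv_kernel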
def transfer_FCN_ResNet50():
input_shape = (224, 224, 3)
img_input = Input(shape=input_shape)
bn_axis = 3
x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1')(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(x)
x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)
x = conv_block(3, [128, 128, 512], stage=3, block='a')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='b')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='c')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='d')(x)
x = conv_block(3, [256, 256, 1024], stage=4, block='a')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='b')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='c')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='d')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='e')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='f')(x)
x = conv_block(3, [512, 512, 2048], stage=5, block='a')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='b')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='c')(x)
x = Conv2D(1000, (1, 1), activation='linear', name='fc1000')(x)
# Create model
model = Model(img_input, x)
weights_path = os.path.expanduser(os.path.join('~', '.keras/models/fcn_resnet50_weights_tf_dim_ordering_tf_kernels.h5'))
#transfer if weights have not been created
    if not os.path.isfile(weights_path):
flattened_layers = model.layers
index = {}
for layer in flattened_layers:
if layer.name:
index[layer.name]=layer
resnet50 = ResNet50()
for layer in resnet50.layers:
weights = layer.get_weights()
if layer.name=='fc1000':
weights[0] = np.reshape(weights[0], (1,1,2048,1000))
            if layer.name in index:
                index[layer.name].set_weights(weights)
        model.save_weights(weights_path)
        print('Successfully transformed!')
#else load weights
else:
model.load_weights(weights_path, by_name=True)
        print('Already transformed!')
if __name__ == '__main__':
    if len(sys.argv) < 2 or sys.argv[1] not in {'Vgg16', 'ResNet50'}:
        print('Wrong argument! Model name must be Vgg16 or ResNet50.')
        sys.exit(1)
    func = globals()['transfer_FCN_%s' % sys.argv[1]]
    func()
| 6,302 | 41.302013 | 124 | py |
Keras-FCN | Keras-FCN-master/utils/resnet_helpers.py | from keras.layers import *
from keras.layers.merge import Add
from keras.regularizers import l2
# The original helper functions from Keras do not have weight regularizers, so I modified them.
# Also, I changed these two functions into functional style
def identity_block(kernel_size, filters, stage, block, weight_decay=0., batch_momentum=0.99):
'''The identity_block is the block that has no conv layer at shortcut
# Arguments
        kernel_size: default 3, the kernel size of the middle conv layer of the main path
        filters: list of integers, the filter counts of the 3 conv layers of the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
'''
def f(input_tensor):
nb_filter1, nb_filter2, nb_filter3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter2, (kernel_size, kernel_size),
padding='same', name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)
x = Add()([x, input_tensor])
x = Activation('relu')(x)
return x
return f
def conv_block(kernel_size, filters, stage, block, weight_decay=0., strides=(2, 2), batch_momentum=0.99):
'''conv_block is the block that has a conv layer at shortcut
# Arguments
        kernel_size: default 3, the kernel size of the middle conv layer of the main path
        filters: list of integers, the filter counts of the 3 conv layers of the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3, the first conv layer of the main path has strides=(2, 2),
    and the shortcut should have strides=(2, 2) as well
'''
def f(input_tensor):
nb_filter1, nb_filter2, nb_filter3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)
shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', kernel_regularizer=l2(weight_decay))(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1', momentum=batch_momentum)(shortcut)
x = Add()([x, shortcut])
x = Activation('relu')(x)
return x
return f
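# Hedged usage sketch (not from the original repo): one ResNet stage built from
# the helpers above, matching the stage-2 pattern used in transfer_FCN_ResNet50.
def _example_resnet_stage():
    from keras.models import Model
    img = Input(shape=(56, 56, 64))
    x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(img)
    x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
    return Model(img, x)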
# Atrous-Convolution version of residual blocks
def atrous_identity_block(kernel_size, filters, stage, block, weight_decay=0., atrous_rate=(2, 2), batch_momentum=0.99):
'''The identity_block is the block that has no conv layer at shortcut
# Arguments
        kernel_size: default 3, the kernel size of the middle conv layer of the main path
        filters: list of integers, the filter counts of the 3 conv layers of the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
'''
def f(input_tensor):
nb_filter1, nb_filter2, nb_filter3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter2, (kernel_size, kernel_size), dilation_rate=atrous_rate,
padding='same', name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)
x = Add()([x, input_tensor])
x = Activation('relu')(x)
return x
return f
def atrous_conv_block(kernel_size, filters, stage, block, weight_decay=0., strides=(1, 1), atrous_rate=(2, 2), batch_momentum=0.99):
'''conv_block is the block that has a conv layer at shortcut
# Arguments
        kernel_size: default 3, the kernel size of the middle conv layer of the main path
        filters: list of integers, the filter counts of the 3 conv layers of the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
'''
def f(input_tensor):
nb_filter1, nb_filter2, nb_filter3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', dilation_rate=atrous_rate,
name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)
shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', kernel_regularizer=l2(weight_decay))(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1', momentum=batch_momentum)(shortcut)
x = Add()([x, shortcut])
x = Activation('relu')(x)
return x
return f
| 7,977 | 50.470968 | 132 | py |
Keras-FCN | Keras-FCN-master/utils/basics.py | from keras.models import Model
from keras.layers import *
from keras.regularizers import l2
import tensorflow as tf
def conv_relu(nb_filter, nb_row, nb_col, subsample=(1, 1), border_mode='same', bias=True, w_decay=0.01):
    def f(x):
        with tf.name_scope('conv_relu'):
            x = Conv2D(filters=nb_filter, kernel_size=(nb_row, nb_col), strides=subsample, use_bias=bias,
                       kernel_initializer="he_normal", kernel_regularizer=l2(w_decay), padding=border_mode)(x)
            x = Activation("relu")(x)
        return x
    return f
def conv_bn(nb_filter, nb_row, nb_col, subsample=(1, 1), border_mode='same', bias=True, w_decay=0.01):
    def f(x):
        with tf.name_scope('conv_bn'):
            x = Conv2D(filters=nb_filter, kernel_size=(nb_row, nb_col), strides=subsample, use_bias=bias,
                       kernel_initializer="he_normal", kernel_regularizer=l2(w_decay), padding=border_mode)(x)
            x = BatchNormalization(axis=-1)(x)
        return x
    return f
def conv_bn_relu(nb_filter, nb_row, nb_col, subsample=(1, 1), border_mode='same', bias=True, w_decay=0.01):
    def f(x):
        with tf.name_scope('conv_bn_relu'):
            x = Conv2D(filters=nb_filter, kernel_size=(nb_row, nb_col), strides=subsample, use_bias=bias,
                       kernel_initializer="he_normal", kernel_regularizer=l2(w_decay), padding=border_mode)(x)
            x = BatchNormalization(axis=-1)(x)
            x = Activation("relu")(x)
        return x
    return f
def bn_relu_conv(nb_filter, nb_row, nb_col, subsample=(1, 1), border_mode='same', bias=True, w_decay=0.01):
    def f(x):
        with tf.name_scope('bn_relu_conv'):
            x = BatchNormalization(axis=-1)(x)
            x = Activation("relu")(x)
            x = Conv2D(filters=nb_filter, kernel_size=(nb_row, nb_col), strides=subsample, use_bias=bias,
                       kernel_initializer="he_normal", kernel_regularizer=l2(w_decay), padding=border_mode)(x)
        return x
    return f
def atrous_conv_bn(nb_filter, nb_row, nb_col, atrous_rate=(2, 2), subsample=(1, 1), border_mode='same', bias=True, w_decay=0.01):
    def f(x):
        with tf.name_scope('atrous_conv_bn'):
            x = Conv2D(filters=nb_filter, kernel_size=(nb_row, nb_col), dilation_rate=atrous_rate, strides=subsample, use_bias=bias,
                       kernel_initializer="he_normal", kernel_regularizer=l2(w_decay), padding=border_mode)(x)
            x = BatchNormalization(axis=-1)(x)
        return x
    return f
def atrous_conv_bn_relu(nb_filter, nb_row, nb_col, atrous_rate=(2, 2), subsample=(1, 1), border_mode='same', bias=True, w_decay=0.01):
    def f(x):
        with tf.name_scope('atrous_conv_bn_relu'):
            x = Conv2D(filters=nb_filter, kernel_size=(nb_row, nb_col), dilation_rate=atrous_rate, strides=subsample, use_bias=bias,
                       kernel_initializer="he_normal", kernel_regularizer=l2(w_decay), padding=border_mode)(x)
            x = BatchNormalization(axis=-1)(x)
            x = Activation("relu")(x)
        return x
    return f
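# Hedged usage sketch (not part of the original file): chaining the helpers
# above into a small conv stack; shapes and filter counts are illustrative.
def _example_conv_stack():
    inp = Input(shape=(64, 64, 3))
    x = conv_bn_relu(32, 3, 3)(inp)
    x = atrous_conv_bn_relu(32, 3, 3, atrous_rate=(2, 2))(x)
    return Model(inp, x)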
| 3,168 | 50.112903 | 138 | py |
Keras-FCN | Keras-FCN-master/utils/SegDataGenerator.py | from keras.preprocessing.image import *
from keras.applications.imagenet_utils import preprocess_input
from keras import backend as K
from PIL import Image
import numpy as np
import os
def center_crop(x, center_crop_size, data_format, **kwargs):
if data_format == 'channels_first':
centerh, centerw = x.shape[1] // 2, x.shape[2] // 2
elif data_format == 'channels_last':
centerh, centerw = x.shape[0] // 2, x.shape[1] // 2
lh, lw = center_crop_size[0] // 2, center_crop_size[1] // 2
rh, rw = center_crop_size[0] - lh, center_crop_size[1] - lw
h_start, h_end = centerh - lh, centerh + rh
w_start, w_end = centerw - lw, centerw + rw
if data_format == 'channels_first':
return x[:, h_start:h_end, w_start:w_end]
elif data_format == 'channels_last':
return x[h_start:h_end, w_start:w_end, :]
def pair_center_crop(x, y, center_crop_size, data_format, **kwargs):
if data_format == 'channels_first':
centerh, centerw = x.shape[1] // 2, x.shape[2] // 2
elif data_format == 'channels_last':
centerh, centerw = x.shape[0] // 2, x.shape[1] // 2
lh, lw = center_crop_size[0] // 2, center_crop_size[1] // 2
rh, rw = center_crop_size[0] - lh, center_crop_size[1] - lw
h_start, h_end = centerh - lh, centerh + rh
w_start, w_end = centerw - lw, centerw + rw
if data_format == 'channels_first':
return x[:, h_start:h_end, w_start:w_end], \
y[:, h_start:h_end, w_start:w_end]
elif data_format == 'channels_last':
return x[h_start:h_end, w_start:w_end, :], \
y[h_start:h_end, w_start:w_end, :]
def random_crop(x, random_crop_size, data_format, sync_seed=None, **kwargs):
np.random.seed(sync_seed)
if data_format == 'channels_first':
h, w = x.shape[1], x.shape[2]
elif data_format == 'channels_last':
h, w = x.shape[0], x.shape[1]
rangeh = (h - random_crop_size[0]) // 2
rangew = (w - random_crop_size[1]) // 2
offseth = 0 if rangeh == 0 else np.random.randint(rangeh)
offsetw = 0 if rangew == 0 else np.random.randint(rangew)
h_start, h_end = offseth, offseth + random_crop_size[0]
w_start, w_end = offsetw, offsetw + random_crop_size[1]
if data_format == 'channels_first':
return x[:, h_start:h_end, w_start:w_end]
elif data_format == 'channels_last':
return x[h_start:h_end, w_start:w_end, :]
def pair_random_crop(x, y, random_crop_size, data_format, sync_seed=None, **kwargs):
np.random.seed(sync_seed)
if data_format == 'channels_first':
h, w = x.shape[1], x.shape[2]
elif data_format == 'channels_last':
h, w = x.shape[0], x.shape[1]
rangeh = (h - random_crop_size[0]) // 2
rangew = (w - random_crop_size[1]) // 2
offseth = 0 if rangeh == 0 else np.random.randint(rangeh)
offsetw = 0 if rangew == 0 else np.random.randint(rangew)
h_start, h_end = offseth, offseth + random_crop_size[0]
w_start, w_end = offsetw, offsetw + random_crop_size[1]
if data_format == 'channels_first':
        return x[:, h_start:h_end, w_start:w_end], y[:, h_start:h_end, w_start:w_end]
elif data_format == 'channels_last':
return x[h_start:h_end, w_start:w_end, :], y[h_start:h_end, w_start:w_end, :]
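# Hedged example (not from the original file): cropping a dummy channels_last
# image/label pair down to 2x2; both crops use the same random offset.
def _example_pair_random_crop():
    x = np.arange(4 * 4 * 3, dtype=float).reshape((4, 4, 3))
    y = np.arange(4 * 4 * 1, dtype=float).reshape((4, 4, 1))
    xc, yc = pair_random_crop(x, y, (2, 2), data_format='channels_last')
    assert xc.shape == (2, 2, 3) and yc.shape == (2, 2, 1)
    return xc, yc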
class SegDirectoryIterator(Iterator):
'''
Users need to ensure that all files exist.
    Label images should be png images where pixel values represent the class number.
find images -name *.jpg > images.txt
find labels -name *.png > labels.txt
for a file name 2011_002920.jpg, each row should contain 2011_002920
file_path: location of train.txt, or val.txt in PASCAL VOC2012 format,
listing image file path components without extension
data_dir: location of image files referred to by file in file_path
label_dir: location of label files
data_suffix: image file extension, such as `.jpg` or `.png`
label_suffix: label file suffix, such as `.png`, or `.npy`
loss_shape: shape to use when applying loss function to the label data
'''
def __init__(self, file_path, seg_data_generator,
data_dir, data_suffix,
label_dir, label_suffix, classes, ignore_label=255,
crop_mode='none', label_cval=255, pad_size=None,
target_size=None, color_mode='rgb',
data_format='default', class_mode='sparse',
batch_size=1, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg',
loss_shape=None):
if data_format == 'default':
data_format = K.image_data_format()
self.file_path = file_path
self.data_dir = data_dir
self.data_suffix = data_suffix
self.label_suffix = label_suffix
self.label_dir = label_dir
self.classes = classes
self.seg_data_generator = seg_data_generator
self.target_size = tuple(target_size)
self.ignore_label = ignore_label
self.crop_mode = crop_mode
self.label_cval = label_cval
self.pad_size = pad_size
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
self.nb_label_ch = 1
self.loss_shape = loss_shape
if (self.label_suffix == '.npy') or (self.label_suffix == 'npy'):
self.label_file_format = 'npy'
else:
self.label_file_format = 'img'
if target_size:
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
if self.data_format == 'channels_last':
self.label_shape = self.target_size + (self.nb_label_ch,)
else:
self.label_shape = (self.nb_label_ch,) + self.target_size
elif batch_size != 1:
raise ValueError(
'Batch size must be 1 when target image size is undetermined')
else:
self.image_shape = None
self.label_shape = None
if class_mode not in {'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of '
'"sparse", or None.')
self.class_mode = class_mode
if save_to_dir:
self.palette = None
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'npy'}
# build lists for data files and label files
self.data_files = []
self.label_files = []
fp = open(file_path)
lines = fp.readlines()
fp.close()
self.nb_sample = len(lines)
for line in lines:
line = line.strip('\n')
self.data_files.append(line + data_suffix)
self.label_files.append(line + label_suffix)
super(SegDirectoryIterator, self).__init__(
self.nb_sample, batch_size, shuffle, seed)
def next(self):
with self.lock:
index_array, current_index, current_batch_size = next(
self.index_generator)
# The transformation of images is not under thread lock so it can be
# done in parallel
if self.target_size:
# TODO(ahundt) make dtype properly configurable
batch_x = np.zeros((current_batch_size,) + self.image_shape)
            if self.loss_shape is None and self.label_file_format == 'img':
batch_y = np.zeros((current_batch_size,) + self.label_shape,
dtype=int)
elif self.loss_shape is None:
batch_y = np.zeros((current_batch_size,) + self.label_shape)
else:
batch_y = np.zeros((current_batch_size,) + self.loss_shape,
dtype=np.uint8)
grayscale = self.color_mode == 'grayscale'
# build batch of image data and labels
for i, j in enumerate(index_array):
data_file = self.data_files[j]
label_file = self.label_files[j]
img_file_format = 'img'
img = load_img(os.path.join(self.data_dir, data_file),
grayscale=grayscale, target_size=None)
label_filepath = os.path.join(self.label_dir, label_file)
if self.label_file_format == 'npy':
y = np.load(label_filepath)
else:
label = Image.open(label_filepath)
if self.save_to_dir and self.palette is None:
self.palette = label.palette
# do padding
if self.target_size:
if self.crop_mode != 'none':
x = img_to_array(img, data_format=self.data_format)
                    if self.label_file_format != 'npy':
y = img_to_array(
label, data_format=self.data_format).astype(int)
img_w, img_h = img.size
if self.pad_size:
pad_w = max(self.pad_size[1] - img_w, 0)
pad_h = max(self.pad_size[0] - img_h, 0)
else:
pad_w = max(self.target_size[1] - img_w, 0)
pad_h = max(self.target_size[0] - img_h, 0)
if self.data_format == 'channels_first':
x = np.lib.pad(x, ((0, 0), (pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2)), 'constant', constant_values=0.)
y = np.lib.pad(y, ((0, 0), (pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2)),
'constant', constant_values=self.label_cval)
elif self.data_format == 'channels_last':
x = np.lib.pad(x, ((pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2), (0, 0)), 'constant', constant_values=0.)
y = np.lib.pad(y, ((pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2), (0, 0)), 'constant', constant_values=self.label_cval)
else:
x = img_to_array(img.resize((self.target_size[1], self.target_size[0]),
Image.BILINEAR),
data_format=self.data_format)
                    if self.label_file_format != 'npy':
y = img_to_array(label.resize((self.target_size[1], self.target_size[
0]), Image.NEAREST), data_format=self.data_format).astype(int)
else:
print('ERROR: resize not implemented for label npy file')
if self.target_size is None:
batch_x = np.zeros((current_batch_size,) + x.shape)
if self.loss_shape is not None:
batch_y = np.zeros((current_batch_size,) + self.loss_shape)
else:
batch_y = np.zeros((current_batch_size,) + y.shape)
x, y = self.seg_data_generator.random_transform(x, y)
x = self.seg_data_generator.standardize(x)
if self.ignore_label:
y[np.where(y == self.ignore_label)] = self.classes
if self.loss_shape is not None:
y = np.reshape(y, self.loss_shape)
batch_x[i] = x
batch_y[i] = y
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.data_format, scale=True)
label = batch_y[i][:, :, 0].astype('uint8')
label[np.where(label == self.classes)] = self.ignore_label
label = Image.fromarray(label, mode='P')
label.palette = self.palette
fname = '{prefix}_{index}_{hash}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4))
img.save(os.path.join(self.save_to_dir, 'img_' +
fname + '.{format}'.format(format=self.save_format)))
label.save(os.path.join(self.save_to_dir,
'label_' + fname + '.png'))
# return
batch_x = preprocess_input(batch_x)
if self.class_mode == 'sparse':
return batch_x, batch_y
else:
return batch_x
class SegDataGenerator(object):
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
channelwise_center=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
zoom_maintain_shape=True,
channel_shift_range=0.,
fill_mode='constant',
cval=0.,
label_cval=255,
crop_mode='none',
crop_size=(0, 0),
pad_size=None,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
data_format='default'):
if data_format == 'default':
data_format = K.image_data_format()
self.__dict__.update(locals())
self.mean = None
self.ch_mean = None
self.std = None
self.principal_components = None
self.rescale = rescale
if data_format not in {'channels_last', 'channels_first'}:
raise Exception('data_format should be channels_last (channel after row and '
'column) or channels_first (channel before row and column). '
'Received arg: ', data_format)
if crop_mode not in {'none', 'random', 'center'}:
raise Exception('crop_mode should be "none" or "random" or "center" '
'Received arg: ', crop_mode)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_index = 1
self.row_index = 2
self.col_index = 3
if data_format == 'channels_last':
self.channel_index = 3
self.row_index = 1
self.col_index = 2
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise Exception('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow_from_directory(self, file_path, data_dir, data_suffix,
label_dir, label_suffix, classes,
ignore_label=255,
target_size=None, color_mode='rgb',
class_mode='sparse',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg',
loss_shape=None):
if self.crop_mode == 'random' or self.crop_mode == 'center':
target_size = self.crop_size
return SegDirectoryIterator(
file_path, self,
data_dir=data_dir, data_suffix=data_suffix,
label_dir=label_dir, label_suffix=label_suffix,
classes=classes, ignore_label=ignore_label,
crop_mode=self.crop_mode, label_cval=self.label_cval,
pad_size=self.pad_size,
target_size=target_size, color_mode=color_mode,
data_format=self.data_format, class_mode=class_mode,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir, save_prefix=save_prefix,
save_format=save_format,
loss_shape=loss_shape)
def standardize(self, x):
if self.rescale:
x *= self.rescale
# x is a single image, so it doesn't have image number at index 0
img_channel_index = self.channel_index - 1
if self.samplewise_center:
x -= np.mean(x, axis=img_channel_index, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, axis=img_channel_index, keepdims=True) + 1e-7)
if self.featurewise_center:
x -= self.mean
if self.featurewise_std_normalization:
x /= (self.std + 1e-7)
if self.channelwise_center:
x -= self.ch_mean
return x
def random_transform(self, x, y):
# x is a single image, so it doesn't have image number at index 0
img_row_index = self.row_index - 1
img_col_index = self.col_index - 1
img_channel_index = self.channel_index - 1
if self.crop_mode == 'none':
crop_size = (x.shape[img_row_index], x.shape[img_col_index])
else:
crop_size = self.crop_size
assert x.shape[img_row_index] == y.shape[img_row_index] and x.shape[img_col_index] == y.shape[
img_col_index], 'DATA ERROR: Different shape of data and label!\ndata shape: %s, label shape: %s' % (str(x.shape), str(y.shape))
# use composition of homographies to generate final transform that
# needs to be applied
if self.rotation_range:
theta = np.pi / 180 * \
np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
if self.height_shift_range:
# * x.shape[img_row_index]
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range) * crop_size[0]
else:
tx = 0
if self.width_shift_range:
# * x.shape[img_col_index]
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range) * crop_size[1]
else:
ty = 0
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0], self.zoom_range[1], 2)
if self.zoom_maintain_shape:
zy = zx
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = np.dot(
np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)
h, w = x.shape[img_row_index], x.shape[img_col_index]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_index,
fill_mode=self.fill_mode, cval=self.cval)
y = apply_transform(y, transform_matrix, img_channel_index,
fill_mode='constant', cval=self.label_cval)
if self.channel_shift_range != 0:
x = random_channel_shift(
x, self.channel_shift_range, img_channel_index)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_index)
y = flip_axis(y, img_col_index)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_index)
y = flip_axis(y, img_row_index)
if self.crop_mode == 'center':
x, y = pair_center_crop(x, y, self.crop_size, self.data_format)
elif self.crop_mode == 'random':
x, y = pair_random_crop(x, y, self.crop_size, self.data_format)
# TODO:
# channel-wise normalization
# barrel/fisheye
return x, y
def fit(self, X,
augment=False,
rounds=1,
seed=None):
'''Required for featurewise_center and featurewise_std_normalization
# Arguments
X: Numpy array, the data to fit on.
augment: whether to fit on randomly augmented samples
rounds: if `augment`,
how many augmentation passes to do over the data
seed: random seed.
'''
X = np.copy(X)
if augment:
aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
for r in range(rounds):
for i in range(X.shape[0]):
aX[i + r * X.shape[0]] = self.random_transform(X[i])
X = aX
if self.featurewise_center:
self.mean = np.mean(X, axis=0)
X -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(X, axis=0)
X /= (self.std + 1e-7)
def set_ch_mean(self, ch_mean):
self.ch_mean = ch_mean
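# Hedged usage sketch (paths, suffixes and the class count are assumptions,
# not taken from the repo): a typical PASCAL-VOC-style training iterator.
def _example_flow_from_directory():
    datagen = SegDataGenerator(zoom_range=[0.5, 2.0],
                               zoom_maintain_shape=True,
                               crop_mode='random',
                               crop_size=(320, 320),
                               rotation_range=0.,
                               shear_range=0.,
                               horizontal_flip=True)
    return datagen.flow_from_directory(
        file_path='train.txt', data_dir='images', data_suffix='.jpg',
        label_dir='labels', label_suffix='.png', classes=21,
        target_size=(320, 320), batch_size=16)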
| 22,399 | 42.159923 | 164 | py |
Keras-FCN | Keras-FCN-master/utils/loss_function.py | from keras.objectives import *
from keras.metrics import binary_crossentropy
import keras.backend as K
import tensorflow as tf
# Softmax cross-entropy loss function for PASCAL VOC segmentation
# and models which do not perform softmax.
# tensorflow only
def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
log_softmax = tf.nn.log_softmax(y_pred)
y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
unpacked = tf.unstack(y_true, axis=-1)
y_true = tf.stack(unpacked[:-1], axis=-1)
cross_entropy = -K.sum(y_true * log_softmax, axis=1)
cross_entropy_mean = K.mean(cross_entropy)
return cross_entropy_mean
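# Hedged shape sketch (not from the original file): y_true holds integer labels
# in [0, nb_classes], where the extra class nb_classes marks ignored pixels;
# y_pred holds raw (pre-softmax) scores. Building the graph for a dummy batch:
def _example_softmax_loss():
    y_pred = K.zeros((2, 3, 3, 21))  # raw scores for 21 classes
    y_true = K.zeros((2, 3, 3, 1))   # integer labels; value 21 would be ignored
    return softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred)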
# Sigmoid (binary) cross-entropy loss function for COCO segmentation
# and models which expect but do not apply sigmoid on each entry
# tensorflow only
def binary_crossentropy_with_logits(ground_truth, predictions):
return K.mean(K.binary_crossentropy(ground_truth,
predictions,
from_logits=True),
axis=-1)
| 1,139 | 34.625 | 81 | py |
Keras-FCN | Keras-FCN-master/utils/metrics.py | import keras.backend as K
import tensorflow as tf
from tensorflow.contrib.metrics import streaming_mean_iou
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
nb_classes = K.int_shape(y_pred)[-1]
y_pred = K.reshape(y_pred, (-1, nb_classes))
y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
nb_classes + 1)
unpacked = tf.unstack(y_true, axis=-1)
legal_labels = ~tf.cast(unpacked[-1], tf.bool)
y_true = tf.stack(unpacked[:-1], axis=-1)
return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))
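# Hedged worked example (not from the original file): 4 pixels, 3 classes, and
# label 3 marking "ignore"; the ignored pixel is excluded from the average.
def _example_sparse_accuracy():
    y_pred = K.constant([[2., 0., 0.], [0., 2., 0.], [0., 0., 2.], [2., 0., 0.]])
    y_true = K.constant([0., 1., 2., 3.])  # last pixel is ignored
    return sparse_accuracy_ignoring_last_label(y_true, y_pred)  # evaluates to 1.0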
# This IOU implementation is wrong!!!
'''def mean_iou_ignoring_last_label(y_true, y_pred):
batch_size = K.int_shape(y_pred)[0]
y_true_list = tf.unpack(y_true, num=batch_size, axis=0)
y_pred_list = tf.unpack(y_pred, num=batch_size, axis=0)
mean_iou = 0.
for y_true, y_pred in zip(y_true_list, y_pred_list):
nb_classes = K.int_shape(y_pred)[-1]
y_pred = K.reshape(y_pred, (-1, nb_classes))
y_pred = K.argmax(y_pred, axis=-1)
y_pred = K.one_hot(y_pred, nb_classes)
y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), nb_classes + 1)
unpacked = tf.unpack(y_true, axis=-1)
legal_labels = tf.expand_dims(tf.to_float(
~tf.cast(unpacked[-1], tf.bool)), -1)
y_true = tf.pack(unpacked[:-1], axis=-1)
y_true = K.argmax(y_true, axis=-1)
y_true = K.one_hot(y_true, nb_classes)
y_pred = tf.cast(y_pred, tf.bool)
y_true = tf.cast(y_true, tf.bool)
intersection = tf.to_float(y_pred & y_true) * legal_labels
union = tf.to_float(y_pred | y_true) * legal_labels
intersection = K.sum(intersection, axis=0)
union = K.sum(union, axis=0)
total_union = K.sum(tf.to_float(tf.cast(union, tf.bool)))
iou = K.sum(intersection / (union + K.epsilon())) / total_union
mean_iou = mean_iou + iou
mean_iou = mean_iou / batch_size
return mean_iou'''
| 2,044 | 41.604167 | 142 | py |
Keras-FCN | Keras-FCN-master/utils/get_weights_path.py | from keras.utils.data_utils import get_file
def get_weights_path_vgg16():
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',TF_WEIGHTS_PATH,cache_subdir='models')
return weights_path
def get_weights_path_resnet():
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',TF_WEIGHTS_PATH,cache_subdir='models')
return weights_path
if __name__ == '__main__':
    print(get_weights_path_vgg16())
    print(get_weights_path_resnet())
| 771 | 44.411765 | 142 | py |
Keras-FCN | Keras-FCN-master/utils/__init__.py | 0 | 0 | 0 | py |
|
irm-empirical-study | irm-empirical-study-master/colored_mnist/main.py | # Copyright (c) Facebook, Inc. and its affiliates and Kakao Brain.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from typing import Union, List, Tuple
import numpy as np
import torch
from torchvision import datasets
from torch import nn, optim, autograd
class MLP(nn.Module):
def __init__(self, hidden_dim, n_classes, grayscale_model=False):
super(MLP, self).__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.grayscale_model = grayscale_model
if self.grayscale_model:
lin1 = nn.Linear(14 * 14, self.hidden_dim)
else:
lin1 = nn.Linear(self.n_classes * 14 * 14, self.hidden_dim)
lin2 = nn.Linear(self.hidden_dim, self.hidden_dim)
lin3 = nn.Linear(self.hidden_dim, self.n_classes)
for lin in [lin1, lin2, lin3]:
nn.init.xavier_uniform_(lin.weight)
nn.init.zeros_(lin.bias)
self._main = nn.Sequential(lin1, nn.ReLU(True), lin2, nn.ReLU(True), lin3)
def forward(self, input):
if self.grayscale_model:
out = input.view(input.shape[0], self.n_classes, 14 * 14).sum(dim=1)
else:
out = input.view(input.shape[0], self.n_classes * 14 * 14)
out = self._main(out)
return out
def make_environment(images, labels, color_prob, label_prob, n_classes=2):
"""Build an environment where the label is spuriously correlated with
a specific "color" channel w.p. `color_prob`.
The label is also corrupted w.p. `label_prob`, such that
"color" is more correlated to the true label during training.
`n_classes` determines how many label classes are used.
- one color channel per class is created.
- label corruption shifts label "to the right":
0 to 1, 1 to 2, ..., and 9 to 0.
"""
def torch_bernoulli(p, size):
return (torch.rand(size) < p).float()
def collapse_labels(labels, n_classes):
"""Collapse 10 classes into n_classes classes."""
assert n_classes in [2, 3, 5, 10]
bin_width = 10 // n_classes
        return (labels // bin_width).clamp(max=n_classes - 1)  # integer bin index
def corrupt(labels, n_classes, prob):
"""Corrupt a fraction of labels by shifting it +1 (mod n_classes),
according to bernoulli(prob).
Generalizes torch_xor's role of label flipping for the binary case.
"""
is_corrupt = torch_bernoulli(prob, len(labels)).bool()
return torch.where(is_corrupt, (labels + 1) % n_classes, labels)
# 2x subsample for computational convenience
images = images.reshape((-1, 28, 28))[:, ::2, ::2]
# Assign a label based on the digit
labels = collapse_labels(labels, n_classes).float()
# *Corrupt* label with probability 0.25 (default)
labels = corrupt(labels, n_classes, label_prob)
# Assign a color based on the label; flip the color with probability e
colors = corrupt(labels, n_classes, color_prob)
# Apply the color to the image by only giving image in the assigned color channel
n, h, w = images.size()
colored_images = torch.zeros((n, n_classes, h, w)).to(images)
colored_images[torch.tensor(range(n)), colors.long(), :, :] = images
return {
'images': (colored_images.float() / 255.).cuda(),
'labels': labels.long().cuda(),
}
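# Hedged sketch (not part of the original file): splitting MNIST tensors into
# two training environments and one test environment, mirroring the loop in
# run_colored_mnist below. Requires a CUDA device, as make_environment does.
def _example_environments(images, targets):
    train0 = make_environment(images[0::2], targets[0::2], 0.2, 0.25)
    train1 = make_environment(images[1::2], targets[1::2], 0.1, 0.25)
    test = make_environment(images, targets, 0.9, 0.25)
    return train0, train1, test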
def mean_nll(logits, y):
return nn.functional.cross_entropy(logits, y)
def mean_accuracy(logits, y):
preds = torch.argmax(logits, dim=1).float()
return (preds == y).float().mean()
def penalty(logits, y):
scale = torch.ones((1, logits.size(-1))).cuda().requires_grad_()
loss = mean_nll(logits * scale, y)
grad = autograd.grad(loss, [scale], create_graph=True)[0]
return torch.sum(grad ** 2)
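# The penalty above is the IRMv1 regularizer of Arjovsky et al. (2019): the
# squared norm of the gradient of the risk with respect to a fixed dummy
# classifier scale w = 1.0, i.e. || grad_{w|w=1.0} E[loss(w * logits, y)] ||^2.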
def pretty_print(*values):
col_width = 13
def format_val(v):
if not isinstance(v, str):
v = np.array2string(v, precision=5, floatmode='fixed')
return v.ljust(col_width)
str_values = [format_val(v) for v in values]
print(" ".join(str_values))
def run_colored_mnist(
hidden_dim: int = 256,
l2_regularizer_weight: float = 0.001,
lr: float = 0.001,
n_restarts: int = 10,
penalty_anneal_iters: int = 100,
penalty_weight: float = 10000.0,
steps: int = 501,
grayscale_model: bool = False,
train_probs: Union[Tuple[float], List[float]] = (0.2, 0.1),
train_label_probs: Union[Tuple[float], List[float]] = (0.25, ),
test_prob: float = 0.9,
n_envs: int = 2,
n_classes: int = 2,
verbose: bool = False
):
"""Run ColoredMNIST experiment and return train/test accuracies.
Some key parameters:
train_probs: tuple of environment probabilities (p_e)
test_prob: test environment probability
train_label_probs: tuple of label corruption (flipping) rates per env
n_envs: number of training environments
n_classes: number of output classes
"""
def format_probs(probs):
if len(probs) == 1:
return tuple(probs[0] for _ in range(n_envs))
elif len(probs) == 2:
lo, hi = sorted(probs)
return tuple(np.linspace(hi, lo, n_envs))
else:
assert len(probs) == n_envs
return tuple(float(p) for p in probs)
train_probs = format_probs(train_probs)
train_label_probs = format_probs(train_label_probs)
if verbose:
print('Flags:')
for k, v in sorted(locals().items()):
if not callable(v):
print("\t{}: {}".format(k, v))
final_train_accs = []
final_test_accs = []
for restart in range(n_restarts):
# Load MNIST, make train/val splits, and shuffle train set examples
mnist = datasets.MNIST('~/datasets/mnist', train=True, download=True)
mnist_train = (mnist.data[:50000], mnist.targets[:50000])
mnist_val = (mnist.data[50000:], mnist.targets[50000:])
rng_state = np.random.get_state()
np.random.shuffle(mnist_train[0].numpy())
np.random.set_state(rng_state)
np.random.shuffle(mnist_train[1].numpy())
# Build environments (last one is always test)
envs = [
make_environment(mnist_train[0][i::n_envs],
mnist_train[1][i::n_envs],
train_probs[i],
train_label_probs[i],
n_classes)
for i in range(n_envs)
]
test_label_prob = np.mean(train_label_probs).item()
envs.append(
make_environment(mnist_val[0],
mnist_val[1],
test_prob,
test_label_prob,
n_classes)
)
# Define and instantiate the model
mlp = MLP(hidden_dim, n_classes, grayscale_model).cuda()
if verbose and restart == 0:
print(mlp)
print("# trainable parameters:", sum(p.numel() for p in mlp.parameters() if p.requires_grad))
# Train loop
optimizer = optim.Adam(mlp.parameters(), lr=lr)
if verbose:
pretty_print('step', 'train nll', 'train acc', 'train penalty', 'test acc')
for step in range(steps):
for env in envs:
logits = mlp(env['images']) # multi-class logit
env['nll'] = mean_nll(logits, env['labels'])
env['acc'] = mean_accuracy(logits, env['labels'])
env['penalty'] = penalty(logits, env['labels'])
train_nll = torch.stack([envs[i]['nll'] for i in range(n_envs)]).mean()
train_acc = torch.stack([envs[i]['acc'] for i in range(n_envs)]).mean()
train_penalty = torch.stack([envs[i]['penalty'] for i in range(n_envs)]).mean()
weight_norm = torch.tensor(0.).cuda()
for w in mlp.parameters():
weight_norm += w.norm().pow(2)
loss = train_nll.clone()
loss += l2_regularizer_weight * weight_norm
annealed_penalty_weight = (penalty_weight
if step >= penalty_anneal_iters else 1.0)
loss += annealed_penalty_weight * train_penalty
if annealed_penalty_weight > 1.0:
# Rescale the entire loss to keep gradients in a reasonable range
loss /= annealed_penalty_weight
optimizer.zero_grad()
loss.backward()
optimizer.step()
test_acc = envs[n_envs]['acc']
if verbose and step % 100 == 0:
pretty_print(
np.int32(step),
train_nll.detach().cpu().numpy(),
train_acc.detach().cpu().numpy(),
train_penalty.detach().cpu().numpy(),
test_acc.detach().cpu().numpy()
)
train_acc = train_acc.item()
test_acc = test_acc.item()
final_train_accs.append(train_acc)
final_test_accs.append(test_acc)
if verbose:
print(f'Restart {restart}: train {train_acc:.5f}, test {test_acc:.5f}')
print('Final train accuracy (mean/std across restarts):')
print(np.mean(final_train_accs), np.std(final_train_accs))
print('Final test accuracy (mean/std across restarts):')
print(np.mean(final_test_accs), np.std(final_test_accs))
return final_train_accs, final_test_accs
def main():
parser = argparse.ArgumentParser(description='Extended ColoredMNIST')
parser.add_argument('--hidden_dim', type=int, default=256)
parser.add_argument('--l2_regularizer_weight', type=float, default=0.001)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--n_restarts', type=int, default=10)
parser.add_argument('--penalty_anneal_iters', type=int, default=100)
parser.add_argument('--penalty_weight', type=float, default=10000.0)
parser.add_argument('--steps', type=int, default=501)
parser.add_argument('--grayscale_model', action='store_true')
parser.add_argument('--train_probs', type=float, nargs='+', default=(0.2, 0.1))
parser.add_argument('--train_label_probs', type=float, nargs='+', default=(0.25, 0.25))
parser.add_argument('--test_prob', type=float, default=0.9)
parser.add_argument('--n_envs', type=int, default=2)
parser.add_argument('--n_classes', type=int, default=2)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
run_colored_mnist(**vars(args))
if __name__ == '__main__':
main()
| 10,756 | 37.14539 | 105 | py |
irm-empirical-study | irm-empirical-study-master/colored_mnist/make_plots.py | """
Make plots for Figures 1 and 3.
"""
import argparse
import copy
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os.path
from main import run_colored_mnist
DEFAULT_ARGS = dict(
hidden_dim=256,
l2_regularizer_weight=0.001,
lr=0.001,
n_restarts=10,
penalty_anneal_iters=100,
penalty_weight=10000.0,
steps=501,
grayscale_model=False,
train_probs=(0.2, 0.1),
train_label_probs=(0.25, ),
test_prob=0.9,
n_envs=2,
n_classes=2,
verbose=False,
)
def run_color_gap_experiment(color_gap=0.1, erm=False):
"""Q1: Run the default IRM/ERM with a gap parameter."""
args = copy.deepcopy(DEFAULT_ARGS)
train1_prob = args['train_probs'][1]
args['train_probs'] = (
train1_prob + color_gap / 2,
train1_prob - color_gap / 2,
)
if erm:
args['penalty_anneal_iters'] = 0
args['penalty_weight'] = 0.0
train_accs, test_accs = run_colored_mnist(**args)
return train_accs, test_accs
def run_label_gap_experiment(label_gap=0.1, erm=False):
"""Q3: Run the default IRM/ERM with a gap parameter for label corruption."""
args = copy.deepcopy(DEFAULT_ARGS)
mean_label_prob = args['train_label_probs'][0]
# test_label_prob is mean of train_label_probs
args['train_label_probs'] = (
mean_label_prob + label_gap / 2,
mean_label_prob - label_gap / 2
)
if erm:
args['penalty_anneal_iters'] = 0
args['penalty_weight'] = 0.0
train_accs, test_accs = run_colored_mnist(**args)
return train_accs, test_accs
def compute_stats(accs, split='train'):
"""Compute mean/std from each trial per gap."""
index = 0 if split == 'train' else 1
return zip(*[(np.mean(_accs[index]), np.std(_accs[index]))
for _accs in accs])
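# Hedged worked example (not from the original file): accs is a list of
# (train_accs, test_accs) pairs, one per gap; compute_stats reduces each
# trial list to its (mean, std) and returns the two sequences.
def _example_compute_stats():
    accs = [([0.9, 0.8], [0.6, 0.7]), ([0.85, 0.95], [0.5, 0.6])]
    means, stds = compute_stats(accs, split='test')
    return list(means), list(stds)  # approximately ([0.65, 0.55], [0.05, 0.05])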
def run_q1(results_file="q1_results.pkl"):
"""Run Q1 experiment (skip if results_file exists) and make plots."""
# Run/load results
gaps = np.linspace(0.0, 0.4, 41)
print("color gaps tested:", gaps)
if os.path.exists(results_file):
with open(results_file, "rb") as f:
erm_accs, irm_accs = pickle.load(f)
else:
# Main experiment: takes a while
erm_accs = [run_color_gap_experiment(gap, erm=True) for gap in gaps]
irm_accs = [run_color_gap_experiment(gap, erm=False) for gap in gaps]
with open(results_file, "wb") as f:
pickle.dump((erm_accs, irm_accs), f)
# Compute mean/std
erm_train, erm_train_se = compute_stats(erm_accs, 'train')
erm_test, erm_test_se = compute_stats(erm_accs, 'test')
irm_train, irm_train_se = compute_stats(irm_accs, 'train')
irm_test, irm_test_se = compute_stats(irm_accs, 'test')
# Plot (two-column)
def _plot(style='seaborn'):
plt.style.use(style)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
# ax[0]: train
ax[0].errorbar(gaps, erm_train, yerr=erm_train_se, label='ERM')
ax[0].errorbar(gaps, irm_train, yerr=irm_train_se, label='IRMv1')
ax[0].set_title("Train ($p_1 = 0.2 + gap/2$, $p_2 = 0.2 - gap/2$)")
ax[0].set_xlabel("Probability gap between training environments ($|p_1 - p_2|$)")
ax[0].set_ylabel("Accuracy")
ax[0].set_ylim((0.0, 1.0))
ax[0].legend(loc=4)
# ax[1]: test
ax[1].errorbar(gaps, erm_test, yerr=erm_test_se, label='ERM')
ax[1].errorbar(gaps, irm_test, yerr=irm_test_se, label='IRMv1')
ax[1].set_title("Test ($p_\mathrm{test} = 0.9$)")
ax[1].set_xlabel("Probability gap between training environments ($|p_1 - p_2|$)")
ax[1].set_ylabel("Accuracy")
ax[1].set_ylim((0.0, 1.0))
ax[1].legend(loc=4)
plt.savefig(f"q1_plot_{style}.png", bbox_inches='tight')
_plot('seaborn')
_plot('seaborn-colorblind')
def run_q3(results_file="q3_results.pkl"):
"""Run Q3 experiment (skip if results_file exists) and make plots."""
# Run/load results
gaps = np.linspace(0.0, 0.5, 51)
print("label gaps tested:", gaps)
if os.path.exists(results_file):
with open(results_file, "rb") as f:
erm_accs, irm_accs = pickle.load(f)
else:
# Main experiment: takes a while
erm_accs = [run_label_gap_experiment(gap, erm=True) for gap in gaps]
irm_accs = [run_label_gap_experiment(gap, erm=False) for gap in gaps]
with open(results_file, "wb") as f:
pickle.dump((erm_accs, irm_accs), f)
# Compute mean/std
erm_train, erm_train_se = compute_stats(erm_accs, 'train')
erm_test, erm_test_se = compute_stats(erm_accs, 'test')
irm_train, irm_train_se = compute_stats(irm_accs, 'train')
irm_test, irm_test_se = compute_stats(irm_accs, 'test')
# Plot (two-column)
def _plot(style='seaborn'):
plt.style.use(style)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
# ax[0]: train
ax[0].errorbar(gaps, erm_train, yerr=erm_train_se, label='ERM')
ax[0].errorbar(gaps, irm_train, yerr=irm_train_se, label='IRMv1')
ax[0].plot(gaps, [0.5 for _ in gaps], color='orange', linestyle='-', label='Random')
ax[0].axvline(0.1, color='gray', linestyle='--', label='$|p_1 - p_2|$')
ax[0].set_title("Train ($\eta_1 = 0.25 + gap/2$, $\eta_2 = 0.25 - gap/2$)")
ax[0].set_xlabel("Label corruption gap between training environments ($|\eta_1 - \eta_2|$)")
ax[0].set_ylabel("Accuracy")
ax[0].set_ylim((0.0, 1.0))
ax[0].legend(loc=4)
# ax[1]: test
ax[1].errorbar(gaps, erm_test, yerr=erm_test_se, label='ERM')
ax[1].errorbar(gaps, irm_test, yerr=irm_test_se, label='IRMv1')
ax[1].plot(gaps, [0.5 for _ in gaps], color='orange', linestyle='-', label='Random')
ax[1].axvline(0.1, color='gray', linestyle='--', label='$|p_1 - p_2|$')
ax[1].set_title("Test ($\eta_\mathrm{test} = 0.25$)")
ax[1].set_xlabel("Label corruption gap between training environments ($|\eta_1 - \eta_2|$)")
ax[1].set_ylabel("Accuracy")
ax[1].set_ylim((0.0, 1.0))
ax[1].legend(loc=1)
plt.savefig(f"q3_plot_{style}.png", bbox_inches='tight')
_plot('seaborn')
_plot('seaborn-colorblind')
def main():
parser = argparse.ArgumentParser(description='Make plots for Q1 and Q3')
parser.add_argument('--experiment', type=str, choices={'q1', 'q3'})
args = parser.parse_args()
if args.experiment == 'q1':
run_q1()
else:
run_q3()
if __name__ == '__main__':
main()
| 6,581 | 34.578378 | 100 | py |
irm-empirical-study | irm-empirical-study-master/punctuated_sst2/main.py | """
Minimal IRM for ColoredSST-2 with a bag-of-words model
"""
import argparse
import itertools as it
from typing import List
import numpy as np
import torch
from torch import nn, optim, autograd
import torch.nn.functional as F
import torchtext
from data_processors import get_train_examples, get_test_examples
class BOWClassifier(nn.Module):
"""Simple bag-of-words embeddings + MLP."""
def __init__(
self,
embeddings: torch.FloatTensor,
n_layers: int,
n_classes: int,
):
super().__init__()
self.embedding = nn.EmbeddingBag.from_pretrained(embeddings,
freeze=True,
mode='mean')
self.hidden_dim = self.embedding.embedding_dim
self.n_layers = n_layers
self.hidden_layers = nn.ModuleList([
nn.Linear(self.hidden_dim, self.hidden_dim)
for _ in range(n_layers - 1)
])
self.n_classes = n_classes
self.output_layer = nn.Linear(self.hidden_dim, self.n_classes)
def forward(
self,
text: torch.LongTensor,
offsets: torch.LongTensor,
):
hidden = self.embedding(text, offsets)
for hidden_layer in self.hidden_layers:
hidden = F.relu(hidden_layer(hidden))
return self.output_layer(hidden)
def mean_nll(logits, y):
return nn.functional.cross_entropy(logits, y)
def mean_accuracy(logits, y):
preds = torch.argmax(logits, dim=1).float()
return (preds == y).float().mean()
def penalty(logits, y):
    scale = torch.ones((1, logits.size(-1)), device=logits.device).requires_grad_()
loss = mean_nll(logits * scale, y)
grad = autograd.grad(loss, [scale], create_graph=True)[0]
return torch.sum(grad ** 2)
def pretty_print(*values):
col_width = 13
def format_val(v):
if not isinstance(v, str):
v = np.array2string(v, precision=5, floatmode='fixed')
return v.ljust(col_width)
str_values = [format_val(v) for v in values]
print(" ".join(str_values))
def convert_examples_to_features(
examples: List[dict],
vocab: torchtext.vocab.Vectors,
device: torch.device
):
"""Convert examples to torch.Tensors of (text, offsets, labels)."""
text, offsets, labels = [], [], []
current_offset = 0
for example in examples:
# input
words = example['text'].split()
word_ids = [vocab.stoi[word] for word in words if word in vocab.stoi]
if len(word_ids) < 1:
continue
text.extend(word_ids)
offsets.append(current_offset)
current_offset += len(word_ids)
# label
labels.append(int(example['label']))
return {
'text': torch.tensor(text).to(device),
'offsets': torch.tensor(offsets).to(device),
'labels': torch.tensor(labels).to(device),
}
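# Hedged sketch (not from the original file): two toy sentences packed for
# nn.EmbeddingBag -- 'text' concatenates word ids across sentences and
# 'offsets' records where each sentence starts in that flat id list.
def _example_features(vocab, device):
    examples = [{'text': 'a good movie', 'label': '1'},
                {'text': 'a dull movie', 'label': '0'}]
    return convert_examples_to_features(examples, vocab, device)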
def run_punctuated_sst2(
datadir: str,
glove_name: str = "6B", # 6B, 42B, 840B, twitter.27B
n_layers: int = 3,
l2_regularizer_weight: float = 0.001,
lr: float = 0.001,
n_restarts: int = 5,
penalty_anneal_iters: int = 100,
penalty_weight: float = 10000.0,
steps: int = 501,
track_best: bool = False,
verbose: bool = False
):
"""Run PunctuatedSST-2 experiment and return train/test accuracies."""
# Device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load vocab (a torchtext.vocab.Vectors object)
vocab = torchtext.vocab.GloVe(name=glove_name, dim=300)
embeddings = vocab.vectors
# Prepare environments
train_examples = get_train_examples(datadir)
test_examples = get_test_examples(datadir)
n_classes = 2
train_envs = {env_name: convert_examples_to_features(examples, vocab, device)
for env_name, examples in train_examples.items()}
test_envs = {env_name: convert_examples_to_features(examples, vocab, device)
for env_name, examples in test_examples.items()}
all_envs = [env_name for env_name in it.chain(train_envs, test_envs)]
final_accs = {env_name: [] for env_name in all_envs}
best = [{'step': 0, 'min_acc': 0.0, 'loss': 0.0}
for _ in range(n_restarts)]
for restart in range(n_restarts):
# Initialize model
model = BOWClassifier(embeddings, n_layers, n_classes).to(device)
if verbose and restart == 0:
print(model)
print("# trainable parameters:", sum(p.numel() for p in model.parameters() if p.requires_grad))
# Train loop
optimizer = optim.Adam(model.parameters(), lr=lr)
if verbose:
pretty_print('step', 'train nll', 'train acc', 'train penalty', 'test0 acc', 'test1 acc', 'test_ood acc')
best_min_acc, best_loss, best_step = 0.0, 0.0, 0
for step in range(steps):
for _, env in it.chain(train_envs.items(), test_envs.items()):
logits = model(env['text'], env['offsets']) # multi-class logit
env['nll'] = mean_nll(logits, env['labels'])
env['acc'] = mean_accuracy(logits, env['labels'])
env['penalty'] = penalty(logits, env['labels'])
train_nll = torch.stack([env['nll'] for _, env in train_envs.items()]).mean()
train_acc = torch.stack([env['acc'] for _, env in train_envs.items()]).mean()
train_penalty = torch.stack([env['penalty'] for _, env in train_envs.items()]).mean()
            weight_norm = torch.tensor(0.).to(device)
for w in model.parameters():
if w.requires_grad:
weight_norm += w.norm().pow(2)
loss = train_nll.clone()
loss += l2_regularizer_weight * weight_norm
annealed_penalty_weight = (penalty_weight
if step >= penalty_anneal_iters else 1.0)
loss += annealed_penalty_weight * train_penalty
if annealed_penalty_weight > 1.0:
# Rescale the entire loss to keep gradients in a reasonable range
loss /= annealed_penalty_weight
optimizer.zero_grad()
loss.backward()
optimizer.step()
# monitor stats at min_acc for hyperopt (best or last)
min_acc = min(env['acc'].item() for _, env in test_envs.items())
if not track_best or min_acc > best_min_acc:
best_min_acc = min_acc
best_loss = loss.item()
best_step = step
best[restart].update({
'step': step,
'min_acc': best_min_acc, # minimum of test accuracies
'loss': best_loss, # training loss
'train0_acc': train_envs['train0']['acc'].item(),
'train1_acc': train_envs['train1']['acc'].item(),
'test0_acc': test_envs['test0']['acc'].item(),
'test1_acc': test_envs['test1']['acc'].item(),
'test_ood_acc': test_envs['test_ood']['acc'].item(),
})
if verbose and step % 100 == 0:
pretty_print(
np.int32(step),
train_nll.detach().cpu().numpy(),
train_acc.detach().cpu().numpy(),
train_penalty.detach().cpu().numpy(),
test_envs['test0']['acc'].detach().cpu().numpy(),
test_envs['test1']['acc'].detach().cpu().numpy(),
test_envs['test_ood']['acc'].detach().cpu().numpy(),
)
for env_name in train_envs:
final_accs[env_name].append(train_envs[env_name]['acc'].item())
for env_name in test_envs:
final_accs[env_name].append(test_envs[env_name]['acc'].item())
if verbose:
accs = ", ".join(f"{env_name} {best[restart][f'{env_name}_acc']:.5f}"
for env_name in all_envs)
print(f'Restart {restart}: {accs}, '
f"min test acc {best[restart]['min_acc']:.5f} (step {best_step})")
print(f'[Accuracies at best minimum test set accuracy over {n_restarts} restarts]')
pretty_print("env_name", "mean", "std")
for env_name in all_envs:
best_accs = [best[restart][f'{env_name}_acc'] for restart in range(n_restarts)]
mean, std = np.mean(best_accs), np.std(best_accs)
pretty_print(env_name, mean, std)
best_or_last = "Best" if track_best else "Final"
print(f'[{best_or_last} minimum test set accuracy over {n_restarts} restarts]')
best_min_accs = [best[restart]['min_acc'] for restart in range(n_restarts)]
mean, std = np.mean(best_min_accs), np.std(best_min_accs)
pretty_print("mean", "std")
pretty_print(mean, std)
return best
def main():
parser = argparse.ArgumentParser(description='Minimal PunctuatedSST-2')
parser.add_argument('--datadir', type=str, default='data/PunctuatedSST-2',
help='directory containing PunctuatedSST-2 datasets')
parser.add_argument('--glove_name', type=str, default='6B',
help='name specifying GloVe vectors (default: 6B)')
parser.add_argument('--n_layers', type=int, default=3)
parser.add_argument('--l2_regularizer_weight', type=float, default=0.001)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--n_restarts', type=int, default=50)
parser.add_argument('--penalty_anneal_iters', type=int, default=100)
parser.add_argument('--penalty_weight', type=float, default=10000.0)
parser.add_argument('--steps', type=int, default=501)
parser.add_argument('--track_best', action='store_true')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
run_punctuated_sst2(**vars(args))
if __name__ == '__main__':
main()
| 10,008 | 37.794574 | 117 | py |
irm-empirical-study | irm-empirical-study-master/punctuated_sst2/utils.py | """
Utility functions for SST-2
"""
import csv
import os.path
def read_raw_data(path, input_cols, label_col, label_map=None):
"""Read columns from a raw tsv file."""
with open(path) as f:
headers = next(f).strip().split("\t")
inputs, labels = [], []
for line in f:
items = line.strip().split("\t")
inp = tuple(items[headers.index(input_col)]
for input_col in input_cols)
label = items[headers.index(label_col)]
if label_map is not None:
label = label_map[label]
inputs.append(inp)
labels.append(label)
return inputs, labels
def write_processed_data(outputs, destdir):
"""Write processed data (one tsv per env)."""
os.makedirs(destdir, exist_ok=True)
for name, (inputs, labels) in outputs.items():
fname = os.path.join(destdir, f"{name}.tsv")
with open(fname, "w", encoding="utf-8-sig") as f:
            writer = csv.writer(f, delimiter="\t", quotechar=None,
                                quoting=csv.QUOTE_NONE)
writer.writerow(["sentence", "label"])
for inp, label in zip(inputs, labels):
writer.writerow(list(inp) + [label])
print("| wrote {} lines to {}".format(
len(inputs), os.path.join(destdir, name))
)
def read_processed_data(fname):
"""Read processed data as a list of dictionaries.
Reads from TSV lines with the following header line:
sentence label
"""
examples = []
with open(fname, encoding="utf-8") as f:
for (i, line) in enumerate(f):
if i == 0:
continue
            text, label = line.rstrip("\n").split("\t")
examples.append({'text': text, 'label': label})
return examples
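# Editor's note: illustrative round trip through the helpers above (paths and
# contents are hypothetical):
#
#     write_processed_data({"train0": ([("a fine film .",)], [1])}, "out/")
#     read_processed_data("out/train0.tsv")
#     # -> [{'text': 'a fine film .', 'label': '1'}]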
| 1,756 | 30.375 | 66 | py |
irm-empirical-study | irm-empirical-study-master/punctuated_sst2/data_processors.py |
"""
Data processors for multi-environment settings.
Reference:
https://github.com/huggingface/transformers/blob/master/src/transformers/data/processors/glue.py
"""
from glob import glob
import os.path
from utils import read_processed_data
def get_env_name(fname):
basename = os.path.basename(fname)
env_name, ext = os.path.splitext(basename)
assert ext == ".tsv"
return env_name
def get_train_examples(data_dir):
"""Load training examples from multiple environments."""
train_envs = glob(os.path.join(data_dir, "train*.tsv"))
print(f"loading train data from {len(train_envs)} environments")
# return examples as dict; check that sizes match for training
train_examples = {}
prev_length = None
for train_env in train_envs:
env_name = get_env_name(train_env)
examples = read_processed_data(train_env)
if prev_length:
assert len(examples) == prev_length, \
f"data size between training environments differ"
train_examples[env_name] = examples
prev_length = len(examples)
return train_examples
def get_test_examples(data_dir):
"""Load test examples from multiple environments."""
test_envs = glob(os.path.join(data_dir, "test*.tsv"))
print(f"loading test data from {len(test_envs)} environments")
# test0: examples0, test1: examples1, test_ood: examples_ood
test_examples = {}
for test_env in test_envs:
env_name = get_env_name(test_env)
examples = read_processed_data(test_env)
test_examples[env_name] = examples
return test_examples
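# Editor's note: typical usage, assuming the directory layout produced by
# make_environments.py:
#
#     train_examples = get_train_examples("data/PunctuatedSST-2")
#     test_examples = get_test_examples("data/PunctuatedSST-2")
#     # expected keys: train0/train1 and test0/test1/test_ood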
| 1,617 | 28.418182 | 100 | py |
irm-empirical-study | irm-empirical-study-master/punctuated_sst2/make_environments.py | #!/usr/bin/python3 -u
"""
Create environments for PunctuatedSST-2.
Download and unzip SST-2 data from:
https://gluebenchmark.com/tasks
"""
import argparse
from collections import Counter
import numpy as np
import os.path
import string
from utils import read_raw_data, write_processed_data
def color_sentence(sents, artifact_type, artifact_label):
"""Color (perturb) sentences according to spurious artifacts.
Artifact label is assumed to be binary.
[artifact types]
default: spurious punctuation (contains period or exclamation mark)
grayscale: return original sentence (no artifact)
"""
assert isinstance(sents, tuple)
assert artifact_type in {"default", "grayscale"}
assert artifact_label in {0, 1}
if artifact_type == "grayscale":
return sents
color = " ." if artifact_label == 1 else " !"
out = []
for sent in sents:
tokens = sent.split()
if tokens[-1] in string.punctuation:
tokens = tokens[:-1]
out.append(" ".join(tokens) + color)
return tuple(out)
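# Editor's note: illustrative behaviour of color_sentence with the default
# artifact type (the sentence is hypothetical):
#
#     color_sentence(("a fine film !",), "default", 1) -> ("a fine film .",)
#     color_sentence(("a fine film !",), "default", 0) -> ("a fine film !",)
#
# i.e. the trailing punctuation token encodes the (binary) artifact label.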
def split_envs(inputs, labels,
name="", n_envs=1, match_lengths=False, rng=None):
"""Randomly split inputs and labels into different environments.
    Optionally matches the number of samples across environments (used for training splits).
"""
n = len(inputs)
if rng is None:
print("warning: RNG not provided, using unknown random seed")
rng = np.random.RandomState()
# randomly split into environments
env_indices = rng.randint(n_envs, size=n)
out = {
f"{name}{env}": {"inputs": [], "labels": []}
for env in range(n_envs)
}
for inp, label, env in zip(inputs, labels, env_indices):
env_name = f"{name}{env}"
out[env_name]["inputs"].append(inp)
out[env_name]["labels"].append(label)
# match lengths between environments
if match_lengths:
maxlen = max(len(ds["inputs"]) for env_name, ds in out.items())
for env_name, ds in out.items():
n_extra = maxlen - len(ds["inputs"])
if n_extra >= 1:
extra_indices = rng.choice(len(ds["inputs"]), size=n_extra)
ds["inputs"] += [ds["inputs"][i] for i in extra_indices]
ds["labels"] += [ds["labels"][i] for i in extra_indices]
# out: { "nameN": {"inputs": inputsN, "labels": labelsN} }
return out
def color_binary_dataset(inputs, labels, artifact_type,
flip_label=0.25, p_env=0.1, rng=None):
"""Give artifical "color" tokens to inputs that correlate with the label.
*Assumed: label is binary.*
Analogous to the colored MNIST dataset construction in the IRM paper."""
if rng is None:
print("warning: RNG not provided, using unknown random seed")
rng = np.random.RandomState()
colored_inputs, colored_labels = [], []
for input_sent, label in zip(inputs, labels):
# randomly flip labels
if flip_label > 0.0 and rng.random(1).item() < flip_label:
label = 1 - label
# assign artifact by environment probability
artifact_label = label if rng.random(1).item() >= p_env else 1 - label
colored_inputs.append(
color_sentence(input_sent, artifact_type, artifact_label)
)
colored_labels.append(label)
return colored_inputs, colored_labels
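# Editor's note: illustrative arithmetic for the construction above. With
# flip_label=0.25 and p_env=0.1, the artifact agrees with the *observed*
# (possibly flipped) label with probability 1 - 0.1 = 0.9, and with the
# *true* label with probability 0.75 * 0.9 + 0.25 * 0.1 = 0.7, mirroring
# the Colored MNIST correlations in the IRM paper.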
def color_sst2(datadir, destdir, version):
"""Generate a PunctuatedSST-2 dataset.
datadir is the location that contains the original SST-2 data."""
# default setup
n_envs = 2
p_env_test = 0.9
label_map = {"0": 0, "1": 1}
rng = np.random.RandomState(1)
if version == "grayscale":
artifact_type, flip_label, p_envs = ("grayscale", 0.25, [0.0, 0.0])
else:
artifact_type, flip_label, p_envs = ("default", 0.25, [0.2, 0.1])
# train: train0(p=0.2), train1(p=0.1)
inputs, labels = read_raw_data(
os.path.join(datadir, "train.tsv"), ["sentence"], "label", label_map
)
train = split_envs(
inputs, labels,
name="train", n_envs=n_envs, match_lengths=True, rng=rng
)
for env in range(n_envs):
ctr = Counter(train[f"train{env}"]["labels"])
majority_ratio = ctr.most_common(1)[0][1] / sum(ctr.values())
print(f"train{env}:", ctr, ", majority:", majority_ratio)
train0, train1 = [
color_binary_dataset(train[f"train{env}"]["inputs"],
train[f"train{env}"]["labels"],
artifact_type,
flip_label=flip_label,
p_env=p_env,
rng=rng)
for env, p_env in enumerate(p_envs)
]
# test: test0(p=0.2), test1(p=0.1), test_ood(p=0.9)
# (we use the SST-2 dev set for all evaluation)
inputs, labels = read_raw_data(
os.path.join(datadir, "dev.tsv"), ["sentence"], "label", label_map
)
test = split_envs(
inputs, labels,
name="test", n_envs=n_envs + 1, match_lengths=False, rng=rng
)
for env in range(n_envs + 1):
ctr = Counter(test[f"test{env}"]["labels"])
majority_ratio = ctr.most_common(1)[0][1] / sum(ctr.values())
print(f"test{env}:" if env < n_envs else "test_ood", ctr,
", majority:", majority_ratio)
test0, test1, test_ood = [
color_binary_dataset(test[f"test{env}"]["inputs"],
test[f"test{env}"]["labels"],
artifact_type,
flip_label=flip_label,
p_env=p_env,
rng=rng)
for env, p_env in enumerate(p_envs + [p_env_test])
]
outputs = {
"train0": train0,
"train1": train1,
"test0": test0,
"test1": test1,
"test_ood": test_ood,
}
write_processed_data(outputs, destdir)
return train0, train1, test0, test1, test_ood
def main():
parser = argparse.ArgumentParser(description="make environments for PunctuatedSST-2")
parser.add_argument('--datadir', help="directory containing raw data")
parser.add_argument('--destdir', help="output directory")
parser.add_argument('--version', default="default",
help="dataset version (default or grayscale)")
args = parser.parse_args()
color_sst2(args.datadir, args.destdir, args.version)
if __name__ == "__main__":
main()
| 6,486 | 32.438144 | 89 | py |
dwave-tabu | dwave-tabu-master/setup.py | # Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from Cython.Build import cythonize
import numpy
class build_ext_with_args(build_ext):
"""Add compiler-specific compile/link flags."""
extra_compile_args = {
'msvc': ['/std:c++14'],
'unix': ['-std=c++11'],
}
extra_link_args = {
'msvc': [],
'unix': ['-std=c++11'],
}
def build_extensions(self):
compiler = self.compiler.compiler_type
compile_args = self.extra_compile_args[compiler]
for ext in self.extensions:
ext.extra_compile_args = compile_args
link_args = self.extra_link_args[compiler]
for ext in self.extensions:
            ext.extra_link_args = link_args
super().build_extensions()
extensions = [Extension(
name='tabu.tabu_search',
sources=['tabu/tabu_search.pyx', 'tabu/src/utils.cpp', 'tabu/src/bqp.cpp'],
include_dirs=[numpy.get_include()]
)]
setup(
cmdclass={'build_ext': build_ext_with_args},
ext_modules=cythonize(extensions),
)
| 1,665 | 27.724138 | 79 | py |
dwave-tabu | dwave-tabu-master/tests/test_sampler.py | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the TabuSampler python interface."""
import unittest
import dimod
import numpy as np
import tabu
from tabu.utils import tictoc
@dimod.testing.load_sampler_bqm_tests(tabu.TabuSampler)
class TestTabuSampler(unittest.TestCase):
def test_instantiation(self):
sampler = tabu.TabuSampler()
dimod.testing.assert_sampler_api(sampler)
def test_sample_basic(self):
sampler = tabu.TabuSampler()
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': +1, 'ac': +1})
resp = sampler.sample(bqm)
dimod.testing.assert_response_energies(resp, bqm)
def test_sample_num_reads(self):
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': +1, 'ac': +1})
resp = tabu.TabuSampler().sample(bqm, num_reads=57)
dimod.testing.assert_response_energies(resp, bqm)
self.assertEqual(sum(resp.record.num_occurrences), 57)
def test_disconnected_problem(self):
h = {}
J = {
# K_3
(0, 1): -1,
(1, 2): -1,
(0, 2): -1,
            # disconnected K_3
(3, 4): -1,
(4, 5): -1,
(3, 5): -1,
}
bqm = dimod.BinaryQuadraticModel.from_ising(h, J)
resp = tabu.TabuSampler().sample(bqm)
dimod.testing.assert_response_energies(resp, bqm)
def test_empty(self):
resp = tabu.TabuSampler().sample(dimod.BinaryQuadraticModel.empty(dimod.SPIN))
dimod.testing.assert_response_energies(resp, dimod.BinaryQuadraticModel.empty(dimod.SPIN))
resp = tabu.TabuSampler().sample(dimod.BinaryQuadraticModel.empty(dimod.BINARY))
dimod.testing.assert_response_energies(resp, dimod.BinaryQuadraticModel.empty(dimod.BINARY))
resp = tabu.TabuSampler().sample_qubo({})
dimod.testing.assert_response_energies(resp, dimod.BinaryQuadraticModel.empty(dimod.BINARY))
resp = tabu.TabuSampler().sample_ising({}, {})
dimod.testing.assert_response_energies(resp, dimod.BinaryQuadraticModel.empty(dimod.SPIN))
def test_single_variable_problem(self):
bqm = dimod.BinaryQuadraticModel({'a': 1}, {}, 0.0, dimod.SPIN)
resp = tabu.TabuSampler().sample(bqm)
dimod.testing.assert_response_energies(resp, bqm)
self.assertEqual(resp.first.energy, -1)
def test_linear_problem(self):
bqm = dimod.BinaryQuadraticModel.from_ising({v: -1 for v in range(100)}, {})
resp = tabu.TabuSampler().sample(bqm)
dimod.testing.assert_response_energies(resp, bqm)
def test_initial_states_smoketest(self):
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})
resp = tabu.TabuSampler().sample(bqm, initial_states=tabu.TabuSampler().sample(bqm))
dimod.testing.assert_response_energies(resp, bqm)
def test_initial_states(self):
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})
init = dimod.SampleSet.from_samples({'a': 0, 'b': 0, 'c': 0}, vartype=dimod.BINARY, energy=0)
resp = tabu.TabuSampler().sample(bqm, initial_states=init)
dimod.testing.assert_response_energies(resp, bqm)
def test_initial_states_generator(self):
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})
init = dimod.SampleSet.from_samples_bqm([{'a': 1, 'b': 1, 'c': 1},
{'a': -1, 'b': -1, 'c': -1}], bqm)
# 2 fixed initial state, 8 random
resp = tabu.TabuSampler().sample(bqm, initial_states=init, num_reads=10)
self.assertEqual(len(resp), 10)
# 2 fixed initial states, 8 random, explicit
resp = tabu.TabuSampler().sample(bqm, initial_states=init, initial_states_generator='random', num_reads=10)
self.assertEqual(len(resp), 10)
# all random
resp = tabu.TabuSampler().sample(bqm, initial_states_generator='random', num_reads=10)
self.assertEqual(len(resp), 10)
# all random
resp = tabu.TabuSampler().sample(bqm, num_reads=10)
self.assertEqual(len(resp), 10)
# initial_states truncated to num_reads?
resp = tabu.TabuSampler().sample(bqm, initial_states=init, initial_states_generator='none', num_reads=1)
self.assertEqual(len(resp), 1)
resp = tabu.TabuSampler().sample(bqm, initial_states=init, initial_states_generator='tile', num_reads=1)
self.assertEqual(len(resp), 1)
resp = tabu.TabuSampler().sample(bqm, initial_states=init, initial_states_generator='random', num_reads=1)
self.assertEqual(len(resp), 1)
# 2 fixed initial states, repeated 5 times
resp = tabu.TabuSampler().sample(bqm, initial_states=init, initial_states_generator='tile', num_reads=10)
self.assertEqual(len(resp), 10)
# can't tile empty states
with self.assertRaises(ValueError):
resp = tabu.TabuSampler().sample(bqm, initial_states_generator='tile', num_reads=10)
# not enough initial states
with self.assertRaises(ValueError):
resp = tabu.TabuSampler().sample(bqm, initial_states_generator='none', num_reads=3)
# initial_states incompatible with the bqm
init = dimod.SampleSet.from_samples({'a': 1, 'b': 1}, vartype='SPIN', energy=0)
with self.assertRaises(ValueError):
resp = tabu.TabuSampler().sample(bqm, initial_states=init)
def test_input_validation(self):
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})
empty = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
# empty bqm
self.assertEqual(len(tabu.TabuSampler().sample(empty)), 0)
# invalid tenure type
with self.assertRaises(TypeError):
tabu.TabuSampler().sample(bqm, tenure=2.0)
# invalid tenure range
with self.assertRaises(ValueError):
tabu.TabuSampler().sample(bqm, tenure=100)
# invalid num_reads type
with self.assertRaises(TypeError):
tabu.TabuSampler().sample(bqm, num_reads=10.0)
# invalid num_reads range
with self.assertRaises(ValueError):
tabu.TabuSampler().sample(bqm, num_reads=0)
# invalid initial_states type
with self.assertRaises(ValueError):
tabu.TabuSampler().sample(bqm, initial_states=[])
with self.assertRaises(ValueError):
tabu.TabuSampler().sample(bqm, initial_states_generator='non-existing')
# invalid initial_states length
with self.assertRaises(ValueError):
tabu.TabuSampler().sample(bqm, initial_states=[1, 1])
def test_soft_num_reads(self):
"""Number of reads adapts to initial_states size, if provided."""
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})
init = dimod.SampleSet.from_samples_bqm([{'a': 1, 'b': 1, 'c': 1},
{'a': -1, 'b': -1, 'c': -1}], bqm)
# default num_reads == 1
self.assertEqual(len(tabu.TabuSampler().sample(bqm)), 1)
# with initial_states, num_reads == len(initial_states)
self.assertEqual(len(tabu.TabuSampler().sample(bqm, initial_states=init)), 2)
# if explicitly given, with initial_states, they are expanded
self.assertEqual(len(tabu.TabuSampler().sample(bqm, initial_states=init, num_reads=3)), 3)
# if explicitly given, without initial_states, they are generated
self.assertEqual(len(tabu.TabuSampler().sample(bqm, num_reads=4)), 4)
def test_seed(self):
sampler = tabu.TabuSampler()
# use a bqm with a huge plateau (extreme ground state degeneracy)
# to give the tabu sampler many chances of flipping (exercising rng)
bqm = dimod.generators.random.randint(10, dimod.SPIN, low=1, high=1)
tenure = 5
all_samples = []
for seed in (1, 25, 2352):
response0 = sampler.sample(bqm, num_reads=1, tenure=tenure, num_restarts=1, timeout=None, seed=seed)
response1 = sampler.sample(bqm, num_reads=1, tenure=tenure, num_restarts=1, timeout=None, seed=seed)
samples0 = response0.record.sample
samples1 = response1.record.sample
self.assertTrue(np.array_equal(samples0, samples1), "Same seed returned different results")
for previous_sample in all_samples:
self.assertFalse(np.array_equal(samples0, previous_sample), "Different seed returned same results")
all_samples.append(samples0)
def test_timeout(self):
sampler = tabu.TabuSampler()
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})
with tictoc() as tt:
response = sampler.sample(bqm, num_reads=1, timeout=500, seed=123)
self.assertAlmostEqual(tt.dt, 0.5, places=1)
with tictoc() as tt:
response = sampler.sample(bqm, num_reads=3, timeout=200, seed=123)
self.assertAlmostEqual(tt.dt, 0.6, places=1)
def test_num_restarts(self):
sampler = tabu.TabuSampler()
bqm = dimod.generators.random.randint(10, 'SPIN', seed=123)
target_restarts = 100
response = sampler.sample(bqm, num_reads=1, timeout=None, num_restarts=target_restarts, seed=345)
num_restarts = response.record['num_restarts']
self.assertEqual(target_restarts, num_restarts)
def test_energy_threshold(self):
sampler = tabu.TabuSampler()
bqm = dimod.generators.random.randint(100, 'SPIN', seed=123)
energy_threshold = -400
# Expect that energy_threshold is met before timeout
with tictoc() as tt:
response = sampler.sample(bqm, timeout=100000, energy_threshold=energy_threshold, seed=345)
self.assertLessEqual(tt.dt, 1.0)
| 10,492 | 39.203065 | 115 | py |
dwave-tabu | dwave-tabu-master/tests/test_search.py | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the (private) TabuSearch python interface."""
import unittest
from concurrent.futures import ThreadPoolExecutor, wait
import dimod
import numpy as np
import tabu
from tabu.utils import tictoc
class TestTabuSearch(unittest.TestCase):
def test_trivial(self):
qubo = [[1.0]]
init = [1]
tenure = len(init) - 1
timeout = 1
restarts = 100
search = tabu.TabuSearch(qubo, init, tenure, timeout, restarts)
solution = list(search.bestSolution())
energy = search.bestEnergy()
self.assertEqual(solution, [0])
self.assertEqual(energy, 0.0)
def test_correctness(self):
qubo = [[-1.2, 1.1], [1.1, -1.2]]
init = [1, 1]
tenure = len(init) - 1
timeout = 20
restarts = 100
search = tabu.TabuSearch(qubo, init, tenure, timeout, restarts)
solution = list(search.bestSolution())
energy = search.bestEnergy()
self.assertEqual(solution, [0, 1])
self.assertEqual(energy, -1.2)
def test_concurrency(self):
def search(timeout, restarts=int(1e6)):
return tabu.TabuSearch([[1.0]], [1], 0, timeout, restarts).bestEnergy()
with ThreadPoolExecutor(max_workers=3) as executor:
            # ~ 0.5s (but be generous on slow CI VMs)
with tictoc() as tt:
wait([executor.submit(search, timeout=500) for _ in range(3)])
self.assertTrue(0.4 < tt.dt < 1.6)
            # ~ 1s (but be generous on slow CI VMs)
with tictoc() as tt:
wait([executor.submit(search, timeout=500) for _ in range(4)])
self.assertTrue(0.9 < tt.dt < 2.1)
def test_float(self):
n = 20
init = [1] * n
tenure = len(init) - 1
timeout = 20
restarts = 100
bqm = dimod.generators.random.uniform(n, 'BINARY', low=-100, high=100, seed=123)
Q, _ = tabu.TabuSampler._bqm_to_tabu_qubo(bqm)
search = tabu.TabuSearch(Q, init, tenure, timeout, restarts)
self.assertAlmostEqual(search.bestEnergy(), -1465.9867898)
bqm = dimod.generators.random.uniform(n, 'BINARY', low=-1, high=1, seed=123)
Q, _ = tabu.TabuSampler._bqm_to_tabu_qubo(bqm)
search = tabu.TabuSearch(Q, init, tenure, timeout, restarts)
self.assertAlmostEqual(search.bestEnergy(), -14.65986790)
def test_exceptions(self):
qubo = [[-1.2, 1.1], [1.1, -1.2]]
timeout = 10
restarts = 100
# Wrong length for init_solution
with self.assertRaises(RuntimeError):
init = [1, 1, 1]
tenure = len(init) - 1
search = tabu.TabuSearch(qubo, init, tenure, timeout, restarts)
# Tenure out of bounds
with self.assertRaises(RuntimeError):
init = [1, 1]
tenure = 3
search = tabu.TabuSearch(qubo, init, tenure, timeout, restarts)
| 3,522 | 31.321101 | 88 | py |
dwave-tabu | dwave-tabu-master/tests/__init__.py | 0 | 0 | 0 | py |
|
dwave-tabu | dwave-tabu-master/tabu/package_info.py | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tabu import __version__, __package_name__
__author__ = 'D-Wave Systems Inc.'
__author_email__ = '[email protected]'
__description__ = 'Optimized Tabu solver for QUBOs'
__url__ = 'https://github.com/dwavesystems/dwave-tabu'
__license__ = 'Apache 2.0'
| 839 | 37.181818 | 74 | py |
dwave-tabu | dwave-tabu-master/tabu/sampler.py | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dimod :term:`sampler` that uses the MST2 multistart tabu search algorithm."""
import numpy as np
import dimod
from tabu import TabuSearch
__all__ = ["TabuSampler"]
class TabuSampler(dimod.Sampler, dimod.Initialized):
"""A tabu-search sampler.
Examples:
This example solves a two-variable Ising model.
>>> from tabu import TabuSampler
>>> samples = TabuSampler().sample_ising({'a': -0.5, 'b': 1.0}, {'ab': -1})
>>> list(samples.data()) # doctest: +SKIP
[Sample(sample={'a': -1, 'b': -1}, energy=-1.5, num_occurrences=1)]
>>> samples.first.energy
-1.5
"""
properties = None
parameters = None
def __init__(self):
self.parameters = {
'initial_states': [],
'initial_states_generator': [],
'num_reads': [],
'seed': [],
'tenure': [],
'timeout': [],
'num_restarts': [],
'energy_threshold': [],
}
self.properties = {}
def sample(self, bqm, initial_states=None, initial_states_generator='random',
num_reads=None, seed=None, tenure=None, timeout=20, num_restarts=1000000,
energy_threshold=None, **kwargs):
"""Run a multistart tabu search on a given binary quadratic model.
Args:
bqm (:class:`~dimod.BinaryQuadraticModel`):
The binary quadratic model (BQM) to be sampled.
initial_states (:class:`~dimod.SampleSet`, optional, default=None):
One or more samples, each defining an initial state for all the
problem variables. Initial states are given one per read, but
if fewer than `num_reads` initial states are defined, additional
values are generated as specified by `initial_states_generator`.
initial_states_generator (str, 'none'/'tile'/'random', optional, default='random'):
Defines the expansion of `initial_states` if fewer than
`num_reads` are specified:
* "none":
If the number of initial states specified is smaller than
`num_reads`, raises ValueError.
* "tile":
Reuses the specified initial states if fewer than `num_reads`
or truncates if greater.
* "random":
Expands the specified initial states with randomly generated
states if fewer than `num_reads` or truncates if greater.
num_reads (int, optional, default=len(initial_states) or 1):
Number of reads. Each read is generated by one run of the tabu
algorithm. If `num_reads` is not explicitly given, it is selected
to match the number of initial states given. If initial states
are not provided, only one read is performed.
seed (int (32-bit unsigned integer), optional):
Seed to use for the PRNG. If the `timeout` parameter is not None,
results from the same seed may not be identical between runs due to
finite clock resolution.
tenure (int, optional):
Tabu tenure, which is the length of the tabu list, or number of recently
explored solutions kept in memory.
Default is a quarter of the number of problem variables up to
a maximum value of 20.
timeout (int, optional, default=20):
Total running time per read in milliseconds.
num_restarts (int, optional, default=1,000,000):
Number of tabu search restarts per read.
energy_threshold (float, optional):
Terminate when an energy lower than ``energy_threshold`` is found.
Returns:
:class:`~dimod.SampleSet`: A `dimod` :class:`.~dimod.SampleSet` object.
Examples:
This example samples a simple two-variable Ising model.
>>> import dimod
>>> bqm = dimod.BQM.from_ising({}, {'ab': 1})
>>> import tabu
>>> sampler = tabu.TabuSampler()
>>> samples = sampler.sample(bqm)
>>> samples.record[0].energy
-1.0
"""
if not bqm:
return dimod.SampleSet.from_samples([], energy=0, vartype=bqm.vartype)
if tenure is None:
tenure = min(20, len(bqm) // 4)
elif not isinstance(tenure, int):
raise TypeError("'tenure' should be an integer in range [0, num_vars - 1]")
elif not 0 <= tenure < len(bqm):
raise ValueError("'tenure' should be an integer in range [0, num_vars - 1]")
# Get initial_states in binary form
parsed = self.parse_initial_states(bqm.binary,
initial_states=initial_states,
initial_states_generator=initial_states_generator,
num_reads=num_reads,
seed=seed)
parsed_initial_states = np.ascontiguousarray(parsed.initial_states.record.sample)
qubo, varorder = self._bqm_to_tabu_qubo(bqm.binary)
if timeout is None:
timeout = -1 # Using negative timeout to mean ignore timeout parameter
# run Tabu search
samples = np.empty((parsed.num_reads, len(bqm)), dtype=np.int8)
rng = np.random.default_rng(seed)
restarts = []
for ni, initial_state in enumerate(parsed_initial_states):
seed_per_read = rng.integers(2**32, dtype=np.uint32)
r = TabuSearch(qubo, initial_state, tenure, timeout, num_restarts, seed_per_read, energy_threshold)
samples[ni, :] = r.bestSolution()
restarts.append(r.numRestarts())
# we received samples in binary form, so convert if needed
if bqm.vartype is dimod.SPIN:
samples *= 2
samples -= 1
elif bqm.vartype is not dimod.BINARY:
# sanity check
raise ValueError("unknown vartype")
return dimod.SampleSet.from_samples_bqm((samples, varorder), bqm=bqm, num_restarts=restarts)
@staticmethod
def _bqm_to_tabu_qubo(bqm):
# construct dense matrix representation
ldata, (irow, icol, qdata), offset, varorder = bqm.binary.to_numpy_vectors(return_labels=True)
ud = np.zeros((len(bqm), len(bqm)), dtype=np.double)
ud[np.diag_indices(len(bqm), 2)] = ldata
ud[irow, icol] = qdata
# Note: normally, conversion would be: `ud + ud.T - np.diag(np.diag(ud))`,
# but the Tabu solver we're using requires slightly different qubo matrix.
ud *= .5
symm = ud + ud.T
return symm, varorder
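# Editor's note: a small worked example of the matrix built above. For a
# two-variable binary BQM with linear biases a, b and quadratic bias c, the
# returned matrix is
#
#     [[a,   c/2],
#      [c/2, b  ]]
#
# so that x^T M x = a*x0 + b*x1 + c*x0*x1 for binary x0, x1 (illustrative).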
| 7,485 | 38.608466 | 111 | py |
dwave-tabu | dwave-tabu-master/tabu/utils.py | # Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
__all__ = ['tictoc']
class tictoc:
"""Timer as a context manager.
Elapsed wall clock time in floating point seconds available in :attr:`.dt`.
Adapted from D-Wave Hybrid's :class:`hybrid.profiling.tictoc`
(https://github.com/dwavesystems/dwave-hybrid/blob/5844ce2461795a0b4dec58ec1dd4c1264dbd9e84/hybrid/profiling.py#L33-L87).
"""
# NOTE: make sure to remove if/when we collect common utilities in a package
# (e.g. dwave-common)
dt: float = None
def __enter__(self):
self.tick = time.perf_counter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.dt = time.perf_counter() - self.tick
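# Editor's note: typical usage of the context manager above (the workload is
# hypothetical):
#
#     with tictoc() as t:
#         do_work()
#     print(t.dt)  # elapsed wall clock time in seconds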
| 1,271 | 31.615385 | 125 | py |
dwave-tabu | dwave-tabu-master/tabu/__init__.py | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__package_name__ = 'dwave-tabu'
__version__ = '0.4.5'
__all__ = ['TabuSearch', 'TabuSampler']
from tabu.tabu_search import TabuSearch
from tabu.sampler import TabuSampler
| 755 | 33.363636 | 74 | py |
dwave-tabu | dwave-tabu-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# tabu documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Note: make sure the package is installed when building docs.
# -- Project information -----------------------------------------------------
from tabu import package_info
project = 'D-Wave Tabu'
copyright = '2018, D-Wave Systems Inc.'
author = package_info.__author__
version = package_info.__version__
release = package_info.__version__
# -- General configuration ------------------------------------------------
# import sphinx
# if sphinx.__version__ # can check here
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx.ext.ifconfig',
]
autosummary_generate = True
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'}
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
add_module_names = False
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'sdk_index.rst']
linkcheck_retries = 2
linkcheck_anchors = False
linkcheck_ignore = ['https://cloud.dwavesys.com/leap', # redirects, many checks
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
modindex_common_prefix = ['dwave-tabu.']
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
def setup(app):
app.add_css_file('cookie_notice.css')
app.add_js_file('cookie_notice.js')
app.add_config_value('target', 'repo', 'env')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'qbsolv': ('https://docs.ocean.dwavesys.com/projects/qbsolv/en/stable/', None),
'oceandocs': ('https://docs.ocean.dwavesys.com/en/stable/', None),
'sysdocs_gettingstarted': ('https://docs.dwavesys.com/docs/stable/', None),
}
| 4,053 | 31.174603 | 83 | py |
cudaMST | cudaMST-master/verify_correctness.py | #! /usr/bin/env python2
import sys
edges = []
with open(sys.argv[1]) as fh:
fh.readline()
for line in fh.readlines():
u, v, w = line.strip().split()
u = int(u)
v = int(v)
w = float(w)
edges.append((u, v, w))
weight = 0.
vertices = set()
with open(sys.argv[2]) as fh:
fh.readline()
for line in fh.readlines():
i = int(line.strip())
weight += edges[i][2]
vertices.add(edges[i][0])
vertices.add(edges[i][1])
print weight
print len(vertices)
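# Editor's note: usage sketch (file names are illustrative):
#
#     ./verify_correctness.py <edge-list-file> <mst-index-file>
#
# prints the total weight of the selected edges and the number of distinct
# vertices they touch.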
| 530 | 18.666667 | 38 | py |
cudaMST | cudaMST-master/gpuMSTdpk/runTests.py | import subprocess
import sys
import random
import os
def onPprocessors(command,p) :
if os.environ.has_key("OPENMP"):
os.putenv("OMP_NUM_THREADS", "%d" %p)
return command
elif os.environ.has_key("CILK"):
return command + " -cilk_set_worker_count " + `p`
elif os.environ.has_key("MKLROOT"):
return "export CILK_NWORKERS="+`p`+"; " + command
return command
def shellGetOutput(str) :
process = subprocess.Popen(str,shell=True,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = process.communicate()
if (len(err) > 0):
raise NameError(str+"\n"+output+err)
return output
def stripFloat(val) :
trunc = float(int(val*1000))/1000
return str(trunc).rstrip('0')
def runSingle(runProgram, options, ifile, procs) :
comString = "./"+runProgram+" "+options+" "+ifile
if (procs > 0) :
comString = onPprocessors(comString,procs)
out = shellGetOutput(comString)
#print(out)
try:
times = [float(str[str.index(' ')+1:]) for str in out.split('\n') if str.startswith("PBBS-time: ")]
return times
except (ValueError,IndexError):
raise NameError(comString+"\n"+out)
def runTest(runProgram, checkProgram, dataDir, test, rounds, procs, noOutput) :
random.seed()
outFile="/tmp/ofile%d_%d" %(random.randint(0, 1000000), random.randint(0, 1000000))
[weight, inputFileNames, runOptions, checkOptions] = test
if type(inputFileNames) is str :
inputFileNames = [inputFileNames]
shortInputNames = " ".join(inputFileNames)
if len(dataDir)>0:
out = shellGetOutput("cd " + dataDir + "; make " + shortInputNames)
longInputNames = " ".join(dataDir + "/" + name for name in inputFileNames)
runOptions = runOptions + " -r " + `rounds`
if (noOutput == 0) :
runOptions = runOptions + " -o " + outFile
times = runSingle(runProgram, runOptions, longInputNames, procs)
if (noOutput == 0) :
checkString = ("./" + checkProgram + " " + checkOptions + " "
+ longInputNames + " " + outFile)
checkOut = shellGetOutput(checkString)
# Allow checker output comments. Comments are lines prefixed by '::'
nonCommentLines = [s for s in checkOut.split('\n') if not s.startswith(':') and len(s)>0]
if (len(nonCommentLines) > 0) :
print("CheckOut:", checkOut)
raise NameError(checkString+"\n"+checkOut)
os.remove(outFile)
ptimes = str([stripFloat(time)
for time in times])[1:-1]
outputStr = ""
if (len(runOptions) > 0) :
outputStr = " : " + runOptions
print(`weight` + " : " + shortInputNames + outputStr + " : "
+ ptimes)
return [weight,times]
def averageTime(times) :
return sum(times)/len(times)
def timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput,
addToDatabase, problem) :
totalTime = 0
totalWeight = 0
try:
results = [runTest(runProgram, checkProgram, dataDir, test, rounds, procs,
noOutput)
for test in tests]
totalTimeMean = 0
totalTimeMin = 0
totalTimeMedian = 0
totalWeight = 0
j = 0
for (weight,times) in results:
l = len(times)
if (l == 0):
print("Warning, no timed results for", tests[j])
continue
times = sorted(times)
totalTimeMean = totalTimeMean + weight*sum(times)/l
totalTimeMin = totalTimeMin + weight*times[0]
totalTimeMedian = totalTimeMedian + weight*times[(l-1)/2]
totalWeight = totalWeight + weight
j += 1
print(name + " : " + `procs` +" : " +
"weighted time, min=" + stripFloat(totalTimeMin/totalWeight) +
" median=" + stripFloat(totalTimeMedian/totalWeight) +
" mean=" + stripFloat(totalTimeMean/totalWeight))
if (addToDatabase) :
try:
dbAddResult(problem=problem, program=runProgram, results=results, numProcs=procs, mean=totalTimeMean/totalWeight,
min=totalTimeMin/totalWeight, median=totalTimeMedian/totalWeight, tests=tests)
except:
print("Could not insert result in database. Error:", sys.exc_info()[0])
# if (os.getlogin() == 'akyrola'): raise
return 0
except NameError,v:
x, = v
print "TEST TERMINATED ABNORMALLY:\n["+x + "]"
return 1
except KeyboardInterrupt:
return 1
def getOption(str) :
a = sys.argv
l = len(a)
for i in range(1, l) :
if (a[i] == str) :
return True
return False
def getArg(str, default) :
a = sys.argv
l = len(a)
for i in range(1, l) :
if (a[i] == str and (i+1 != l)) :
return sys.argv[i+1]
return default
def getArgs() :
noOutput = getOption("-x")
addToDatabase = getOption("-d")
processors = int(getArg("-p", 0))
rounds = int(getArg("-r", 1))
return (noOutput, rounds, addToDatabase, processors)
def timeAllArgs(runProgram, problem, checkProgram, dataDir, tests) :
(noOutput, rounds, addToDatabase, procs) = getArgs()
name = os.path.basename(os.getcwd())
timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput, addToDatabase, problem)
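# Editor's note: command-line flags recognized by getArgs() above:
#     -x      skip writing/checking the output file
#     -d      record results in the benchmark database
#     -p <n>  run on n processors
#     -r <n>  repeat each test n times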
#
# Database insertions
# - [email protected]
import os
def dbInitConnection():
import MySQLdb
global cursor
# TODO: move to a config file
dbconn = MySQLdb.connect (host = "multi6.aladdin.cs.cmu.edu",
user = "pbbs",
passwd = "pbbspasshuuhaa",
db = "pbbsweb")
cursor = dbconn.cursor ()
dbconn.autocommit(1)
def dbAddResult(problem, program, results, numProcs, mean, min, median, tests):
dbInitConnection()
contentHash = computeContentHash(tests)
program = shellGetOutput("pwd").split('/')[-1].replace('\r','').replace('\n', '') + '/' + program
problemId = dbGetProblemId(problem, contentHash)
programId = dbGetProgramId(program, problemId)
hostId = getHostId()
#username = os.getlogin()
# getlogin does not work with some terminals (see various posts on web)
# guyb replaced with the following
username = os.getenv('USER')
if (numProcs == 0): numProcs = detectCPUs()
# Insert run into db
cursor.execute(""" insert into pbbs_runs (problem_id,program_id,numprocs,mean_time,min_time,median_time,username,host_id) values(
%s, %s, %s, %s, %s, %s, %s, %s)
""", (problemId, programId, numProcs, mean, min, median, username, hostId))
cursor.execute(" select last_insert_id()")
runId = cursor.fetchone()[0]
for i in range(0, len(results)):
(weight, times) = results[i]
test = tests[i]
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
for time in times:
cursor.execute(""" insert into pbbs_subruns(run_id, inputfile, time, weight, params, check_params) values(
%s, %s , %s , %s, %s, %s) """,
(runId, inputFileNames, time, weight, runOptions, checkOptions))
def computeContentHash(tests):
hash = ""
for test in tests:
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
hash += ";%f%s%s%s" %(weight,inputFileNames.strip(), runOptions.strip(),checkOptions.strip())
hash = hash.replace(' ', '_')
return hash
def dbGetProblemId(probname, contentHash):
cursor.execute("select id from pbbs_problems where name=%s and content_hash=%s", (probname, contentHash))
row = cursor.fetchone()
if row == None:
# Insert into db
cursor.execute( "insert into pbbs_problems (name,content_hash) values(%s,%s) ", (probname, contentHash))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def dbGetProgramId(progname, problemId):
cursor.execute("select id from pbbs_programs where name=%s and problem_id=%s", (progname, problemId))
row = cursor.fetchone()
if row == None:
# Insert into db
cursor.execute( "insert into pbbs_programs (problem_id, name) values(%s, %s) ", (problemId, progname))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
import platform
def getHostId():
(procmodel, mhz) = detectCPUModel()
numprocs = detectCPUs()
(sysname, nodename, release, version, machine) = os.uname()
if (os.environ.has_key("OPENMP")):
nodename = nodename + "[OPENMP]"
cursor.execute("select id from pbbs_hosts where hostname=%s and procmodel=%s and version=%s and numprocs=%s", (nodename, procmodel, version, numprocs))
row = cursor.fetchone()
if row == None:
cursor.execute(""" insert into pbbs_hosts(hostname,sysname,releasen,version,machine,numprocs,procmodel,mhz) values
(%s, %s, %s, %s, %s, %s, %s, %s) """,
(nodename, sysname, release, version, machine, numprocs, procmodel, mhz))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def detectCPUModel():
mhz = 0
model = platform.processor()
try:
if (platform.system() == "Darwin"):
model = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Name'")
mhz = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Speed'")
else:
model = shellGetOutput('grep "model name" /proc/cpuinfo').split('\n')[0]
mhz = shellGetOutput('grep "cpu MHz" /proc/cpuinfo').split('\n')[0]
model = model.split(':')[-1].strip()
mhz = mhz.split(':')[-1].strip()
except:
# Could not get processor model
print("Could not determine CPU model", sys.exc_info()[0])
return (model, mhz)
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if os.environ.has_key("NUMBER_OF_PROCESSORS"):
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]);
if ncpus > 0:
return ncpus
return 1 # Default
| 10,981 | 37 | 155 | py |
cudaMST | cudaMST-master/gpuMST/runTests.py | import subprocess
import sys
import random
import os
def onPprocessors(command,p) :
if os.environ.has_key("OPENMP"):
os.putenv("OMP_NUM_THREADS", "%d" %p)
return command
elif os.environ.has_key("CILK"):
return command + " -cilk_set_worker_count " + `p`
elif os.environ.has_key("MKLROOT"):
return "export CILK_NWORKERS="+`p`+"; " + command
return command
def shellGetOutput(str) :
process = subprocess.Popen(str,shell=True,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = process.communicate()
if (len(err) > 0):
raise NameError(str+"\n"+output+err)
return output
def stripFloat(val) :
trunc = float(int(val*1000))/1000
return str(trunc).rstrip('0')
def runSingle(runProgram, options, ifile, procs) :
comString = "./"+runProgram+" "+options+" "+ifile
if (procs > 0) :
comString = onPprocessors(comString,procs)
out = shellGetOutput(comString)
#print(out)
try:
times = [float(str[str.index(' ')+1:]) for str in out.split('\n') if str.startswith("PBBS-time: ")]
return times
except (ValueError,IndexError):
raise NameError(comString+"\n"+out)
def runTest(runProgram, checkProgram, dataDir, test, rounds, procs, noOutput) :
random.seed()
outFile="/tmp/ofile%d_%d" %(random.randint(0, 1000000), random.randint(0, 1000000))
[weight, inputFileNames, runOptions, checkOptions] = test
if type(inputFileNames) is str :
inputFileNames = [inputFileNames]
shortInputNames = " ".join(inputFileNames)
if len(dataDir)>0:
out = shellGetOutput("cd " + dataDir + "; make " + shortInputNames)
longInputNames = " ".join(dataDir + "/" + name for name in inputFileNames)
runOptions = runOptions + " -r " + `rounds`
if (noOutput == 0) :
runOptions = runOptions + " -o " + outFile
times = runSingle(runProgram, runOptions, longInputNames, procs)
if (noOutput == 0) :
checkString = ("./" + checkProgram + " " + checkOptions + " "
+ longInputNames + " " + outFile)
checkOut = shellGetOutput(checkString)
# Allow checker output comments. Comments are lines prefixed by '::'
nonCommentLines = [s for s in checkOut.split('\n') if not s.startswith(':') and len(s)>0]
if (len(nonCommentLines) > 0) :
print("CheckOut:", checkOut)
raise NameError(checkString+"\n"+checkOut)
os.remove(outFile)
ptimes = str([stripFloat(time)
for time in times])[1:-1]
outputStr = ""
if (len(runOptions) > 0) :
outputStr = " : " + runOptions
print(`weight` + " : " + shortInputNames + outputStr + " : "
+ ptimes)
return [weight,times]
def averageTime(times) :
return sum(times)/len(times)
def timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput,
addToDatabase, problem) :
totalTime = 0
totalWeight = 0
try:
results = [runTest(runProgram, checkProgram, dataDir, test, rounds, procs,
noOutput)
for test in tests]
totalTimeMean = 0
totalTimeMin = 0
totalTimeMedian = 0
totalWeight = 0
j = 0
for (weight,times) in results:
l = len(times)
if (l == 0):
print("Warning, no timed results for", tests[j])
continue
times = sorted(times)
totalTimeMean = totalTimeMean + weight*sum(times)/l
totalTimeMin = totalTimeMin + weight*times[0]
totalTimeMedian = totalTimeMedian + weight*times[(l-1)/2]
totalWeight = totalWeight + weight
j += 1
print(name + " : " + `procs` +" : " +
"weighted time, min=" + stripFloat(totalTimeMin/totalWeight) +
" median=" + stripFloat(totalTimeMedian/totalWeight) +
" mean=" + stripFloat(totalTimeMean/totalWeight))
if (addToDatabase) :
try:
dbAddResult(problem=problem, program=runProgram, results=results, numProcs=procs, mean=totalTimeMean/totalWeight,
min=totalTimeMin/totalWeight, median=totalTimeMedian/totalWeight, tests=tests)
except:
print("Could not insert result in database. Error:", sys.exc_info()[0])
# if (os.getlogin() == 'akyrola'): raise
return 0
except NameError,v:
x, = v
print "TEST TERMINATED ABNORMALLY:\n["+x + "]"
return 1
except KeyboardInterrupt:
return 1
def getOption(str) :
a = sys.argv
l = len(a)
for i in range(1, l) :
if (a[i] == str) :
return True
return False
def getArg(str, default) :
a = sys.argv
l = len(a)
for i in range(1, l) :
if (a[i] == str and (i+1 != l)) :
return sys.argv[i+1]
return default
def getArgs() :
noOutput = getOption("-x")
addToDatabase = getOption("-d")
processors = int(getArg("-p", 0))
rounds = int(getArg("-r", 1))
return (noOutput, rounds, addToDatabase, processors)
def timeAllArgs(runProgram, problem, checkProgram, dataDir, tests) :
(noOutput, rounds, addToDatabase, procs) = getArgs()
name = os.path.basename(os.getcwd())
timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput, addToDatabase, problem)
#
# Database insertions
# - [email protected]
import os
def dbInitConnection():
import MySQLdb
global cursor
# TODO: move to a config file
dbconn = MySQLdb.connect (host = "multi6.aladdin.cs.cmu.edu",
user = "pbbs",
passwd = "pbbspasshuuhaa",
db = "pbbsweb")
cursor = dbconn.cursor ()
dbconn.autocommit(1)
def dbAddResult(problem, program, results, numProcs, mean, min, median, tests):
dbInitConnection()
contentHash = computeContentHash(tests)
program = shellGetOutput("pwd").split('/')[-1].replace('\r','').replace('\n', '') + '/' + program
problemId = dbGetProblemId(problem, contentHash)
programId = dbGetProgramId(program, problemId)
hostId = getHostId()
#username = os.getlogin()
# getlogin does not work with some terminals (see various posts on web)
# guyb replaced with the following
username = os.getenv('USER')
if (numProcs == 0): numProcs = detectCPUs()
# Insert run into db
cursor.execute(""" insert into pbbs_runs (problem_id,program_id,numprocs,mean_time,min_time,median_time,username,host_id) values(
%s, %s, %s, %s, %s, %s, %s, %s)
""", (problemId, programId, numProcs, mean, min, median, username, hostId))
cursor.execute(" select last_insert_id()")
runId = cursor.fetchone()[0]
for i in range(0, len(results)):
(weight, times) = results[i]
test = tests[i]
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
for time in times:
cursor.execute(""" insert into pbbs_subruns(run_id, inputfile, time, weight, params, check_params) values(
%s, %s , %s , %s, %s, %s) """,
(runId, inputFileNames, time, weight, runOptions, checkOptions))
def computeContentHash(tests):
hash = ""
for test in tests:
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
hash += ";%f%s%s%s" %(weight,inputFileNames.strip(), runOptions.strip(),checkOptions.strip())
hash = hash.replace(' ', '_')
return hash
def dbGetProblemId(probname, contentHash):
cursor.execute("select id from pbbs_problems where name=%s and content_hash=%s", (probname, contentHash))
row = cursor.fetchone()
if row == None:
# Insert into db
cursor.execute( "insert into pbbs_problems (name,content_hash) values(%s,%s) ", (probname, contentHash))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def dbGetProgramId(progname, problemId):
cursor.execute("select id from pbbs_programs where name=%s and problem_id=%s", (progname, problemId))
row = cursor.fetchone()
if row == None:
# Insert into db
cursor.execute( "insert into pbbs_programs (problem_id, name) values(%s, %s) ", (problemId, progname))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
import platform
def getHostId():
(procmodel, mhz) = detectCPUModel()
numprocs = detectCPUs()
(sysname, nodename, release, version, machine) = os.uname()
if (os.environ.has_key("OPENMP")):
nodename = nodename + "[OPENMP]"
cursor.execute("select id from pbbs_hosts where hostname=%s and procmodel=%s and version=%s and numprocs=%s", (nodename, procmodel, version, numprocs))
row = cursor.fetchone()
if row == None:
cursor.execute(""" insert into pbbs_hosts(hostname,sysname,releasen,version,machine,numprocs,procmodel,mhz) values
(%s, %s, %s, %s, %s, %s, %s, %s) """,
(nodename, sysname, release, version, machine, numprocs, procmodel, mhz))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def detectCPUModel():
mhz = 0
model = platform.processor()
try:
if (platform.system() == "Darwin"):
model = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Name'")
mhz = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Speed'")
else:
model = shellGetOutput('grep "model name" /proc/cpuinfo').split('\n')[0]
mhz = shellGetOutput('grep "cpu MHz" /proc/cpuinfo').split('\n')[0]
model = model.split(':')[-1].strip()
mhz = mhz.split(':')[-1].strip()
except:
# Could not get processor model
print("Could not determine CPU model", sys.exc_info()[0])
return (model, mhz)
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if os.environ.has_key("NUMBER_OF_PROCESSORS"):
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]);
if ncpus > 0:
return ncpus
return 1 # Default
| 10,981 | 37 | 155 | py |
cudaMST | cudaMST-master/parallelKruskal/runTests.py | import subprocess
import sys
import random
import os
def onPprocessors(command,p) :
if os.environ.has_key("OPENMP"):
os.putenv("OMP_NUM_THREADS", "%d" %p)
return command
elif os.environ.has_key("CILK"):
return command + " -cilk_set_worker_count " + `p`
elif os.environ.has_key("MKLROOT"):
return "export CILK_NWORKERS="+`p`+"; " + command
return command
def shellGetOutput(str) :
process = subprocess.Popen(str,shell=True,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = process.communicate()
if (len(err) > 0):
raise NameError(str+"\n"+output+err)
return output
def stripFloat(val) :
trunc = float(int(val*1000))/1000
return str(trunc).rstrip('0')
def runSingle(runProgram, options, ifile, procs) :
comString = "./"+runProgram+" "+options+" "+ifile
if (procs > 0) :
comString = onPprocessors(comString,procs)
out = shellGetOutput(comString)
#print(out)
try:
times = [float(str[str.index(' ')+1:]) for str in out.split('\n') if str.startswith("PBBS-time: ")]
return times
except (ValueError,IndexError):
raise NameError(comString+"\n"+out)
def runTest(runProgram, checkProgram, dataDir, test, rounds, procs, noOutput) :
random.seed()
outFile="/tmp/ofile%d_%d" %(random.randint(0, 1000000), random.randint(0, 1000000))
[weight, inputFileNames, runOptions, checkOptions] = test
if type(inputFileNames) is str :
inputFileNames = [inputFileNames]
shortInputNames = " ".join(inputFileNames)
if len(dataDir)>0:
out = shellGetOutput("cd " + dataDir + "; make " + shortInputNames)
longInputNames = " ".join(dataDir + "/" + name for name in inputFileNames)
runOptions = runOptions + " -r " + `rounds`
if (noOutput == 0) :
runOptions = runOptions + " -o " + outFile
times = runSingle(runProgram, runOptions, longInputNames, procs)
if (noOutput == 0) :
checkString = ("./" + checkProgram + " " + checkOptions + " "
+ longInputNames + " " + outFile)
checkOut = shellGetOutput(checkString)
# Allow checker output comments. Comments are lines prefixed by '::'
nonCommentLines = [s for s in checkOut.split('\n') if not s.startswith(':') and len(s)>0]
if (len(nonCommentLines) > 0) :
print("CheckOut:", checkOut)
raise NameError(checkString+"\n"+checkOut)
os.remove(outFile)
ptimes = str([stripFloat(time)
for time in times])[1:-1]
outputStr = ""
if (len(runOptions) > 0) :
outputStr = " : " + runOptions
print(`weight` + " : " + shortInputNames + outputStr + " : "
+ ptimes)
return [weight,times]
def averageTime(times) :
return sum(times)/len(times)
def timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput,
addToDatabase, problem) :
totalTime = 0
totalWeight = 0
try:
results = [runTest(runProgram, checkProgram, dataDir, test, rounds, procs,
noOutput)
for test in tests]
totalTimeMean = 0
totalTimeMin = 0
totalTimeMedian = 0
totalWeight = 0
j = 0
for (weight,times) in results:
l = len(times)
if (l == 0):
print("Warning, no timed results for", tests[j])
continue
times = sorted(times)
totalTimeMean = totalTimeMean + weight*sum(times)/l
totalTimeMin = totalTimeMin + weight*times[0]
totalTimeMedian = totalTimeMedian + weight*times[(l-1)/2]
totalWeight = totalWeight + weight
j += 1
print(name + " : " + `procs` +" : " +
"weighted time, min=" + stripFloat(totalTimeMin/totalWeight) +
" median=" + stripFloat(totalTimeMedian/totalWeight) +
" mean=" + stripFloat(totalTimeMean/totalWeight))
if (addToDatabase) :
try:
dbAddResult(problem=problem, program=runProgram, results=results, numProcs=procs, mean=totalTimeMean/totalWeight,
min=totalTimeMin/totalWeight, median=totalTimeMedian/totalWeight, tests=tests)
except:
print("Could not insert result in database. Error:", sys.exc_info()[0])
# if (os.getlogin() == 'akyrola'): raise
return 0
  except NameError, v:
    x, = v.args
    print "TEST TERMINATED ABNORMALLY:\n[" + x + "]"
return 1
except KeyboardInterrupt:
return 1
def getOption(name) :
  a = sys.argv
  l = len(a)
  for i in range(1, l) :
    if (a[i] == name) :
      return True
  return False
def getArg(name, default) :
  a = sys.argv
  l = len(a)
  for i in range(1, l) :
    if (a[i] == name and (i+1 != l)) :
      return sys.argv[i+1]
  return default
def getArgs() :
noOutput = getOption("-x")
addToDatabase = getOption("-d")
processors = int(getArg("-p", 0))
rounds = int(getArg("-r", 1))
return (noOutput, rounds, addToDatabase, processors)
def timeAllArgs(runProgram, problem, checkProgram, dataDir, tests) :
(noOutput, rounds, addToDatabase, procs) = getArgs()
name = os.path.basename(os.getcwd())
timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput, addToDatabase, problem)
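# Example driver (all names hypothetical): a benchmark's testRun.py would
# define tests as [weight, inputFileNames, runOptions, checkOptions] tuples
# and call, e.g.,
#   timeAllArgs("MST", "minSpanningTree", "MSTCheck", "../graphData/data",
#               [[1.0, "randLocalGraph_WE_5_1000000", "", ""]])
# Command-line flags: -r <rounds>, -p <procs>, -x (skip the output check),
# -d (record results in the database).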
#
# Database insertions
# - [email protected]
def dbInitConnection():
import MySQLdb
global cursor
# TODO: move to a config file
dbconn = MySQLdb.connect (host = "multi6.aladdin.cs.cmu.edu",
user = "pbbs",
passwd = "pbbspasshuuhaa",
db = "pbbsweb")
cursor = dbconn.cursor ()
dbconn.autocommit(1)
def dbAddResult(problem, program, results, numProcs, mean, min, median, tests):
dbInitConnection()
contentHash = computeContentHash(tests)
program = shellGetOutput("pwd").split('/')[-1].replace('\r','').replace('\n', '') + '/' + program
problemId = dbGetProblemId(problem, contentHash)
programId = dbGetProgramId(program, problemId)
hostId = getHostId()
#username = os.getlogin()
# getlogin does not work with some terminals (see various posts on web)
# guyb replaced with the following
username = os.getenv('USER')
if (numProcs == 0): numProcs = detectCPUs()
# Insert run into db
cursor.execute(""" insert into pbbs_runs (problem_id,program_id,numprocs,mean_time,min_time,median_time,username,host_id) values(
%s, %s, %s, %s, %s, %s, %s, %s)
""", (problemId, programId, numProcs, mean, min, median, username, hostId))
cursor.execute(" select last_insert_id()")
runId = cursor.fetchone()[0]
for i in range(0, len(results)):
(weight, times) = results[i]
test = tests[i]
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
for time in times:
cursor.execute(""" insert into pbbs_subruns(run_id, inputfile, time, weight, params, check_params) values(
%s, %s , %s , %s, %s, %s) """,
(runId, inputFileNames, time, weight, runOptions, checkOptions))
def computeContentHash(tests):
  # Use a local name that does not shadow the builtin ``hash``.
  h = ""
  for test in tests:
    [weight,inputFileNames,runOptions,checkOptions] = test
    if type(inputFileNames) is list :
      inputFileNames = "+".join(inputFileNames)
    h += ";%f%s%s%s" %(weight,inputFileNames.strip(), runOptions.strip(),checkOptions.strip())
  h = h.replace(' ', '_')
  return h
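# Illustrative example: a test [1.0, "randGraph", "-r 1", ""] hashes to
# ";1.000000randGraph-r_1"; spaces become underscores so the string can be
# used verbatim as the problem's content identifier in the database.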
def dbGetProblemId(probname, contentHash):
cursor.execute("select id from pbbs_problems where name=%s and content_hash=%s", (probname, contentHash))
row = cursor.fetchone()
  if row is None:
# Insert into db
cursor.execute( "insert into pbbs_problems (name,content_hash) values(%s,%s) ", (probname, contentHash))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def dbGetProgramId(progname, problemId):
cursor.execute("select id from pbbs_programs where name=%s and problem_id=%s", (progname, problemId))
row = cursor.fetchone()
  if row is None:
# Insert into db
cursor.execute( "insert into pbbs_programs (problem_id, name) values(%s, %s) ", (problemId, progname))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
import platform
def getHostId():
(procmodel, mhz) = detectCPUModel()
numprocs = detectCPUs()
(sysname, nodename, release, version, machine) = os.uname()
if (os.environ.has_key("OPENMP")):
nodename = nodename + "[OPENMP]"
cursor.execute("select id from pbbs_hosts where hostname=%s and procmodel=%s and version=%s and numprocs=%s", (nodename, procmodel, version, numprocs))
row = cursor.fetchone()
  if row is None:
cursor.execute(""" insert into pbbs_hosts(hostname,sysname,releasen,version,machine,numprocs,procmodel,mhz) values
(%s, %s, %s, %s, %s, %s, %s, %s) """,
(nodename, sysname, release, version, machine, numprocs, procmodel, mhz))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def detectCPUModel():
mhz = 0
model = platform.processor()
try:
if (platform.system() == "Darwin"):
model = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Name'")
mhz = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Speed'")
else:
model = shellGetOutput('grep "model name" /proc/cpuinfo').split('\n')[0]
mhz = shellGetOutput('grep "cpu MHz" /proc/cpuinfo').split('\n')[0]
    # Strip the "key: value" prefix for both the Darwin and the Linux case.
    model = model.split(':')[-1].strip()
    mhz = mhz.split(':')[-1].strip()
  except Exception:
# Could not get processor model
print("Could not determine CPU model", sys.exc_info()[0])
return (model, mhz)
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
      if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
          return int(os.popen("sysctl -n hw.ncpu").read())
# Windows:
if os.environ.has_key("NUMBER_OF_PROCESSORS"):
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]);
if ncpus > 0:
return ncpus
return 1 # Default
| 10,981 | 37 | 155 | py |
cudaMST | cudaMST-master/graphData/data/graph2binary.py | import sys
from struct import pack
filename = sys.argv[1]
out = sys.argv[2]
f = open(filename,'r')
a = f.read()
b = a.split('\n')
b.pop(0)
numVertices = int(b[0])
numEdges = int(b[1])
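# Input is expected in the textual PBBS adjacency-graph layout:
#   line 0: format tag (discarded by the pop(0) above)
#   line 1: vertex count; line 2: edge count
#   next numVertices lines: per-vertex offsets into the edge-target array
#   next numEdges lines: target vertex of each edge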
#print numVertices, numEdges
'''
for x in range(numVertices):
offset = int(b[2+x])
if x == numVertices-1:
length = numEdges - offset
else:
length = int(b[2+x+1]) - offset
adj = [0]*numEdges
#line = '';
for i in range(length):
adj[int(b[2+numVertices+offset+i])] = 1
#line += str(b[2+numVertices+offset+i]) + ' '
#print line
line = '';
for i in range(numEdges):
line += str(adj[i]) + ' '
print line
'''
rowArray = []
colArray = []
for x in range(numVertices):
offset = int(b[2+x])
if x == numVertices-1:
length = numEdges - offset
else:
length = int(b[2+x+1]) - offset
for i in range(length):
rowArray.append(x)
colArray.append(int(b[2+numVertices+offset+i]))
v = [1]*numEdges
#print numVertices,numEdges
#print len(colArray)
#print len(rowArray)
#print v
ff = open(out,'wb')
ff.write(pack('III',numVertices,numVertices,numEdges))
ff.write(pack(numEdges*'I',*rowArray))
ff.write(pack(numEdges*'I',*colArray))
ff.write(pack(numEdges*'d',*v))
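# Output layout (native byte order, as written by struct.pack): a header of
# three uint32 values (rows, cols, nnz; here rows == cols == numVertices),
# then numEdges uint32 row indices, numEdges uint32 column indices, and
# numEdges float64 weights (all set to 1 above).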
#print rowArray
#print colArray
#print v
| 1,323 | 19.6875 | 55 | py |
cudaMST | cudaMST-master/graphData/data/pbbs2dimacs.py | # converts graph input into dimacs format (for Galois MST)
import sys
filename = sys.argv[1]
f = open(filename,'r')
a = f.read()
b = a.split('\n')
b.pop(0)
b.pop()
numVertices = sys.argv[2]
numEdges = len(b)
print 'p sp ' + numVertices + ' ' + str(numEdges)
for line in b:
points = line.split(' ')
u = points[0]
v = points[1]
    # float() handles both plain and exponent-notation weights directly.
    weight = float(points[2])
print 'a ' + str(u) + ' ' + str(v) + ' ' + str(weight*100)
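# Example: an input edge line "0 1 1.5e-2" (u, v, weight) becomes the DIMACS
# arc line "a 0 1 1.5", since every weight is scaled by 100 before printing.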
| 556 | 19.62963 | 62 | py |
cudaMST | cudaMST-master/serialMST/runTests.py | import subprocess
import sys
import random
import os
def onPprocessors(command,p) :
if os.environ.has_key("OPENMP"):
os.putenv("OMP_NUM_THREADS", "%d" %p)
return command
elif os.environ.has_key("CILK"):
return command + " -cilk_set_worker_count " + `p`
elif os.environ.has_key("MKLROOT"):
return "export CILK_NWORKERS="+`p`+"; " + command
return command
def shellGetOutput(cmd) :
  process = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  output, err = process.communicate()
  if (len(err) > 0):
    raise NameError(cmd+"\n"+output+err)
  return output
def stripFloat(val) :
trunc = float(int(val*1000))/1000
return str(trunc).rstrip('0')
def runSingle(runProgram, options, ifile, procs) :
comString = "./"+runProgram+" "+options+" "+ifile
if (procs > 0) :
comString = onPprocessors(comString,procs)
out = shellGetOutput(comString)
#print(out)
try:
    times = [float(ln[ln.index(' ')+1:]) for ln in out.split('\n') if ln.startswith("PBBS-time: ")]
return times
except (ValueError,IndexError):
raise NameError(comString+"\n"+out)
def runTest(runProgram, checkProgram, dataDir, test, rounds, procs, noOutput) :
random.seed()
outFile="/tmp/ofile%d_%d" %(random.randint(0, 1000000), random.randint(0, 1000000))
[weight, inputFileNames, runOptions, checkOptions] = test
if type(inputFileNames) is str :
inputFileNames = [inputFileNames]
shortInputNames = " ".join(inputFileNames)
if len(dataDir)>0:
out = shellGetOutput("cd " + dataDir + "; make " + shortInputNames)
longInputNames = " ".join(dataDir + "/" + name for name in inputFileNames)
runOptions = runOptions + " -r " + `rounds`
if (noOutput == 0) :
runOptions = runOptions + " -o " + outFile
times = runSingle(runProgram, runOptions, longInputNames, procs)
if (noOutput == 0) :
checkString = ("./" + checkProgram + " " + checkOptions + " "
+ longInputNames + " " + outFile)
checkOut = shellGetOutput(checkString)
    # Allow checker output comments. Comment lines are prefixed by ':'.
nonCommentLines = [s for s in checkOut.split('\n') if not s.startswith(':') and len(s)>0]
if (len(nonCommentLines) > 0) :
print("CheckOut:", checkOut)
raise NameError(checkString+"\n"+checkOut)
os.remove(outFile)
ptimes = str([stripFloat(time)
for time in times])[1:-1]
outputStr = ""
if (len(runOptions) > 0) :
outputStr = " : " + runOptions
print(`weight` + " : " + shortInputNames + outputStr + " : "
+ ptimes)
return [weight,times]
def averageTime(times) :
return sum(times)/len(times)
def timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput,
addToDatabase, problem) :
try:
results = [runTest(runProgram, checkProgram, dataDir, test, rounds, procs,
noOutput)
for test in tests]
totalTimeMean = 0
totalTimeMin = 0
totalTimeMedian = 0
totalWeight = 0
j = 0
for (weight,times) in results:
l = len(times)
if (l == 0):
print("Warning, no timed results for", tests[j])
continue
times = sorted(times)
totalTimeMean = totalTimeMean + weight*sum(times)/l
totalTimeMin = totalTimeMin + weight*times[0]
totalTimeMedian = totalTimeMedian + weight*times[(l-1)/2]
totalWeight = totalWeight + weight
j += 1
print(name + " : " + `procs` +" : " +
"weighted time, min=" + stripFloat(totalTimeMin/totalWeight) +
" median=" + stripFloat(totalTimeMedian/totalWeight) +
" mean=" + stripFloat(totalTimeMean/totalWeight))
if (addToDatabase) :
try:
dbAddResult(problem=problem, program=runProgram, results=results, numProcs=procs, mean=totalTimeMean/totalWeight,
min=totalTimeMin/totalWeight, median=totalTimeMedian/totalWeight, tests=tests)
except:
print("Could not insert result in database. Error:", sys.exc_info()[0])
# if (os.getlogin() == 'akyrola'): raise
return 0
  except NameError, v:
    x, = v.args
    print "TEST TERMINATED ABNORMALLY:\n[" + x + "]"
return 1
except KeyboardInterrupt:
return 1
def getOption(name) :
  a = sys.argv
  l = len(a)
  for i in range(1, l) :
    if (a[i] == name) :
      return True
  return False
def getArg(name, default) :
  a = sys.argv
  l = len(a)
  for i in range(1, l) :
    if (a[i] == name and (i+1 != l)) :
      return sys.argv[i+1]
  return default
def getArgs() :
noOutput = getOption("-x")
addToDatabase = getOption("-d")
processors = int(getArg("-p", 0))
rounds = int(getArg("-r", 1))
return (noOutput, rounds, addToDatabase, processors)
def timeAllArgs(runProgram, problem, checkProgram, dataDir, tests) :
(noOutput, rounds, addToDatabase, procs) = getArgs()
name = os.path.basename(os.getcwd())
timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput, addToDatabase, problem)
#
# Database insertions
# - [email protected]
def dbInitConnection():
import MySQLdb
global cursor
# TODO: move to a config file
dbconn = MySQLdb.connect (host = "multi6.aladdin.cs.cmu.edu",
user = "pbbs",
passwd = "pbbspasshuuhaa",
db = "pbbsweb")
cursor = dbconn.cursor ()
dbconn.autocommit(1)
def dbAddResult(problem, program, results, numProcs, mean, min, median, tests):
dbInitConnection()
contentHash = computeContentHash(tests)
program = shellGetOutput("pwd").split('/')[-1].replace('\r','').replace('\n', '') + '/' + program
problemId = dbGetProblemId(problem, contentHash)
programId = dbGetProgramId(program, problemId)
hostId = getHostId()
#username = os.getlogin()
# getlogin does not work with some terminals (see various posts on web)
# guyb replaced with the following
username = os.getenv('USER')
if (numProcs == 0): numProcs = detectCPUs()
# Insert run into db
cursor.execute(""" insert into pbbs_runs (problem_id,program_id,numprocs,mean_time,min_time,median_time,username,host_id) values(
%s, %s, %s, %s, %s, %s, %s, %s)
""", (problemId, programId, numProcs, mean, min, median, username, hostId))
cursor.execute(" select last_insert_id()")
runId = cursor.fetchone()[0]
for i in range(0, len(results)):
(weight, times) = results[i]
test = tests[i]
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
for time in times:
cursor.execute(""" insert into pbbs_subruns(run_id, inputfile, time, weight, params, check_params) values(
%s, %s , %s , %s, %s, %s) """,
(runId, inputFileNames, time, weight, runOptions, checkOptions))
def computeContentHash(tests):
  # Use a local name that does not shadow the builtin ``hash``.
  h = ""
  for test in tests:
    [weight,inputFileNames,runOptions,checkOptions] = test
    if type(inputFileNames) is list :
      inputFileNames = "+".join(inputFileNames)
    h += ";%f%s%s%s" %(weight,inputFileNames.strip(), runOptions.strip(),checkOptions.strip())
  h = h.replace(' ', '_')
  return h
def dbGetProblemId(probname, contentHash):
cursor.execute("select id from pbbs_problems where name=%s and content_hash=%s", (probname, contentHash))
row = cursor.fetchone()
  if row is None:
# Insert into db
cursor.execute( "insert into pbbs_problems (name,content_hash) values(%s,%s) ", (probname, contentHash))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def dbGetProgramId(progname, problemId):
cursor.execute("select id from pbbs_programs where name=%s and problem_id=%s", (progname, problemId))
row = cursor.fetchone()
  if row is None:
# Insert into db
cursor.execute( "insert into pbbs_programs (problem_id, name) values(%s, %s) ", (problemId, progname))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
import platform
def getHostId():
(procmodel, mhz) = detectCPUModel()
numprocs = detectCPUs()
(sysname, nodename, release, version, machine) = os.uname()
if (os.environ.has_key("OPENMP")):
nodename = nodename + "[OPENMP]"
cursor.execute("select id from pbbs_hosts where hostname=%s and procmodel=%s and version=%s and numprocs=%s", (nodename, procmodel, version, numprocs))
row = cursor.fetchone()
  if row is None:
cursor.execute(""" insert into pbbs_hosts(hostname,sysname,releasen,version,machine,numprocs,procmodel,mhz) values
(%s, %s, %s, %s, %s, %s, %s, %s) """,
(nodename, sysname, release, version, machine, numprocs, procmodel, mhz))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def detectCPUModel():
mhz = 0
model = platform.processor()
try:
if (platform.system() == "Darwin"):
model = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Name'")
mhz = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Speed'")
else:
model = shellGetOutput('grep "model name" /proc/cpuinfo').split('\n')[0]
mhz = shellGetOutput('grep "cpu MHz" /proc/cpuinfo').split('\n')[0]
    # Strip the "key: value" prefix for both the Darwin and the Linux case.
    model = model.split(':')[-1].strip()
    mhz = mhz.split(':')[-1].strip()
  except Exception:
# Could not get processor model
print("Could not determine CPU model", sys.exc_info()[0])
return (model, mhz)
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
      if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
          return int(os.popen("sysctl -n hw.ncpu").read())
# Windows:
if os.environ.has_key("NUMBER_OF_PROCESSORS"):
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]);
if ncpus > 0:
return ncpus
return 1 # Default
| 10,981 | 37 | 155 | py |
python-cpl | python-cpl-master/setup.py | import os
from setuptools import setup, Extension
author = 'Ole Streicher'
email = '[email protected]'
license_ = 'GPL'
cpl_version = '0.7.4'
description = "Python interface for the ESO Common Pipeline Library"
long_description = '''\
This module can list, configure and execute CPL-based recipes from Python
(python2 and python3). The input, calibration and output data can be
specified as FITS files or as ``astropy.io.fits`` objects in memory.
The ESO `Common Pipeline Library <http://www.eso.org/sci/software/cpl/>`_
(CPL) comprises a set of ISO-C libraries that provide a comprehensive,
efficient and robust software toolkit. It forms a basis for the creation of
automated astronomical data-reduction tasks. One of the features provided by
the CPL is the ability to create data-reduction algorithms that run as plugins
(dynamic libraries). These are called "recipes" and are one of the main
aspects of the CPL data-reduction development environment.
'''
pkgname = 'python-cpl'
baseurl = ('https://files.pythonhosted.org/packages/source/'
           '{0}/{1}'.format(pkgname[0], pkgname))
classifiers = '''Development Status :: 4 - Beta
Intended Audience :: Science/Research
License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
Operating System :: MacOS :: MacOS X
Operating System :: POSIX
Operating System :: Unix
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 3
Topic :: Scientific/Engineering :: Astronomy
'''.splitlines()
def create_version_file(cpl_version=cpl_version):
with open(os.path.join('cpl', 'version.py'), 'w') as vfile:
vfile.write("version = %s\n" % repr(cpl_version))
vfile.write("author = %s\n" % repr(author))
vfile.write("email = %s\n" % repr(email))
vfile.write("license_ = %s\n" % repr(license_))
try:
create_version_file()
except IOError:
pass
module1 = Extension('cpl.CPL_recipe',
sources=['cpl/CPL_recipe.c', 'cpl/CPL_library.c'])
setup(
name=pkgname,
version=cpl_version,
author=author,
author_email=email,
description=description,
long_description=long_description,
license=license_,
url='https://pypi.org/project/%s/%s' % (pkgname, cpl_version),
download_url='%s/%s-%s.tar.gz' % (baseurl, pkgname, cpl_version),
classifiers=classifiers,
python_requires='>=2.7',
install_requires=['astropy'],
provides=['cpl'],
packages=['cpl'],
ext_modules=[module1]
)
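# Typical usage with standard setuptools commands, e.g.:
#   python setup.py build_ext --inplace   # compile the cpl.CPL_recipe C extension
#   pip install .                         # build and install the package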
| 2,556 | 34.027397 | 78 | py |
python-cpl | python-cpl-master/cpl/esorex.py | '''`EsoRex <http://www.eso.org/sci/software/cpl/esorex.html>`_ is a standard
execution environment for CPL recipes provided by `ESO <http://www.eso.org>`_.
'''
import os
import logging
from .recipe import Recipe
from . import logger
def load_sof(source):
'''Read an :program:`EsoRex` SOF file.
:param source: SOF ("Set Of Files") file object or string with SOF
file content.
:type source: :class:`str` or :class:`file`
These files contain the raw and calibration files for a recipe. The
content of the file is returned as a map with the tag as key and the list
of file names as value.
    The result of this function may be directly set as the :attr:`cpl.Recipe.calib`
attribute::
import cpl
myrecipe = cpl.Recipe('muse_bias')
        myrecipe.calib = cpl.esorex.load_sof(open('muse_bias.sof'))
.. note::
        The raw data frame is silently ignored when setting
        :attr:`cpl.Recipe.calib` for MUSE recipes. Other recipes ignore the
        raw data frame only if it was set manually as :attr:`cpl.Recipe.tag`
        or in :attr:`cpl.Recipe.tags`, since there is no way to automatically
        distinguish between them.
'''
if isinstance(source, str):
return load_sof(open(source) if os.path.exists(source) else source.split('\n'))
else:
res = dict()
for line in source:
            if not line.strip() or line.strip().startswith('#'):
continue
ls = line.split()
fn = ls[0]
key = ls[1]
if key not in res:
res[key] = fn
elif isinstance(res[key], list):
res[key].append(fn)
else:
res[key] = [ res[key], fn ]
return res
def load_rc(source = None):
'''Read an :program:`EsoRex` configuration file.
:param source: Configuration file object, or string with file content.
If not set, the :program:`EsoRex` config file
:file:`~/.esorex/esorex.rc` is used.
:type source: :class:`str` or :class:`file`
These files contain configuration parameters for :program:`EsoRex` or
recipes. The content of the file is returned as a map with the (full)
parameter name as key and its setting as string value.
    The result of this function may be directly set as the :attr:`cpl.Recipe.param`
attribute::
import cpl
myrecipe = cpl.Recipe('muse_bias')
myrecipe.param = cpl.esorex.load_rc('muse_bias.rc')
'''
if source is None:
source = open(os.path.expanduser('~/.esorex/esorex.rc'))
if isinstance(source, str):
return load_rc(open(source) if os.path.exists(source) else source.split('\n'))
else:
res = dict()
for line in source:
if not line or not line.strip() or line.startswith('#'):
continue
name = line.split('=', 1)[0]
value = line.split('=', 1)[1]
if name and value:
res[name.strip()] = value.strip()
return res
def init(source = None):
'''Set up the logging and the recipe search path from the
:file:`esorex.rc` file.
:param source: Configuration file object, or string with file content.
If not set, the esorex config file :file:`~/.esorex/esorex.rc` is used.
:type source: :class:`str` or :class:`file`
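
    Example::

        import cpl
        cpl.esorex.init()  # apply logging and recipe-path settings from the file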
'''
rc = load_rc(source)
if 'esorex.caller.recipe-dir' in rc:
Recipe.path = rc['esorex.caller.recipe-dir'].split(':')
if 'esorex.caller.msg-level' in rc:
msg.level = rc['esorex.caller.msg-level']
if 'esorex.caller.log-level' in rc:
log.level = rc['esorex.caller.log-level']
if 'esorex.caller.log-dir' in rc:
log.dir = rc['esorex.caller.log-dir']
if 'esorex.caller.log-file' in rc:
log.filename = rc['esorex.caller.log-file']
class CplLogger(object):
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARN
ERROR = logging.ERROR
OFF = logging.CRITICAL + 1
def __init__(self):
self.handler = None
self._component = False
self._time = False
self._threadid = False
self.format = None
self.dir = None
self._level = CplLogger.OFF
def _init_handler(self):
if not self.handler:
self.handler = logging.StreamHandler()
logging.getLogger().addHandler(self.handler)
self.handler.setLevel(self._level)
self.handler.setFormatter(logging.Formatter(self.format,
'%H:%M:%S'))
def _shutdown_handler(self):
if self.handler:
logging.getLogger().removeHandler(self.handler)
self.handler.close()
self.handler = None
@property
def level(self):
'''Log level for output to the terminal. Any of
[ DEBUG, INFO, WARN, ERROR, OFF ].
'''
return self._level
@level.setter
def level(self, level):
        if isinstance(level, str):
level = logger.level[level.upper()]
if level == CplLogger.OFF:
self._shutdown_handler()
else:
self._init_handler()
logging.getLogger().setLevel(logging.DEBUG)
if self.handler:
self.handler.setLevel(level)
self._level = level
@property
def format(self):
'''Output format.
.. seealso :: `logging.LogRecord attributes <http://docs.python.org/library/logging.html#logrecord-attributes>`_
Key mappings in the logging output.'''
return self._format
@format.setter
def format(self, fmt):
if fmt is None:
fmt = '%(asctime)s ' if self._time else ''
fmt += '[%(levelname)7s]'
fmt += '[%(threadName)s] ' if self._threadid else ' '
fmt += '%(name)s: ' if self._component else ''
fmt += '%(message)s'
if self.handler:
self.handler.setFormatter(logging.Formatter(fmt, '%H:%M:%S'))
self._format = fmt
@property
def component(self):
'''If :obj:`True`, attach the component name to output messages.
'''
return self._component
@component.setter
def component(self, enable):
self._component = enable
self.format = None
@property
def time(self):
'''If :obj:`True`, attach a time tag to output messages.
'''
return self._time
@time.setter
def time(self, enable):
self._time = enable
self.format = None
@property
def threadid(self):
'''If :obj:`True`, attach a thread tag to output messages.
'''
return self._threadid
@threadid.setter
def threadid(self, enable):
self._threadid = enable
self.format = None
class CplFileLogger(CplLogger):
def __init__(self):
CplLogger.__init__(self)
self._filename = None
self.threadid = True
self.component = True
self.time = True
self.level = CplLogger.INFO
def _init_handler(self):
if not self.handler:
if self._filename:
if self.dir:
fname = os.path.join(self.dir, self._filename)
self.handler = logging.FileHandler(fname)
else:
self.handler = logging.FileHandler(self._filename)
else:
self.handler = None
if self.handler:
logging.getLogger().addHandler(self.handler)
self.handler.setLevel(self._level)
self.handler.setFormatter(logging.Formatter(self.format,
'%H:%M:%S'))
@property
def filename(self):
'''Log file name.
'''
return self._filename
@filename.setter
def filename(self, name):
if self._filename != name:
self._shutdown_handler()
self._filename = name
self._init_handler()
msg = CplLogger()
'''This variable is a :class:`CplLogger` instance that provides a convenience
stream handler similar to the terminal logging functionality of the CPL. It
basically does the same as::
import logging
log = logging.getLogger()
log.setLevel(logging.INFO)
ch = logging.StreamHandler()
    ch.setLevel(logging.CRITICAL + 1)   # i.e. CplLogger.OFF
ch.setFormatter(logging.Formatter('[%(levelname)7s] %(message)s'))
log.addHandler(ch)
'''
log = CplFileLogger()
'''This variable is a :class:`CplFileLogger` instance that provides a convenience
file handler similar to the file logging functionality of the CPL. It
basically does the same as::
import logging
log = logging.getLogger()
log.setLevel(logging.INFO)
ch = logging.FileHandler(filename)
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('%(asctime)s [%(levelname)7s] %(funcName)s: %(message)s'))
log.addHandler(ch)
'''
| 8,970 | 30.699647 | 121 | py |
python-cpl | python-cpl-master/cpl/dfs.py | import sys
from astropy.io import fits
import cpl
# ``unicode`` exists only on Python 2; on Python 3 every string is ``str``.
try:
    string_types = (str, unicode)
except NameError:
    string_types = (str,)
class ProcessingInfo(object):
'''Support for reading input files and parameters from the FITS
header of a CPL processed file.
This is done through the FITS headers that were written by the DFS function
called within the processing recipe.
.. attribute:: name
Recipe name
.. attribute:: version
Recipe version string
.. attribute:: pipeline
Pipeline name
.. attribute:: cpl_version
CPL version string
.. attribute:: tag
Tag name
.. attribute:: calib
Calibration frames from a FITS file processed with CPL.
       The result of this function may be directly set as the :attr:`cpl.Recipe.calib`
attribute::
import cpl
myrecipe = cpl.Recipe('muse_bias')
myrecipe.calib = cpl.dfs.ProcessingInfo('MASTER_BIAS_0.fits').calib
.. note::
This will not work properly for files that had
:class:`astropy.io.fits.HDUList` inputs since they have assigned a
temporary file name only.
.. attribute:: raw
Raw (input) frames
.. note::
       This will not work properly for files that had
       :class:`astropy.io.fits.HDUList` inputs, since those were assigned
       only a temporary file name.
.. attribute:: param
Processing parameters.
       The result of this function may be directly set as the :attr:`cpl.Recipe.param`
attribute::
import cpl
myrecipe = cpl.Recipe('muse_bias')
myrecipe.param = cpl.dfs.ProcessingInfo('MASTER_BIAS_0.fits').param
.. attribute:: md5sum
MD5 sum of the data portions of the output file (header keyword
'DATAMD5').
.. attribute:: md5sums
MD5 sums of the input and calibration files. :class:`dict` with the
file name as key and the corresponding MD5 sum as value.
.. note::
Due to a design decision in CPL, the raw input files are not
accompanied with the MD5 sum.
'''
def __init__(self, source, recno = -1):
'''
:param source: Object pointing to the result file header
:type source: :class:`str` or :class:`astropy.io.fits.HDUList`
or :class:`astropy.io.fits.PrimaryHDU` or
:class:`astropy.io.fits.Header`
:param recno: Record number. Optional. If not given, the last record
(with the highest record number) is used.
:type recno: :class:`int`
'''
if isinstance(source, str):
header = fits.open(source)[0].header
elif isinstance(source, (fits.HDUList, list)):
header = source[0].header
elif isinstance(source, fits.PrimaryHDU):
header = source.header
elif isinstance(source, (fits.Header, dict)):
header = source
else:
raise ValueError('Cannot assign type {0} to header'.format(
source.__class__.__name__))
if recno < 0:
for reccnt in range(1, 2**16):
if 'HIERARCH ESO PRO REC{0} ID'.format(reccnt) not in header:
break
recno += reccnt
self.name = header['HIERARCH ESO PRO REC{0} ID'.format(recno)]
self.product = header['HIERARCH ESO PRO CATG']
self.orig_filename = header['PIPEFILE']
pipe_id = header.get('HIERARCH ESO PRO REC{0} PIPE ID'.format(recno))
if pipe_id:
self.pipeline, version = pipe_id.split('/')
num_version = 0
for i in version.split('.'):
num_version = num_version * 100 + int(i)
self.version = (num_version, version)
else:
self.pipeline = None
self.version = None
self.cpl_version = header.get('HIERARCH ESO PRO REC{0} DRS ID'.format(recno))
self.md5sum = header.get('DATAMD5')
self.md5sums = {}
self.calib = ProcessingInfo._get_rec_keys(header, recno, 'CAL', 'CATG', 'NAME')
for cat, md5 in ProcessingInfo._get_rec_keys(header, recno, 'CAL', 'CATG',
'DATAMD5').items():
if isinstance(md5, list):
for i, m in enumerate(md5):
if m is not None:
self.md5sums[self.calib[cat][i]] = m
elif md5 is not None:
self.md5sums[self.calib[cat]] = md5
raw = ProcessingInfo._get_rec_keys(header, recno, 'RAW', 'CATG', 'NAME')
if raw:
self.tag = list(raw.keys())[0]
self.raw = raw[self.tag]
md5 = ProcessingInfo._get_rec_keys(header, recno, 'RAW', 'CATG',
'DATAMD5')[self.tag]
if isinstance(md5, list):
for i, m in enumerate(md5):
if m is not None:
self.md5sums[self.raw[i]] = m
elif md5 is not None:
self.md5sums[self.raw] = md5
else:
self.tag = None
self.raw = None
self.input = None
param = ProcessingInfo._get_rec_keys(header, recno, 'PARAM', 'NAME', 'VALUE')
self.param = dict()
for k,v in param.items():
self.param[k] = ProcessingInfo._best_type(v)
def create_recipe(self):
'''Create a recipe and configure it with the parameters, calibration frames,
and the input tag. The recipe version will be the latest available one.
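
        Example (the file name is illustrative)::

            import cpl
            pi = cpl.dfs.ProcessingInfo('MASTER_BIAS_0.fits')
            bias = pi.create_recipe()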
'''
recipe = cpl.Recipe(self.name)
recipe.param = self.param
recipe.calib = self.calib
recipe.tag = self.tag
return recipe
def create_script(self, scriptfile = sys.stdout):
'''Create a sample script that creates the recipe, configures it with
the parameters, calibration frames and input tags, and finally
starts the recipe.
'''
        if isinstance(scriptfile, str):
            scriptfile = open(scriptfile, mode='w')
scriptfile.write('import cpl\n\n')
scriptfile.write('# Recipe: {0}.{1}, Version {2}, CPL version {3}\n'.format(
self.pipeline, self.name, self.version[1], self.cpl_version))
scriptfile.write('{0} = cpl.Recipe({1}, version = {2})\n'.format(
self.name, repr(self.name), repr(self.version[0])))
scriptfile.write('\n# Parameters:\n')
for k,v in self.param.items():
scriptfile.write('{0}.param.{1} = {2}\n'.format(self.name, k, repr(v)))
if self.calib:
scriptfile.write('\n# Calibration frames:\n')
for k,v in self.calib.items():
scriptfile.write('{0}.calib.{1} = {2}\n'.format(self.name, k, repr(v)))
scriptfile.write('\n# Process input frames:\n')
scriptfile.write('{0}.tag = {1}\n'.format(self.name, repr(self.tag)))
scriptfile.write('res = {0}({1})\n'.format(self.name, repr(self.raw)))
# scriptfile.write('{0} = res.{1}\n'.format(self.product.lower(), self.product))
# scriptfile.write('{0}.writeto({1})\n'.format(self.product.lower(),
# repr(self.orig_filename)))
def __str__(self):
s = 'Recipe: {0}, Version {1}, CPL version {2}\n'.format(
self.name, self.version, self.cpl_version)
s += 'Parameters:\n'
for k,v in self.param.items():
s += ' {0}.{1}.{2} = {3}\n'.format(self.pipeline, self.name, k, v)
if self.calib:
s += 'Calibration frames:\n'
for k,v in self.calib.items():
                if isinstance(v, string_types):
s += ' {0} {1}\n'.format(v,k)
else:
m = max(len(n) for n in v)
for n in v:
                    s += '  {0:<{width}} {1}\n'.format(n, k, width = m)
if self.raw is not None:
s += 'Input frames:\n'
            if isinstance(self.raw, string_types):
s += ' {0} {1}\n'.format(self.raw, self.tag)
else:
m = max(len(n) for n in self.raw)
for n in self.raw:
s += ' {0:<{width}} {1}\n'.format(n, self.tag, width = m)
return s
def printinfo(self):
'''Print the recipe information to standard output.
'''
print(str(self))
@staticmethod
def _get_rec_keys(header, recno, key, name, value):
'''Get a dictionary of key/value pairs from the DFS section of the
header.
:param key: Common keyword for the value. Usually 'PARAM' for
parameters, 'RAW' for raw frames, and 'CAL' for
calibration frames.
:type key: :class:`str`
:param recno: Record number.
:type recno: :class:`int`
:param name: Header keyword (last part) for the name of each key
:type name: :class:`str`
:param value: Header keyword (last part) for the value of each key
:type name: :class:`str`
When the header
HIERARCH ESO PRO REC1 PARAM1 NAME = 'nifu'
HIERARCH ESO PRO REC1 PARAM1 VALUE = '1'
HIERARCH ESO PRO REC1 PARAM2 NAME = 'combine'
HIERARCH ESO PRO REC1 PARAM2 VALUE = 'median'
is called with
ProcessingInfo._get_rec_keys(1, 'PARAM', 'NAME', 'VALUE')
the returned dictionary will contain the keys
res['nifu'] = '1'
res['combine'] = 'median'
'''
res = dict()
for i in range(1, 2**16):
try:
prefix = 'HIERARCH ESO PRO REC{0} {1}{2}'.format(recno, key, i)
k = header['{0} {1}'.format(prefix, name)]
fn = header.get('{0} {1}'.format(prefix, value))
if k not in res:
res[k] = fn
elif isinstance(res[k], list):
res[k].append(fn)
else:
res[k] = [ res[k], fn ]
except KeyError:
break
return res
@staticmethod
def _best_type(value):
'''Convert the value to the best applicable type: :class:`int`,
:class:`float`, :class:`bool` or :class`str`.
:param value: Value to convert.
:type value: :class:`str`
'''
for t in int, float:
try:
return t(value)
except ValueError:
pass
return {'true':True, 'false':False}.get(value, value)
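        # e.g. _best_type('1') -> 1, _best_type('2.5') -> 2.5,
        # _best_type('true') -> True, _best_type('median') -> 'median'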
@staticmethod
def list(source):
'''Get a list of all `ProcessingInfo` objects in the FITS header. The
list is sorted by the execution order.
:param source: Object pointing to the result file header
:type source: :class:`str` or :class:`astropy.io.fits.HDUList`
or :class:`astropy.io.fits.PrimaryHDU` or
:class:`astropy.io.fits.Header`
'''
pi = []
for i in range(1, 2**16):
try:
pi.append(ProcessingInfo(source, i))
except KeyError:
break
return pi
if __name__ == '__main__':
for arg in sys.argv[1:]:
print('{0}\nfile: {1}'.format('-' * 72, arg))
pi = cpl.dfs.ProcessingInfo(arg)
pi.printinfo()
| 11,358 | 35.641935 | 87 | py |
python-cpl | python-cpl-master/cpl/frames.py | from __future__ import absolute_import
import os
from astropy.io import fits
from . import md5sum
class FrameConfig(object):
'''Frame configuration.
Each :class:`FrameConfig` object stores information about one the data
type a recipe can process. They are used for defining the calibration
files. However, since this information is not generally provided by CPL
recipes, it contains only dummy information, except for the MUSE recipes.
The objects stores a frame tag, a unique identifier for a certain kind of
frame, the minimum and maximum number of frames needed.
Attributes:
.. attribute:: tag
Category tag name. The tag name is used to distinguish between
different types of files. An examples of tag names is 'MASTER_BIAS'
which specifies the master bias calibration file(s).
.. attribute:: min
Minimal number of frames, or :obj:`None` if not specified. A frame is
required if the :attr:`min` is set to a value greater than 0.
.. attribute:: max
Maximal number of frames, or :obj:`None` if not specified
.. attribute:: frames
List of frames (file names or :class:`astropy.io.fits.HDUList` objects)
that are assigned to this frame type.
'''
def __init__(self, tag, min_frames = 0, max_frames = 0, frames = None):
self.tag = tag
self.min = min_frames if min_frames > 0 else None
self.max = max_frames if max_frames > 0 else None
self.frames = frames
self.__doc__ = self._doc()
def extend_range(self, min_frames, max_frames):
if self.min is not None:
self.min = min(self.min, min_frames) if min_frames is not None \
else None
if self.max is not None:
self.max = max(self.max, max_frames) if max_frames is not None \
else None
def set_range(self, min_frames, max_frames):
self.min = min_frames
self.max = max_frames
def __str__(self):
return str(self.frames)
def __repr__(self):
return 'FrameDef(%s, frames=%s)' % (repr(self.tag), repr(self.frames))
def _doc(self):
if self.max is None or self.min is None:
r = ' one frame or list of frames'
elif self.max == 1:
r = ' one frame'
elif self.min > 1 and self.max > self.min:
r = ' list of %i-%i frames' % (self.min, self.max)
elif self.max > 1:
r = ' one frame or list of max. %i frames' % self.max
elif self.min > 1:
            r = ' list of min. %i frames' % self.min
else:
r = ' one frame or list of frames'
if not self.min:
r += ' (optional)'
return r
def __getitem__(self, i):
return (self.tag, self.frames)[i]
class FrameList(object):
def __init__(self, recipe, other = None):
self._recipe = recipe
self._values = dict()
if isinstance(other, self.__class__):
self._set_items((o.tag, o.frames) for o in other)
elif isinstance(other, dict):
self._set_items(other.items())
elif other:
self._set_items(other)
def _set_items(self, l):
for o in l:
self[o[0]] = o[1]
@property
def _cpl_dict(self):
cpl_frameconfigs = self._recipe._recipe.frameConfig()
if cpl_frameconfigs is None:
return None
s = dict()
for configs in cpl_frameconfigs:
c_cfg = configs[1]
for f in c_cfg:
if f[0] in s:
s[f[0]].extend_range(f[1], f[2])
elif f[0] in self._values:
s[f[0]] = self._values[f[0]]
s[f[0]].set_range(f[1], f[2])
else:
s[f[0]] = FrameConfig(f[0], f[1], f[2])
self._values[f[0]] = s[f[0]]
return s
@property
def _dict(self):
return self._cpl_dict or self._values
def __iter__(self):
return iter(self._dict.values())
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
d = self._cpl_dict
if d is not None:
d[key].frames = value
else:
self._values.setdefault(key, FrameConfig(key)).frames = value
def __delitem__(self, key):
self._dict[key].frames = None
def __contains__(self, key):
return key in self._dict
def __len__(self):
return len(self._dict)
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if key.startswith('_'):
super(FrameList, self).__setattr__(key, value)
else:
self[key] = value
def __delattr__(self, key):
del self[key]
def __dir__(self):
return self._dict.keys()
def __repr__(self):
return repr(dict(iter(self)))
def __str__(self):
return str(dict(iter(self)))
def __eq__(self, other):
return dict(iter(self)) == other
@property
def __doc__(self):
r = 'Frames for recipe %s.\n\nAttributes:\n' % (
self._recipe.name)
for s in self:
            r += '%s: %s\n' % (s.tag, s.__doc__)
return r
def _aslist(self, frames):
flist = FrameList(self._recipe, self)
if frames is not None:
flist._set_items(frames.items())
return [(f.tag, f.frames) for f in flist]
def mkabspath(frames, tmpdir):
'''Convert all filenames in the frames list into absolute paths.
    :class:`astropy.io.fits.HDUList` objects will be converted to temporary
    files located in the temporary directory tmpdir.
The replacement is done in-place. The function will return the list of
temporary files.
    :param frames: :class:`list` of (tag, frame) tuples with frame being either
        a file name or a HDU list.
    :param tmpdir: directory where the temporary files are being created.
'''
tmpfiles = list()
for i, frame in enumerate(frames):
if isinstance(frame[1], fits.HDUList):
md5 = md5sum.update_md5(frame[1])
filename = os.path.abspath(os.path.join(tmpdir, '%s_%s.fits'
% (frame[0], md5[:8])))
try:
os.remove(filename)
            except OSError:
pass
frames[i] = ( frame[0], filename )
tmpfiles.append(filename)
frame[1].writeto(filename)
else:
frames[i] = ( frame[0], os.path.abspath(frame[1]) )
return tmpfiles
def expandframelist(frames):
'''Convert a dictionary with frames into a frame list where each frame
gets its own entry in the form (tag, frame)
'''
framelist = list()
for tag, f in frames:
if isinstance(f, list) and not isinstance(f, fits.HDUList):
framelist += [ (tag, frame) for frame in f ]
elif f is not None:
framelist.append((tag, f))
return framelist
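# Example (file names illustrative):
#   expandframelist([('BIAS', ['b1.fits', 'b2.fits']), ('DARK', None)])
# yields [('BIAS', 'b1.fits'), ('BIAS', 'b2.fits')]; a single file name or
# an HDU list is passed through as one (tag, frame) pair.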
| 7,106 | 30.171053 | 78 | py |