
Imops

Efficient parallelizable algorithms for multidimensional arrays to speed up your data pipelines

Install

```shell
pip install imops  # default install with Cython backend
pip install imops[numba]  # additionally install Numba backend
```

Functions

imops.crop.crop_to_shape(x, shape, axis=None, ratio=0.5)

Crop x to match shape along axis.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `ndarray` | n-dimensional array | *required* |
| `shape` | `AxesLike` | final shape | *required* |
| `axis` | `AxesLike` | axis along which `x` will be cropped | `None` |
| `ratio` | `AxesParams` | float or sequence of floats describing what proportion of cropping to apply on the left sides of the cropping axes; the remaining cropping is applied on the right sides | `0.5` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `cropped` | `ndarray` | cropped array |

Examples:

```python
x  # array of shape [2, 3, 4]
cropped = crop_to_shape(x, [1, 2, 3], ratio=0)  # crop to shape [1, 2, 3] from the right
cropped = crop_to_shape(x, 2, axis=1, ratio=1)  # crop to shape [2, 2, 4] from the left
cropped = crop_to_shape(x, [3, 4, 5])  # fail due to bigger resulting shape
```
Source code in imops/crop.py
def crop_to_shape(x: np.ndarray, shape: AxesLike, axis: AxesLike = None, ratio: AxesParams = 0.5) -> np.ndarray:
    """
    Crop `x` to match `shape` along `axis`.

    Parameters
    ----------
    x: np.ndarray
        n-dimensional array
    shape: AxesLike
        final shape
    axis: AxesLike
        axis along which `x` will be cropped
    ratio: AxesParams
        float or sequence of floats describing what proportion of cropping to apply on the left sides of cropping axes.
        Remaining ratio of cropping will be applied on the right sides

    Returns
    -------
    cropped: np.ndarray
        cropped array

    Examples
    --------
    ```python
    x  # array of shape [2, 3, 4]
    cropped = crop_to_shape(x, [1, 2, 3], ratio=0)  # crop to shape [1, 2, 3] from the right
    cropped = crop_to_shape(x, 2, axis=1, ratio=1)  # crop to shape [2, 2, 4] from the left
    cropped = crop_to_shape(x, [3, 4, 5])  # fail due to bigger resulting shape
    ```
    """
    x = np.asarray(x)
    shape = np.asarray(shape)
    assert_subdtype(shape.dtype, np.integer, 'shape')

    axis, shape, ratio = broadcast_axis(axis, x.ndim, shape, ratio)

    old_shape, new_shape = np.array(x.shape), np.array(fill_by_indices(x.shape, shape, axis))
    if (old_shape < new_shape).any():
        raise ValueError(f'The resulting shape cannot be greater than the original one: {old_shape} vs {new_shape}.')

    ndim = len(x.shape)
    ratio = fill_by_indices(np.zeros(ndim), ratio, axis)
    start = ((old_shape - new_shape) * ratio).astype(int)

    # TODO: Create contiguous array?
    return x[tuple(map(slice, start, start + new_shape))]
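
To make the `ratio` semantics concrete, here is a self-contained sketch; the input array and target shapes below are invented purely for illustration:

```python
import numpy as np
from imops.crop import crop_to_shape

x = np.arange(24).reshape(2, 3, 4)

# symmetric crop: with ratio=0.5 the excess is split between both sides
symmetric = crop_to_shape(x, [2, 2, 2])
print(symmetric.shape)  # (2, 2, 2)

# crop a single axis only, removing everything from the left (ratio=1)
left_cropped = crop_to_shape(x, 2, axis=1, ratio=1)
print(left_cropped.shape)  # (2, 2, 4)
```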

imops.crop.crop_to_box(x, box, axis=None, padding_values=None, num_threads=_NUMERIC_DEFAULT_NUM_THREADS, backend=None)

Crop x according to box along axis.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `ndarray` | n-dimensional array | *required* |
| `box` | `ndarray` | array of shape `(2, x.ndim or len(axis) if axis is passed)` describing crop boundaries | *required* |
| `axis` | `AxesLike` | axis along which `x` will be cropped | `None` |
| `padding_values` | `AxesParams` | values to pad with if `box` exceeds the input's limits | `None` |
| `num_threads` | `int` | the number of threads to use for computation. Default = 4. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `_NUMERIC_DEFAULT_NUM_THREADS` |
| `backend` | `BackendLike` | which backend to use. `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `cropped` | `ndarray` | cropped array |

Examples:

```python
x  # array of shape [2, 3, 4]
cropped = crop_to_box(x, np.array([[0, 0, 0], [1, 1, 1]]))  # crop to shape [1, 1, 1]
cropped = crop_to_box(x, np.array([[0, 0, 0], [5, 5, 5]]))  # fail, box exceeds the input's limits
cropped = crop_to_box(x, np.array([[0], [5]]), axis=0, padding_values=0)  # pad with 0-s to shape [5, 3, 4]
```
Source code in imops/crop.py
def crop_to_box(
    x: np.ndarray,
    box: np.ndarray,
    axis: AxesLike = None,
    padding_values: AxesParams = None,
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Crop `x` according to `box` along `axis`.

    Parameters
    ----------
    x: np.ndarray
        n-dimensional array
    box: np.ndarray
        array of shape (2, x.ndim or len(axis) if axis is passed) describing crop boundaries
    axis: AxesLike
        axis along which `x` will be cropped
    padding_values: AxesParams
        values to pad with if box exceeds the input's limits
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    cropped: np.ndarray
        cropped array

    Examples
    --------
    ```python
    x  # array of shape [2, 3, 4]
    cropped = crop_to_box(x, np.array([[0, 0, 0], [1, 1, 1]]))  # crop to shape [1, 1, 1]
    cropped = crop_to_box(x, np.array([[0, 0, 0], [5, 5, 5]]))  # fail, box exceeds the input's limits
    cropped = crop_to_box(x, np.array([[0], [5]]), axis=0, padding_values=0)  # pad with 0-s to shape [5, 3, 4]
    ```
    """
    x = np.asarray(x)
    box = np.asarray(box)
    assert_subdtype(box.dtype, np.integer, 'box')

    start, stop = box
    axis, start, stop = broadcast_axis(axis, x.ndim, start, stop)

    slice_start = np.maximum(start, 0)
    slice_stop = np.minimum(stop, np.array(x.shape)[list(axis)])
    padding = np.array([slice_start - start, stop - slice_stop], dtype=int).T

    if padding_values is None and padding.any():
        raise ValueError(f"The box {box} exceeds the input's limits {x.shape}.")

    slice_start = fill_by_indices(np.zeros(x.ndim, int), slice_start, axis)
    slice_stop = fill_by_indices(x.shape, slice_stop, axis)
    # TODO: Create contiguous array?
    x = x[tuple(map(slice, slice_start, slice_stop))]

    if padding_values is not None and padding.any():
        x = pad(x, padding, axis, padding_values, num_threads=num_threads, backend=backend)

    return x
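
A self-contained sketch of the two modes described above (a plain crop versus a crop with padding when the box sticks out of the array); the boxes and shapes are made up for illustration, and the shape comments follow from the box sizes:

```python
import numpy as np
from imops.crop import crop_to_box

x = np.arange(24).reshape(2, 3, 4)

# plain crop: the two box rows are the [start, stop) corners
box = np.array([[0, 1, 1], [2, 3, 4]])
print(crop_to_box(x, box).shape)  # (2, 2, 3)

# a box that exceeds the input's limits requires padding_values
box = np.array([[0, 0, 2], [2, 3, 6]])
print(crop_to_box(x, box, padding_values=0).shape)  # (2, 3, 4)
```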

imops.pad.pad(x, padding, axis=None, padding_values=0, num_threads=_NUMERIC_DEFAULT_NUM_THREADS, backend=None)

Pad x according to padding along the axis.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `ndarray` | n-dimensional array to pad | *required* |
| `padding` | `Union[AxesLike, Sequence[Sequence[int]]]` | if a 2D array `[[start_1, stop_1], ..., [start_n, stop_n]]` - specifies individual padding for each axis from `axis`; the length of the array must either be equal to 1 or match the length of `axis`. If a 1D array `[val_1, ..., val_n]` - same as `[[val_1, val_1], ..., [val_n, val_n]]`. If a scalar `val` - same as `[[val, val]]` | *required* |
| `axis` | `AxesLike` | axis along which `x` will be padded | `None` |
| `padding_values` | `Union[AxesParams, Callable]` | values to pad with, must be broadcastable to the resulting array. If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used | `0` |
| `num_threads` | `int` | the number of threads to use for computation. Default = 4. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `_NUMERIC_DEFAULT_NUM_THREADS` |
| `backend` | `BackendLike` | which backend to use. `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `padded` | `ndarray` | padded array |

Examples:

```python
padded = pad(x, 2)  # pad 2 zeros on each side of each axes
padded = pad(x, [1, 1], axis=(-1, -2))  # pad 1 zero on each side of last 2 axes
```
Source code in imops/pad.py
def pad(
    x: np.ndarray,
    padding: Union[AxesLike, Sequence[Sequence[int]]],
    axis: AxesLike = None,
    padding_values: Union[AxesParams, Callable] = 0,
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Pad `x` according to `padding` along the `axis`.

    Parameters
    ----------
    x: np.ndarray
        n-dimensional array to pad
    padding: Union[AxesLike, Sequence[Sequence[int]]]
        if 2D array [[start_1, stop_1], ..., [start_n, stop_n]] - specifies individual padding
        for each axis from `axis`. The length of the array must either be equal to 1 or match the length of `axis`.
        If 1D array [val_1, ..., val_n] - same as [[val_1, val_1], ..., [val_n, val_n]].
        If scalar (val) - same as [[val, val]]
    axis: AxesLike
        axis along which `x` will be padded
    padding_values: Union[AxesParams, Callable]
        values to pad with, must be broadcastable to the resulting array.
        If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    padded: np.ndarray
        padded array

    Examples
    --------
    ```python
    padded = pad(x, 2)  # pad 2 zeros on each side of each axes
    padded = pad(x, [1, 1], axis=(-1, -2))  # pad 1 zero on each side of last 2 axes
    ```
    """
    x = np.asarray(x)
    padding = np.asarray(padding)
    if padding.ndim < 2:
        padding = padding.reshape(-1, 1)
    axis = axis_from_dim(axis, x.ndim)
    padding = np.asarray(fill_by_indices(np.zeros((x.ndim, 2), dtype=int), np.atleast_2d(padding), axis))
    if (padding < 0).any():
        raise ValueError(f'Padding must be non-negative: {padding.tolist()}.')
    if callable(padding_values):
        padding_values = padding_values(x)

    new_shape = np.array(x.shape) + np.sum(padding, axis=1)
    new_x = np.array(padding_values, dtype=x.dtype)
    new_x = copy(np.broadcast_to(new_x, new_shape), order='C', num_threads=num_threads, backend=backend)

    start = padding[:, 0]
    end = np.where(padding[:, 1] != 0, -padding[:, 1], None)
    # TODO: how to parallelize this?
    new_x[tuple(map(slice, start, end))] = x

    return new_x
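
The three accepted forms of `padding` can be illustrated with a small hand-made array; everything below is an invented example:

```python
import numpy as np
from imops.pad import pad

x = np.arange(6, dtype=np.float32).reshape(2, 3)

# scalar padding: 1 value on each side of every axis
print(pad(x, 1).shape)                  # (4, 5)

# per-axis [before, after] padding along the last axis only
print(pad(x, [[2, 0]], axis=-1).shape)  # (2, 5)

# callable padding_values: pads with x.min() instead of zeros
padded = pad(x, 1, padding_values=np.min)
```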

imops.pad.pad_to_shape(x, shape, axis=None, padding_values=0, ratio=0.5, num_threads=_NUMERIC_DEFAULT_NUM_THREADS, backend=None)

Pad x to match shape along the axis.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `ndarray` | n-dimensional array to pad | *required* |
| `shape` | `AxesLike` | final shape | *required* |
| `axis` | `AxesLike` | axis along which `x` will be padded | `None` |
| `padding_values` | `Union[AxesParams, Callable]` | values to pad with, must be broadcastable to the resulting array. If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used | `0` |
| `ratio` | `AxesParams` | float or sequence of floats describing what proportion of padding to apply on the left sides of the padding axes; the remaining padding is applied on the right sides | `0.5` |
| `num_threads` | `int` | the number of threads to use for computation. Default = 4. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `_NUMERIC_DEFAULT_NUM_THREADS` |
| `backend` | `BackendLike` | which backend to use. `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `padded` | `ndarray` | padded array |

Examples:

```python
padded = pad_to_shape(x, [4, 5, 6])  # pad 3d array
padded = pad_to_shape(x, [4, 5], axis=[0, 1], ratio=0)  # pad first 2 axes on the right
```
Source code in imops/pad.py
def pad_to_shape(
    x: np.ndarray,
    shape: AxesLike,
    axis: AxesLike = None,
    padding_values: Union[AxesParams, Callable] = 0,
    ratio: AxesParams = 0.5,
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Pad `x` to match `shape` along the `axis`.

    Parameters
    ----------
    x: np.ndarray
        n-dimensional array to pad
    shape: AxesLike
        final shape
    axis: AxesLike
        axis along which `x` will be padded
    padding_values: Union[AxesParams, Callable]
        values to pad with, must be broadcastable to the resulting array.
        If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
    ratio: AxesParams
        float or sequence of floats describing what proportion of padding to apply on the left sides of padding axes.
        Remaining ratio of padding will be applied on the right sides
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    padded: np.ndarray
        padded array

    Examples
    --------
    ```python
    padded = pad_to_shape(x, [4, 5, 6])  # pad 3d array
    padded = pad_to_shape(x, [4, 5], axis=[0, 1], ratio=0)  # pad first 2 axes on the right
    ```
    """
    x = np.asarray(x)
    axis, shape, ratio = broadcast_axis(axis, x.ndim, shape, ratio)

    old_shape = np.array(x.shape)[list(axis)]
    if (old_shape > shape).any():
        shape = fill_by_indices(x.shape, shape, axis)
        raise ValueError(f'The resulting shape cannot be smaller than the original: {x.shape} vs {shape}.')

    delta = shape - old_shape
    start = (delta * ratio).astype(int)
    padding = np.array((start, delta - start)).T.astype(int)

    return pad(x, padding, axis, padding_values=padding_values, num_threads=num_threads, backend=backend)
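
A short illustrative sketch of ratio-controlled padding; the target shapes are arbitrary:

```python
import numpy as np
from imops.pad import pad_to_shape

x = np.zeros((2, 3, 4))

# symmetric zero padding up to the target shape
print(pad_to_shape(x, [4, 5, 6]).shape)           # (4, 5, 6)

# pad only the first axis, putting all of the padding on the right (ratio=0)
print(pad_to_shape(x, 5, axis=0, ratio=0).shape)  # (5, 3, 4)
```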

imops.pad.pad_to_divisible(x, divisor, axis=None, padding_values=0, ratio=0.5, remainder=0, num_threads=_NUMERIC_DEFAULT_NUM_THREADS, backend=None)

Pad x to be divisible by divisor along the axis.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `ndarray` | n-dimensional array to pad | *required* |
| `divisor` | `AxesLike` | float or sequence of floats by which the incoming array shape will be made divisible | *required* |
| `axis` | `AxesLike` | axis along which the array will be padded. If None - the last `len(divisor)` axes are used | `None` |
| `padding_values` | `Union[AxesParams, Callable]` | values to pad with. If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used | `0` |
| `ratio` | `AxesParams` | float or sequence of floats describing what proportion of padding to apply on the left sides of the padding axes; the remaining padding is applied on the right sides | `0.5` |
| `remainder` | `AxesLike` | `x` will be padded such that its shape gives the remainder `remainder` when divided by `divisor` | `0` |
| `num_threads` | `int` | the number of threads to use for computation. Default = 4. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `_NUMERIC_DEFAULT_NUM_THREADS` |
| `backend` | `BackendLike` | which backend to use. `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `padded` | `ndarray` | padded array |

Examples:

```python
x  # array of shape [2, 3, 4]
padded = pad_to_divisible(x, 6)  # pad to shape [6, 6, 6]
padded = pad_to_divisible(x, [4, 3], axis=[0, 1], ratio=1)  # pad first 2 axes on the left, shape - [4, 3, 4]
padded = pad_to_divisible(x, 3, remainder=1)  # pad to shape [4, 4, 4]
```
Source code in imops/pad.py
def pad_to_divisible(
    x: np.ndarray,
    divisor: AxesLike,
    axis: AxesLike = None,
    padding_values: Union[AxesParams, Callable] = 0,
    ratio: AxesParams = 0.5,
    remainder: AxesLike = 0,
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Pad `x` to be divisible by `divisor` along the `axis`.

    Parameters
    ----------
    x: np.ndarray
        n-dimensional array to pad
    divisor: AxesLike
        float or sequence of floats an incoming array shape will be divisible by
    axis: AxesLike
        axis along which the array will be padded. If None - the last `len(divisor)` axes are used
    padding_values: Union[AxesParams, Callable]
        values to pad with. If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
    ratio: AxesParams
        float or sequence of floats describing what proportion of padding to apply on the left sides of padding axes.
        Remaining ratio of padding will be applied on the right sides
    remainder: AxesLike
        `x` will be padded such that its shape gives the remainder `remainder` when divided by `divisor`
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    padded: np.ndarray
        padded array

    Examples
    --------
    ```python
    x  # array of shape [2, 3, 4]
    padded = pad_to_divisible(x, 6)  # pad to shape [6, 6, 6]
    padded = pad_to_divisible(x, [4, 3], axis=[0, 1], ratio=1)  # pad first 2 axes on the left, shape - [4, 3, 4]
    padded = pad_to_divisible(x, 3, remainder=1)  # pad to shape [4, 4, 4]
    ```
    """
    x = np.asarray(x)
    axis = axis_from_dim(axis, x.ndim)
    divisor, remainder, ratio = broadcast_to_axis(axis, divisor, remainder, ratio)

    assert np.all(remainder >= 0)
    shape = np.maximum(np.array(x.shape)[list(axis)], remainder)

    return pad_to_shape(
        x, shape + (remainder - shape) % divisor, axis, padding_values, ratio, num_threads=num_threads, backend=backend
    )
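
A common use is padding inputs to a shape divisible by a power of two before a convolutional network; the divisor and shapes below are arbitrary illustrations:

```python
import numpy as np
from imops.pad import pad_to_divisible

x = np.zeros((2, 3, 100))

# make every axis divisible by 16, e.g. before feeding a conv net
print(pad_to_divisible(x, 16).shape)                         # (16, 16, 112)

# only the last two axes, with all of the padding on the right
print(pad_to_divisible(x, 16, axis=[1, 2], ratio=0).shape)   # (2, 16, 112)
```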

imops.pad.restore_crop(x, box, shape, padding_values=0, num_threads=_NUMERIC_DEFAULT_NUM_THREADS, backend=None)

Pad x to match shape. The left padding is taken equal to box's start.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `ndarray` | n-dimensional array to pad | *required* |
| `box` | `ndarray` | array of shape `(2, x.ndim)` describing crop boundaries | *required* |
| `shape` | `AxesLike` | shape to restore crop to | *required* |
| `padding_values` | `Union[AxesParams, Callable]` | values to pad with. If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used | `0` |
| `num_threads` | `int` | the number of threads to use for computation. Default = 4. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `_NUMERIC_DEFAULT_NUM_THREADS` |
| `backend` | `BackendLike` | which backend to use. `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `padded` | `ndarray` | padded array |

Examples:

```python
x  # array of shape [2, 3, 4]
padded = restore_crop(x, np.array([[0, 0, 0], [2, 3, 4]]), [4, 4, 4])  # pad to shape [4, 4, 4]
padded = restore_crop(x, np.array([[0, 0, 0], [1, 1, 1]]), [4, 4, 4])  # fail, box is inconsistent with an array
padded = restore_crop(x, np.array([[1, 2, 3], [3, 5, 7]]), [3, 5, 7])  # pad to shape [3, 5, 7]
```
Source code in imops/pad.py
def restore_crop(
    x: np.ndarray,
    box: np.ndarray,
    shape: AxesLike,
    padding_values: Union[AxesParams, Callable] = 0,
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Pad `x` to match `shape`. The left padding is taken equal to `box`'s start.

    Parameters
    ----------
    x: np.ndarray
        n-dimensional array to pad
    box: np.ndarray
        array of shape (2, x.ndim) describing crop boundaries
    shape: AxesLike
        shape to restore crop to
    padding_values: Union[AxesParams, Callable]
        values to pad with. If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    padded: np.ndarray
        padded array

    Examples
    --------
    ```python
    x  # array of shape [2, 3, 4]
    padded = restore_crop(x, np.array([[0, 0, 0], [2, 3, 4]]), [4, 4, 4])  # pad to shape [4, 4, 4]
    padded = restore_crop(x, np.array([[0, 0, 0], [1, 1, 1]]), [4, 4, 4])  # fail, box is inconsistent with an array
    padded = restore_crop(x, np.array([[1, 2, 3], [3, 5, 7]]), [3, 5, 7])  # pad to shape [3, 5, 7]
    ```
    """
    start, stop = np.asarray(box)

    assert len(shape) == x.ndim
    assert len(start) == len(stop) == x.ndim

    x = np.asarray(x)

    if (stop > shape).any() or (stop - start != x.shape).any():
        raise ValueError(
            f'The input array (of shape {x.shape}) was not obtained by cropping a '
            f'box {start, stop} from the shape {shape}.'
        )

    padding = np.array([start, shape - stop], dtype=int).T
    x = pad(x, padding, padding_values=padding_values, num_threads=num_threads, backend=backend)
    assert all(np.array(x.shape) == shape)

    return x
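
For in-bounds boxes, `restore_crop` inverts `crop_to_box`: a patch cropped with some box can be placed back into a zero-padded array of the original shape. A sketch with an arbitrary box:

```python
import numpy as np
from imops.crop import crop_to_box
from imops.pad import restore_crop

x = np.random.rand(4, 5, 6)
box = np.array([[1, 0, 2], [3, 4, 6]])

patch = crop_to_box(x, box)                  # shape (2, 4, 4)
restored = restore_crop(patch, box, x.shape) # zero-padded back to (4, 5, 6)
assert restored.shape == x.shape
```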

imops.zoom.zoom(x, scale_factor, axis=None, order=1, fill_value=0, num_threads=-1, backend=None)

Rescale x according to scale_factor along the axis.

Uses a fast parallelizable implementation for fp32 and fp64 inputs (and for bool, uint8/16/32 and int16/32/64 inputs when order == 0), with ndim <= 4 and order 0 or 1.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `ndarray` | n-dimensional array | *required* |
| `scale_factor` | `AxesParams` | float or sequence of floats describing how to scale along axes | *required* |
| `axis` | `AxesLike` | axis along which the array will be scaled | `None` |
| `order` | `int` | order of interpolation | `1` |
| `fill_value` | `Union[float, Callable]` | value to fill past edges. If Callable (e.g. `numpy.min`) - `fill_value(x)` will be used | `0` |
| `num_threads` | `int` | the number of threads to use for computation. Default = the cpu count. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `-1` |
| `backend` | `BackendLike` | which backend to use. `numba`, `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `zoomed` | `ndarray` | zoomed array |

Examples:

```python
zoomed = zoom(x, 2, axis=[0, 1])  # 3d array
zoomed = zoom(x, [1, 2, 3])  # different scales along each axis
zoomed = zoom(x.astype(int), 2)  # will fall back to scipy's implementation because of int dtype
```
Source code in imops/zoom.py
def zoom(
    x: np.ndarray,
    scale_factor: AxesParams,
    axis: AxesLike = None,
    order: int = 1,
    fill_value: Union[float, Callable] = 0,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Rescale `x` according to `scale_factor` along the `axis`.

    Uses a fast parallelizable implementation for fp32 and fp64 inputs (and for bool, uint8/16/32 and int16/32/64
    inputs when order == 0), with ndim <= 4 and order 0 or 1.

    Parameters
    ----------
    x: np.ndarray
        n-dimensional array
    scale_factor: AxesParams
        float or sequence of floats describing how to scale along axes
    axis: AxesLike
        axis along which array will be scaled
    order: int
        order of interpolation
    fill_value: float | Callable
        value to fill past edges. If Callable (e.g. `numpy.min`) - `fill_value(x)` will be used
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `numba`, `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    zoomed: np.ndarray
        zoomed array

    Examples
    --------
    ```python
    zoomed = zoom(x, 2, axis=[0, 1])  # 3d array
    zoomed = zoom(x, [1, 2, 3])  # different scales along each axes
    zoomed = zoom(x.astype(int), 2)  # will fall back to scipy's implementation because of int dtype
    ```
    """
    x = np.asarray(x)
    axis, scale_factor = broadcast_axis(axis, x.ndim, scale_factor)
    scale_factor = fill_by_indices(np.ones(x.ndim, 'float64'), scale_factor, axis)

    if callable(fill_value):
        fill_value = fill_value(x)

    # TODO: does `fill_value/cval` change anything?
    return _zoom(x, scale_factor, order=order, cval=fill_value, num_threads=num_threads, backend=backend)
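
An illustrative sketch; the input is random and the shape comments assume scipy-style rounding of `shape * scale_factor`:

```python
import numpy as np
from imops.zoom import zoom

x = np.random.rand(64, 64, 32).astype(np.float32)

# isotropic upsampling of the first two axes, using 4 threads
zoomed = zoom(x, 2, axis=[0, 1], num_threads=4)
print(zoomed.shape)  # (128, 128, 32)

# nearest-neighbour downsampling of the whole volume
small = zoom(x, [0.5, 0.5, 0.5], order=0)
print(small.shape)   # (32, 32, 16)
```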

imops.zoom.zoom_to_shape(x, shape, axis=None, order=1, fill_value=0, num_threads=-1, backend=None)

Rescale x to match shape along the axis.

Uses a fast parallelizable implementation for fp32 and fp64 inputs (and for bool, uint8/16/32 and int16/32/64 inputs when order == 0), with ndim <= 4 and order 0 or 1.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `ndarray` | n-dimensional array | *required* |
| `shape` | `AxesLike` | float or sequence of floats describing desired lengths along axes | *required* |
| `axis` | `AxesLike` | axis along which the array will be scaled | `None` |
| `order` | `int` | order of interpolation | `1` |
| `fill_value` | `Union[float, Callable]` | value to fill past edges. If Callable (e.g. `numpy.min`) - `fill_value(x)` will be used | `0` |
| `num_threads` | `int` | the number of threads to use for computation. Default = the cpu count. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `-1` |
| `backend` | `BackendLike` | which backend to use. `numba`, `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `zoomed` | `ndarray` | zoomed array |

Examples:

```python
zoomed = zoom_to_shape(x, [3, 4, 5])  # 3d array
zoomed = zoom_to_shape(x, [6, 7], axis=[1, 2])  # zoom to shape along specified axes
zoomed = zoom_to_shape(x.astype(int), [3, 4, 5])  # will fall back to scipy's implementation because of int dtype
```
Source code in imops/zoom.py
def zoom_to_shape(
    x: np.ndarray,
    shape: AxesLike,
    axis: AxesLike = None,
    order: int = 1,
    fill_value: Union[float, Callable] = 0,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Rescale `x` to match `shape` along the `axis`.

    Uses a fast parallelizable implementation for fp32 and fp64 inputs (and for bool, uint8/16/32 and int16/32/64
    inputs when order == 0), with ndim <= 4 and order 0 or 1.

    Parameters
    ----------
    x: np.ndarray
        n-dimensional array
    shape: AxesLike
        float or sequence of floats describing desired lengths along axes
    axis: AxesLike
        axis along which array will be scaled
    order: int
        order of interpolation
    fill_value: float | Callable
        value to fill past edges. If Callable (e.g. `numpy.min`) - `fill_value(x)` will be used
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `numba`, `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    zoomed: np.ndarray
        zoomed array

    Examples
    --------
    ```python
    zoomed = zoom_to_shape(x, [3, 4, 5])  # 3d array
    zoomed = zoom_to_shape(x, [6, 7], axis=[1, 2])  # zoom to shape along specified axes
    zoomed = zoom_to_shape(x.astype(int), [3, 4, 5])  # will fall back to scipy's implementation because of int dtype
    ```
    """
    x = np.asarray(x)
    axis, shape = broadcast_axis(axis, x.ndim, shape)
    old_shape = np.array(x.shape, 'float64')
    new_shape = np.array(fill_by_indices(x.shape, shape, axis), 'float64')

    return zoom(
        x,
        new_shape / old_shape,
        range(x.ndim),
        order=order,
        fill_value=fill_value,
        num_threads=num_threads,
        backend=backend,
    )
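
A sketch of resizing to an exact target shape; the input array and target shapes are arbitrary:

```python
import numpy as np
from imops.zoom import zoom_to_shape

x = np.random.rand(10, 20, 30).astype(np.float32)

# linear interpolation to an exact target shape
resized = zoom_to_shape(x, [16, 16, 16])
print(resized.shape)  # (16, 16, 16)

# only change the last two axes
resized = zoom_to_shape(x, [40, 60], axis=[1, 2])
print(resized.shape)  # (10, 40, 60)
```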

imops.interp1d.interp1d

Faster parallelizable version of scipy.interpolate.interp1d for fp32 / fp64 inputs.

Works faster only for ndim <= 3. Shares interface with scipy.interpolate.interp1d except for num_threads and backend arguments.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `ndarray` | 1-dimensional array of real values (aka coordinates) | *required* |
| `y` | `ndarray` | n-dimensional array of real values. The length of `y` along the interpolation axis must be equal to the length of `x` | *required* |
| `kind` | `Union[int, str]` | specifies the kind of interpolation as a string or as an integer specifying the order of interpolation to use. Only `kind=1` and `'linear'` are fast and parallelizable; other kinds fall back to the `scipy` implementation | `'linear'` |
| `axis` | `int` | specifies the axis of `y` along which to interpolate. Interpolation defaults to the last axis of `y` | `-1` |
| `copy` | `bool` | if True, the class makes internal copies of `x` and `y`. If False, references to `x` and `y` are used | `True` |
| `bounds_error` | `bool` | if True, a ValueError is raised any time interpolation is attempted on a value outside of the range of `x` where extrapolation is necessary. If False, out of bounds values are assigned `fill_value`. By default, an error is raised unless `fill_value='extrapolate'` | `None` |
| `fill_value` | `Union[float, str]` | if a float, this value will be used to fill in for requested points outside of the data range. If not provided, the default is NaN. If 'extrapolate', values for points outside of the data range will be extrapolated | `nan` |
| `assume_sorted` | `bool` | if False, values of `x` can be in any order and they are sorted first. If True, `x` has to be an array of monotonically increasing values | `False` |
| `num_threads` | `int` | the number of threads to use for computation. Default = the cpu count. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `-1` |
| `backend` | `BackendLike` | which backend to use. `numba`, `cython` and `scipy` are available, `cython` is used by default | `None` |

Methods:

| Name | Description |
| --- | --- |
| `__call__` | Evaluate the interpolant |

Examples:

```python
import numpy as np
from imops.interp1d import interp1d
x = np.arange(0, 10)
y = np.exp(-x/3.0)
f = interp1d(x, y)
xnew = np.arange(0, 9, 0.1)
ynew = f(xnew)   # use interpolation function returned by `interp1d`
```
Source code in imops/interp1d.py
class interp1d:
    """
    Faster parallelizable version of `scipy.interpolate.interp1d` for fp32 / fp64 inputs.

    Works faster only for ndim <= 3. Shares interface with `scipy.interpolate.interp1d` except for `num_threads` and
    `backend` arguments.

    Parameters
    ----------
    x: np.ndarray
        1-dimensional array of real values (aka coordinates)
    y: np.ndarray
        n-dimensional array of real values. The length of y along the interpolation axis must be equal to the x length
    kind: Union[int, str]
        specifies the kind of interpolation as a string or as an integer specifying the order of interpolation to use.
        Only `kind=1` and `'linear'` are fast and parallelizable, other kinds will force to use `scipy` implementation
    axis: int
        specifies the axis of y along which to interpolate. Interpolation defaults to the last axis of y
    copy: bool
        if True, the class makes internal copies of x and y. If False, references to x and y are used
    bounds_error: bool
        if True, a ValueError is raised any time interpolation is attempted on a value outside of the range of x where
        extrapolation is necessary. If False, out of bounds values are assigned fill_value. By default, an error is
        raised unless fill_value='extrapolate'
    fill_value: Union[float, str]
        if a float, this value will be used to fill in for requested points outside of the data range. If not provided,
        then the default is NaN. If 'extrapolate', values for points outside of the data range will be extrapolated
    assume_sorted: bool
        if False, values of x can be in any order and they are sorted first. If True, x has to be an array of
        monotonically increasing values
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `numba`, `cython` and `scipy` are available, `cython` is used by default

    Methods
    -------
    __call__

    Examples
    --------
    ```python
    import numpy as np
    from imops.interp1d import interp1d
    x = np.arange(0, 10)
    y = np.exp(-x/3.0)
    f = interp1d(x, y)
    xnew = np.arange(0, 9, 0.1)
    ynew = f(xnew)   # use interpolation function returned by `interp1d`
    ```
    """

    def __init__(
        self,
        x: np.ndarray,
        y: np.ndarray,
        kind: Union[int, str] = 'linear',
        axis: int = -1,
        copy: bool = True,
        bounds_error: bool = None,
        fill_value: Union[float, str] = np.nan,
        assume_sorted: bool = False,
        num_threads: int = -1,
        backend: BackendLike = None,
    ) -> None:
        backend = resolve_backend(backend, warn_stacklevel=3)
        if backend.name not in ('Scipy', 'Numba', 'Cython'):
            raise ValueError(f'Unsupported backend "{backend.name}".')

        self.backend = backend
        self.dtype = y.dtype
        self.num_threads = num_threads

        if backend.name == 'Scipy':
            self.scipy_interp1d = scipy_interp1d(x, y, kind, axis, copy, bounds_error, fill_value, assume_sorted)
        elif self.dtype not in (np.float32, np.float64) or y.ndim > 3 or kind not in ('linear', 1):
            warn(
                "Fast interpolation is only supported for ndim<=3, dtype=float32 or float64, order=1 or 'linear'. "
                "Falling back to scipy's implementation.",
                stacklevel=2,
            )
            self.scipy_interp1d = scipy_interp1d(x, y, kind, axis, copy, bounds_error, fill_value, assume_sorted)
        else:
            if len(x) != y.shape[axis]:
                raise ValueError(
                    f'x and y arrays must be equal in length along interpolation axis: {len(x)} vs {y.shape[axis]}.'
                )

            if bounds_error and fill_value == 'extrapolate':
                raise ValueError('Cannot extrapolate and raise at the same time.')

            if fill_value == 'extrapolate' and len(x) < 2 or y.shape[axis] < 2:
                raise ValueError('x and y arrays must have at least 2 entries.')

            if fill_value == 'extrapolate':
                self.bounds_error = False
            else:
                self.bounds_error = True if bounds_error is None else bounds_error

            self.axis = axis

            if axis not in (-1, y.ndim - 1):
                y = np.swapaxes(y, -1, axis)

            self.fill_value = fill_value
            self.scipy_interp1d = None
            # FIXME: how to accurately pass `num_threads` and `backend` arguments to `copy`?
            self.x = _copy(x, order='C') if copy else x
            self.n_dummy = 3 - y.ndim
            self.y = y[(None,) * self.n_dummy] if self.n_dummy else y

            if copy:
                self.y = _copy(self.y, order='C')

            self.assume_sorted = assume_sorted

            if backend.name == 'Cython':
                self.src_interp1d = cython_fast_interp1d if backend.fast else cython_interp1d

            if backend.name == 'Numba':
                from numba import njit

                from .src._numba_zoom import _interp1d as numba_interp1d

                njit_kwargs = {kwarg: getattr(backend, kwarg) for kwarg in backend.__dataclass_fields__.keys()}
                self.src_interp1d = njit(**njit_kwargs)(numba_interp1d)

    def __call__(self, x_new: np.ndarray) -> np.ndarray:
        """
        Evaluate the interpolant

        Parameters
        ----------
        x_new: np.ndarray
            1d array points to evaluate the interpolant at.

        Returns
        -------
        y_new: np.ndarray
            interpolated values. Shape is determined by replacing the interpolation axis in the original array with
            the shape of x
        """
        num_threads = normalize_num_threads(self.num_threads, self.backend, warn_stacklevel=3)

        if self.scipy_interp1d is not None:
            return self.scipy_interp1d(x_new)

        extrapolate = self.fill_value == 'extrapolate'
        args = () if self.backend.name in ('Numba',) else (num_threads,)

        if self.backend.name == 'Numba':
            from numba import get_num_threads, set_num_threads

            old_num_threads = get_num_threads()
            set_num_threads(num_threads)
        # TODO: Figure out how to properly handle multiple type signatures in Cython and remove `.astype`-s
        out = self.src_interp1d(
            self.y,
            self.x.astype(np.float64, copy=False),
            x_new.astype(np.float64, copy=False),
            self.bounds_error,
            0.0 if extrapolate else self.fill_value,
            extrapolate,
            self.assume_sorted,
            *args,
        )

        if self.backend.name == 'Numba':
            set_num_threads(old_num_threads)

        out = out.astype(max(self.y.dtype, self.x.dtype, x_new.dtype, key=lambda x: x.type(0).itemsize), copy=False)

        if self.n_dummy:
            out = out[(0,) * self.n_dummy]
        if self.axis not in (-1, out.ndim - 1):
            out = np.swapaxes(out, -1, self.axis)
        # FIXME: fix behaviour with np.inf-s
        if np.isnan(out).any():
            if not np.isinf(out).any():
                raise RuntimeError("Can't decide how to handle nans in the output.")

            have_neg = np.isneginf(out).any()
            have_pos = np.isposinf(out).any()

            if have_pos and have_neg:
                raise RuntimeError("Can't decide how to handle nans in the output.")

            if have_pos:
                return np.nan_to_num(out, copy=False, nan=np.inf, posinf=np.inf)

            return np.nan_to_num(out, copy=False, nan=-np.inf, neginf=-np.inf)

        return out

__call__(x_new)

Evaluate the interpolant

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x_new` | `ndarray` | 1d array of points to evaluate the interpolant at | *required* |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `y_new` | `ndarray` | interpolated values. The shape is determined by replacing the interpolation axis in the original array with the shape of `x_new` |

Source code in imops/interp1d.py
def __call__(self, x_new: np.ndarray) -> np.ndarray:
    """
    Evaluate the interpolant

    Parameters
    ----------
    x_new: np.ndarray
        1d array points to evaluate the interpolant at.

    Returns
    -------
    y_new: np.ndarray
        interpolated values. Shape is determined by replacing the interpolation axis in the original array with
        the shape of x
    """
    num_threads = normalize_num_threads(self.num_threads, self.backend, warn_stacklevel=3)

    if self.scipy_interp1d is not None:
        return self.scipy_interp1d(x_new)

    extrapolate = self.fill_value == 'extrapolate'
    args = () if self.backend.name in ('Numba',) else (num_threads,)

    if self.backend.name == 'Numba':
        from numba import get_num_threads, set_num_threads

        old_num_threads = get_num_threads()
        set_num_threads(num_threads)
    # TODO: Figure out how to properly handle multiple type signatures in Cython and remove `.astype`-s
    out = self.src_interp1d(
        self.y,
        self.x.astype(np.float64, copy=False),
        x_new.astype(np.float64, copy=False),
        self.bounds_error,
        0.0 if extrapolate else self.fill_value,
        extrapolate,
        self.assume_sorted,
        *args,
    )

    if self.backend.name == 'Numba':
        set_num_threads(old_num_threads)

    out = out.astype(max(self.y.dtype, self.x.dtype, x_new.dtype, key=lambda x: x.type(0).itemsize), copy=False)

    if self.n_dummy:
        out = out[(0,) * self.n_dummy]
    if self.axis not in (-1, out.ndim - 1):
        out = np.swapaxes(out, -1, self.axis)
    # FIXME: fix behaviour with np.inf-s
    if np.isnan(out).any():
        if not np.isinf(out).any():
            raise RuntimeError("Can't decide how to handle nans in the output.")

        have_neg = np.isneginf(out).any()
        have_pos = np.isposinf(out).any()

        if have_pos and have_neg:
            raise RuntimeError("Can't decide how to handle nans in the output.")

        if have_pos:
            return np.nan_to_num(out, copy=False, nan=np.inf, posinf=np.inf)

        return np.nan_to_num(out, copy=False, nan=-np.inf, neginf=-np.inf)

    return out
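
A sketch showing the fast path (float32, ndim <= 3, linear interpolation) on a batch of signals; the signal itself is invented for illustration:

```python
import numpy as np
from imops.interp1d import interp1d

# a batch of identical 1d signals, interpolated in parallel along the last axis
x = np.linspace(0, 1, 50)
y = np.sin(2 * np.pi * x)[None].repeat(16, axis=0).astype(np.float32)

f = interp1d(x, y, axis=-1, fill_value='extrapolate', num_threads=-1)
x_new = np.linspace(-0.1, 1.1, 200)  # slightly outside the data range, hence 'extrapolate'
y_new = f(x_new)
print(y_new.shape)  # (16, 200)
```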

imops.interp2d.Linear2DInterpolator

Bases: Linear2DInterpolatorCpp

2D Delaunay triangulation and parallel linear interpolation

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `points` | `ndarray` | 2-D array of data point coordinates | *required* |
| `values` | `ndarray` | 1-D array of fp32/fp64 values | `None` |
| `num_threads` | `int` | the number of threads to use for computation; if a negative value is passed, cpu count + num_threads + 1 threads will be used | `1` |
| `triangles` | `ndarray` | optional precomputed triangulation in the form of an array of arrays of point indices | `None` |

Methods:

| Name | Description |
| --- | --- |
| `__call__` | Evaluate the interpolant |

Examples:

```python
n, m = 1024, 2
points = np.random.randint(low=0, high=1024, size=(n, m))
points = np.unique(points, axis=0)
x_points = points[: n // 2]
values = np.random.uniform(low=0.0, high=1.0, size=(len(x_points),))
interp_points = points[n // 2:]
num_threads = -1  # will be equal to num of CPU cores
interpolator = Linear2DInterpolator(x_points, values, num_threads)
# You can also pass values to __call__, overriding the ones passed to __init__
interp_values = interpolator(interp_points, values + 1.0, fill_value=0.0)
```
Source code in imops/interp2d.py
class Linear2DInterpolator(Linear2DInterpolatorCpp):
    """
    2D Delaunay triangulation and parallel linear interpolation

    Parameters
    ----------
    points: np.ndarray
        2-D array of data point coordinates
    values: np.ndarray
        1-D array of fp32/fp64 values
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used
    triangles: np.ndarray
        optional precomputed triangulation in the form of array or arrays of points indices

    Methods
    -------
    __call__

    Examples
    --------
    ```python
    n, m = 1024, 2
    points = np.random.randint(low=0, high=1024, size=(n, m))
    points = np.unique(points, axis=0)
    x_points = points[: n // 2]
    values = np.random.uniform(low=0.0, high=1.0, size=(len(x_points),))
    interp_points = points[n // 2:]
    num_threads = -1  # will be equal to num of CPU cores
    interpolator = Linear2DInterpolator(x_points, values, num_threads)
    # Also you can pass values to __call__ and rewrite the ones that were passed to __init__
    interp_values = interpolator(interp_points, values + 1.0, fill_value=0.0)
    ```
    """

    def __init__(
        self,
        points: np.ndarray,
        values: np.ndarray = None,
        num_threads: int = 1,
        triangles: np.ndarray = None,
        **kwargs,
    ):
        if triangles is not None:
            if not isinstance(triangles, np.ndarray):
                raise TypeError(f'Wrong type of `triangles` argument, expected np.ndarray. Got {type(triangles)}')
            if triangles.ndim != 2 or triangles.shape[1] != 3:
                raise ValueError('Passed `triangles` argument has an incorrect shape')

        if not isinstance(points, np.ndarray):
            raise TypeError(f'Wrong type of `points` argument, expected np.ndarray. Got {type(points)}')

        if points.ndim != 2 or points.shape[1] != 2:
            raise ValueError('Passed `points` argument has an incorrect shape')

        if values is not None:
            if not isinstance(values, np.ndarray):
                raise TypeError(f'Wrong type of `values` argument, expected np.ndarray. Got {type(values)}')

            if values.ndim > 1:
                raise ValueError(f'Wrong shape of `values` argument, expected ndim=1. Got shape {values.shape}')

        super().__init__(points, num_threads, triangles)
        self.kdtree = KDTree(data=points, **kwargs)
        self.values = values
        # FIXME: add backend dispatch
        self.num_threads = normalize_num_threads(num_threads, Cython(), warn_stacklevel=3)

    def __call__(self, points: np.ndarray, values: np.ndarray = None, fill_value: float = 0.0) -> np.ndarray:
        """
        Evaluate the interpolant

        Parameters
        ----------
        points: np.ndarray
            2-D array of data point coordinates to interpolate at
        values: np.ndarray
            1-D array of fp32/fp64 values to use at initial points
        fill_value: float
            value to fill past edges

        Returns
        -------
        new_values: np.ndarray
            interpolated values at given points
        """
        if values is None:
            values = self.values

        if values is None:
            raise ValueError('`values` argument was never passed neither in __init__ or __call__ methods')

        if not isinstance(values, np.ndarray):
            raise TypeError(f'Wrong type of `values` argument, expected np.ndarray. Got {type(values)}')

        if values.ndim > 1:
            raise ValueError(f'Wrong shape of `values` argument, expected ndim=1. Got shape {values.shape}')

        if not isinstance(points, np.ndarray):
            raise TypeError(f'Wrong type of `points` argument, expected np.ndarray. Got {type(points)}')

        if points.ndim != 2 or points.shape[1] != 2:
            raise ValueError('Passed `points` argument has an incorrect shape')

        _, neighbors = self.kdtree.query(
            points, 1, **{'workers': self.num_threads} if python_version()[:3] != '3.6' else {}
        )

        return super().__call__(points, values, neighbors, fill_value)

__call__(points, values=None, fill_value=0.0)

Evaluate the interpolant

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `points` | `ndarray` | 2-D array of data point coordinates to interpolate at | *required* |
| `values` | `ndarray` | 1-D array of fp32/fp64 values to use at initial points | `None` |
| `fill_value` | `float` | value to fill past edges | `0.0` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `new_values` | `ndarray` | interpolated values at given points |

Source code in imops/interp2d.py
def __call__(self, points: np.ndarray, values: np.ndarray = None, fill_value: float = 0.0) -> np.ndarray:
    """
    Evaluate the interpolant

    Parameters
    ----------
    points: np.ndarray
        2-D array of data point coordinates to interpolate at
    values: np.ndarray
        1-D array of fp32/fp64 values to use at initial points
    fill_value: float
        value to fill past edges

    Returns
    -------
    new_values: np.ndarray
        interpolated values at given points
    """
    if values is None:
        values = self.values

    if values is None:
        raise ValueError('`values` argument was never passed neither in __init__ or __call__ methods')

    if not isinstance(values, np.ndarray):
        raise TypeError(f'Wrong type of `values` argument, expected np.ndarray. Got {type(values)}')

    if values.ndim > 1:
        raise ValueError(f'Wrong shape of `values` argument, expected ndim=1. Got shape {values.shape}')

    if not isinstance(points, np.ndarray):
        raise TypeError(f'Wrong type of `points` argument, expected np.ndarray. Got {type(points)}')

    if points.ndim != 2 or points.shape[1] != 2:
        raise ValueError('Passed `points` argument has an incorrect shape')

    _, neighbors = self.kdtree.query(
        points, 1, **{'workers': self.num_threads} if python_version()[:3] != '3.6' else {}
    )

    return super().__call__(points, values, neighbors, fill_value)
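
A sketch of interpolating scattered samples onto a regular grid; the point set is random and the shape comment assumes one output value per query point:

```python
import numpy as np
from imops.interp2d import Linear2DInterpolator

# scattered integer-coordinate samples on a 2d plane
rng = np.random.default_rng(0)
points = np.unique(rng.integers(0, 128, size=(512, 2)), axis=0)
values = rng.random(len(points)).astype(np.float32)

interpolator = Linear2DInterpolator(points, values, num_threads=-1)

# interpolate onto a dense grid; points outside the convex hull get fill_value
grid = np.stack(np.mgrid[0:128, 0:128], axis=-1).reshape(-1, 2)
grid_values = interpolator(grid, fill_value=np.nan)
print(grid_values.shape)  # (16384,)
```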

imops.morphology.binary_dilation(image, footprint=None, output=None, boxed=False, num_threads=-1, backend=None)

Fast parallelizable binary morphological dilation of an image

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `image` | `ndarray` | input image | *required* |
| `footprint` | `ndarray` | the neighborhood expressed as an n-D array of 1's and 0's. If None, use a cross-shaped footprint (connectivity=1) | `None` |
| `output` | `ndarray` | array of the same shape as input, into which the output is placed (must be C-contiguous). By default, a new array is created | `None` |
| `boxed` | `bool` | if True, dilation is performed on the cropped image, which may speed up computation depending on how localized the True pixels are. This may induce differences with the Scikit-Image implementation at border pixels if the footprint is exotic (has even shape or its center pixel is False) | `False` |
| `num_threads` | `int` | the number of threads to use for computation. Default = the cpu count. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `-1` |
| `backend` | `BackendLike` | which backend to use. `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `dilated` | `ndarray` | the result of morphological dilation |

Examples:

```python
dilated = binary_dilation(x)
```
Source code in imops/morphology.py
def binary_dilation(
    image: np.ndarray,
    footprint: np.ndarray = None,
    output: np.ndarray = None,
    boxed: bool = False,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Fast parallelizable binary morphological dilation of an image

    Parameters
    ----------
    image: np.ndarray
        input image
    footprint: np.ndarray
        the neighborhood expressed as a n-D array of 1's and 0's. If None, use a cross-shaped footprint (connectivity=1)
    output: np.ndarray
        array of the same shape as input, into which the output is placed (must be C-contiguous). By default, a new
        array is created
    boxed: bool
        if True, dilation is performed on cropped image which may speed up computation depending on how localized True
        pixels are. This may induce differences with Scikit-Image implementation at border pixels if footprint is
        exotic (has even shape or center pixel is False)
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    dilated: np.ndarray
        the result of morphological dilation

    Examples
    --------
    ```python
    dilated = binary_dilation(x)
    ```
    """
    return _binary_dilation(image, footprint, output, boxed, num_threads, backend)
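
A sketch with an invented mask, showing the default cross-shaped footprint and a custom square one:

```python
import numpy as np
from imops.morphology import binary_dilation

mask = np.zeros((128, 128), dtype=bool)
mask[60:68, 60:68] = True

# default cross-shaped footprint (connectivity=1)
dilated = binary_dilation(mask)

# custom 5x5 square footprint, using all available cores
dilated5 = binary_dilation(mask, footprint=np.ones((5, 5), dtype=bool), num_threads=-1)
```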

imops.morphology.binary_erosion(image, footprint=None, output=None, boxed=False, num_threads=-1, backend=None)

Fast parallelizable binary morphological erosion of an image

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `image` | `ndarray` | input image | *required* |
| `footprint` | `ndarray` | the neighborhood expressed as an n-D array of 1's and 0's. If None, use a cross-shaped footprint (connectivity=1) | `None` |
| `output` | `ndarray` | array of the same shape as input, into which the output is placed (must be C-contiguous). By default, a new array is created | `None` |
| `boxed` | `bool` | if True, erosion is performed on the cropped image, which may speed up computation depending on how localized the True pixels are. This may induce differences with the Scikit-Image implementation at border pixels if the footprint is exotic (has even shape or its center pixel is False) | `False` |
| `num_threads` | `int` | the number of threads to use for computation. Default = the cpu count. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `-1` |
| `backend` | `BackendLike` | which backend to use. `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `eroded` | `ndarray` | the result of morphological erosion |

Examples:

```python
eroded = binary_erosion(x)
```
Source code in imops/morphology.py
def binary_erosion(
    image: np.ndarray,
    footprint: np.ndarray = None,
    output: np.ndarray = None,
    boxed: bool = False,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Fast parallelizable binary morphological erosion of an image

    Parameters
    ----------
    image: np.ndarray
        input image
    footprint: np.ndarray
        the neighborhood expressed as a n-D array of 1's and 0's. If None, use a cross-shaped footprint (connectivity=1)
    output: np.ndarray
        array of the same shape as input, into which the output is placed (must be C-contiguous). By default, a new
        array is created
    boxed: bool
        if True, erosion is performed on cropped image which may speed up computation depending on how localized True
        pixels are. This may induce differences with Scikit-Image implementation at border pixels if footprint is
        exotic (has even shape or center pixel is False)
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    eroded: np.ndarray
        the result of morphological erosion

    Examples
    --------
    ```python
    eroded = binary_erosion(x)
    ```
    """
    return _binary_erosion(image, footprint, output, boxed, num_threads, backend)
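
A sketch illustrating `boxed=True`, which restricts the computation to the bounding box of the foreground; the mask is invented for illustration:

```python
import numpy as np
from imops.morphology import binary_erosion

mask = np.zeros((256, 256), dtype=bool)
mask[100:140, 100:140] = True  # small, localized foreground

# boxed=True can help a lot when the True pixels occupy a small region
eroded = binary_erosion(mask, boxed=True)
```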

imops.morphology.binary_opening(image, footprint=None, output=None, boxed=False, num_threads=-1, backend=None)

Fast parallelizable binary morphological opening of an image

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `image` | `ndarray` | input image | *required* |
| `footprint` | `ndarray` | the neighborhood expressed as an n-D array of 1's and 0's. If None, use a cross-shaped footprint (connectivity=1) | `None` |
| `output` | `ndarray` | array of the same shape as input, into which the output is placed (must be C-contiguous). By default, a new array is created | `None` |
| `boxed` | `bool` | if True, opening is performed on the cropped image, which may speed up computation depending on how localized the True pixels are. This may induce differences with the Scikit-Image implementation at border pixels if the footprint is exotic (has even shape or its center pixel is False) | `False` |
| `num_threads` | `int` | the number of threads to use for computation. Default = the cpu count. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `-1` |
| `backend` | `BackendLike` | which backend to use. `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `opened` | `ndarray` | the result of morphological opening |

Examples:

```python
opened = binary_opening(x)
```
Source code in imops/morphology.py
def binary_opening(
    image: np.ndarray,
    footprint: np.ndarray = None,
    output: np.ndarray = None,
    boxed: bool = False,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Fast parallelizable binary morphological opening of an image

    Parameters
    ----------
    image: np.ndarray
        input image
    footprint: np.ndarray
        the neighborhood expressed as a n-D array of 1's and 0's. If None, use a cross-shaped footprint (connectivity=1)
    output: np.ndarray
        array of the same shape as input, into which the output is placed (must be C-contiguous). By default, a new
        array is created
    boxed: bool
        if True, opening is performed on cropped image which may speed up computation depending on how localized True
        pixels are. This may induce differences with Scikit-Image implementation at border pixels if footprint is
        exotic (has even shape or center pixel is False)
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    opened: np.ndarray
        the result of morphological opening

    Examples
    --------
    ```python
    opened = binary_opening(x)
    ```
    """

    return _binary_opening(image, footprint, output, boxed, num_threads, backend)
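
Opening (erosion followed by dilation) removes structures smaller than the footprint; a sketch with an invented mask:

```python
import numpy as np
from imops.morphology import binary_opening

mask = np.zeros((128, 128), dtype=bool)
mask[30:90, 30:90] = True  # large object that should survive opening
mask[5, 5] = True          # isolated pixel that opening removes

opened = binary_opening(mask, footprint=np.ones((3, 3), dtype=bool))
assert not opened[5, 5] and opened[60, 60]
```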

imops.morphology.binary_closing(image, footprint=None, output=None, boxed=False, num_threads=-1, backend=None)

Fast parallelizable binary morphological closing of an image

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `image` | `ndarray` | input image | *required* |
| `footprint` | `ndarray` | the neighborhood expressed as an n-D array of 1's and 0's. If None, use a cross-shaped footprint (connectivity=1) | `None` |
| `output` | `ndarray` | array of the same shape as input, into which the output is placed (must be C-contiguous). By default, a new array is created | `None` |
| `boxed` | `bool` | if True, closing is performed on the cropped image, which may speed up computation depending on how localized the True pixels are. This may induce differences with the Scikit-Image implementation at border pixels if the footprint is exotic (has even shape or its center pixel is False) | `False` |
| `num_threads` | `int` | the number of threads to use for computation. Default = the cpu count. If a negative value is passed, cpu count + num_threads + 1 threads will be used | `-1` |
| `backend` | `BackendLike` | which backend to use. `cython` and `scipy` are available, `cython` is used by default | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `closed` | `ndarray` | the result of morphological closing |

Examples:

```python
closed = binary_closing(x)
```
Source code in imops/morphology.py
def binary_closing(
    image: np.ndarray,
    footprint: np.ndarray = None,
    output: np.ndarray = None,
    boxed: bool = False,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Fast parallelizable binary morphological closing of an image

    Parameters
    ----------
    image: np.ndarray
        input image
    footprint: np.ndarray
        the neighborhood expressed as a n-D array of 1's and 0's. If None, use a cross-shaped footprint (connectivity=1)
    output: np.ndarray
        array of the same shape as input, into which the output is placed (must be C-contiguous). By default, a new
        array is created
    boxed: bool
        if True, closing is performed on the cropped image, which may speed up computation depending on how localized True
        pixels are. This may induce differences with Scikit-Image implementation at border pixels if footprint is
        exotic (has even shape or center pixel is False)
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    closed: np.ndarray
        the result of morphological closing

    Examples
    --------
    ```python
    closed = binary_closing(x)
    ```
    """

    return _binary_closing(image, footprint, output, boxed, num_threads, backend)
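
A companion sketch with an illustrative toy mask: closing with a sufficiently large footprint fills the small hole inside the blob.

```python
import numpy as np
from imops.morphology import binary_closing

mask = np.zeros((64, 64), dtype=bool)
mask[20:40, 20:40] = True
mask[29:31, 29:31] = False  # small hole inside the blob

closed = binary_closing(mask, footprint=np.ones((5, 5), dtype=bool))
```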

imops.morphology.distance_transform_edt(image, sampling=None, return_distances=True, return_indices=False, num_threads=-1, backend=None)

Fast parallelizable Euclidean distance transform for <= 3D inputs

This function calculates the distance transform of the image, by replacing each foreground (non-zero) element, with its shortest distance to the background (any zero-valued element).

In addition to the distance transform, the feature transform can be calculated. In this case the index of the closest background element to each foreground element is returned in a separate array.

Parameters:

Name Type Description Default
image array_like

input data to transform. Can be any type but will be converted into binary: 1 wherever input equates to True, 0 elsewhere

required
sampling tuple of `image.ndim` floats

spacing of elements along each dimension. If a sequence, must be of length equal to the input rank; if a single number, this is used for all axes. If not specified, a grid spacing of unity is implied

None
return_distances bool

whether to calculate the distance transform. Default is True

True
return_indices bool

whether to calculate the feature transform. Default is False

False
num_threads int

the number of threads to use for computation. Default = the cpu count. If negative value passed cpu count + num_threads + 1 threads will be used

-1
backend BackendLike

which backend to use. cython and scipy are available, cython is used by default

None

Returns:

Name Type Description
distances float32 ndarray, optional

the calculated distance transform. Returned only when return_distances is True and distances is not supplied. It will have the same shape as the input array

indices int32 ndarray, optional

the calculated feature transform. It has an input-shaped array for each dimension of the input. See example below. Returned only when return_indices is True and indices is not supplied

Notes

The Euclidean distance transform gives values of the Euclidean distance:

                n
  y_i = sqrt(sum (x[i]-b[i])**2)
                i

where b[i] is the background point (value 0) with the smallest Euclidean distance to input points x[i], and n is the number of dimensions.

Examples:

import numpy as np
a = np.array(([0,1,1,1,1],
              [0,0,1,1,1],
              [0,1,1,1,1],
              [0,1,1,1,0],
              [0,1,1,0,0]))
distance_transform_edt(a)
array([[ 0.    ,  1.    ,  1.4142,  2.2361,  3.    ],
       [ 0.    ,  0.    ,  1.    ,  2.    ,  2.    ],
       [ 0.    ,  1.    ,  1.4142,  1.4142,  1.    ],
       [ 0.    ,  1.    ,  1.4142,  1.    ,  0.    ],
       [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])

With a sampling of 2 units along x, 1 along y:

distance_transform_edt(a, sampling=[2, 1])
array([[ 0.    ,  1.    ,  2.    ,  2.8284,  3.6056],
       [ 0.    ,  0.    ,  1.    ,  2.    ,  3.    ],
       [ 0.    ,  1.    ,  2.    ,  2.2361,  2.    ],
       [ 0.    ,  1.    ,  2.    ,  1.    ,  0.    ],
       [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])

Asking for indices as well:

edt, inds = distance_transform_edt(a, return_indices=True)
inds
array([[[0, 0, 1, 1, 3],
        [1, 1, 1, 1, 3],
        [2, 2, 1, 3, 3],
        [3, 3, 4, 4, 3],
        [4, 4, 4, 4, 4]],
       [[0, 0, 1, 1, 4],
        [0, 1, 1, 1, 4],
        [0, 0, 1, 4, 4],
        [0, 0, 3, 3, 4],
        [0, 0, 3, 3, 4]]])

Source code in imops/morphology.py
def distance_transform_edt(
    image: np.ndarray,
    sampling: Tuple[float] = None,
    return_distances: bool = True,
    return_indices: bool = False,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> Union[np.ndarray, Tuple[np.ndarray]]:
    """
    Fast parallelizable Euclidean distance transform for <= 3D inputs

    This function calculates the distance transform of the `image`, by
    replacing each foreground (non-zero) element, with its
    shortest distance to the background (any zero-valued element).

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element to each foreground element is returned in a separate array.

    Parameters
    ----------
    image : array_like
        input data to transform. Can be any type but will be converted
        into binary: 1 wherever input equates to True, 0 elsewhere
    sampling : tuple of `image.ndim` floats, optional
        spacing of elements along each dimension. If a sequence, must be of
        length equal to the input rank; if a single number, this is used for
        all axes. If not specified, a grid spacing of unity is implied
    return_distances : bool, optional
        whether to calculate the distance transform.
        Default is True
    return_indices : bool, optional
        whether to calculate the feature transform.
        Default is False
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    distances : float32 ndarray, optional
        the calculated distance transform. Returned only when
        `return_distances` is True and `distances` is not supplied.
        It will have the same shape as the input array
    indices : int32 ndarray, optional
        the calculated feature transform. It has an input-shaped array for each
        dimension of the input. See example below.
        Returned only when `return_indices` is True and `indices` is not
        supplied

    Notes
    -----
    The Euclidean distance transform gives values of the Euclidean
    distance::

                    n
      y_i = sqrt(sum (x[i]-b[i])**2)
                    i

    where b[i] is the background point (value 0) with the smallest
    Euclidean distance to input points x[i], and n is the
    number of dimensions.

    Examples
    --------
    import numpy as np
    a = np.array(([0,1,1,1,1],
                  [0,0,1,1,1],
                  [0,1,1,1,1],
                  [0,1,1,1,0],
                  [0,1,1,0,0]))
    distance_transform_edt(a)
    array([[ 0.    ,  1.    ,  1.4142,  2.2361,  3.    ],
           [ 0.    ,  0.    ,  1.    ,  2.    ,  2.    ],
           [ 0.    ,  1.    ,  1.4142,  1.4142,  1.    ],
           [ 0.    ,  1.    ,  1.4142,  1.    ,  0.    ],
           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])

    With a sampling of 2 units along x, 1 along y:

    distance_transform_edt(a, sampling=[2, 1])
    array([[ 0.    ,  1.    ,  2.    ,  2.8284,  3.6056],
           [ 0.    ,  0.    ,  1.    ,  2.    ,  3.    ],
           [ 0.    ,  1.    ,  2.    ,  2.2361,  2.    ],
           [ 0.    ,  1.    ,  2.    ,  1.    ,  0.    ],
           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])

    Asking for indices as well:

    edt, inds = distance_transform_edt(a, return_indices=True)
    inds
    array([[[0, 0, 1, 1, 3],
            [1, 1, 1, 1, 3],
            [2, 2, 1, 3, 3],
            [3, 3, 4, 4, 3],
            [4, 4, 4, 4, 4]],
           [[0, 0, 1, 1, 4],
            [0, 1, 1, 1, 4],
            [0, 0, 1, 4, 4],
            [0, 0, 3, 3, 4],
            [0, 0, 3, 3, 4]]])
    """
    backend = resolve_backend(backend, warn_stacklevel=3)
    if backend.name not in ('Scipy', 'Cython'):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

    if backend.name == 'Scipy':
        return scipy_distance_transform_edt(image, sampling, return_distances, return_indices)

    if image.ndim > 3:
        warn("Fast Euclidean Distance Transform is only supported for ndim<=3. Falling back to scipy's implementation.")
        return scipy_distance_transform_edt(image, sampling, return_distances, return_indices)

    if (not return_distances) and (not return_indices):
        raise RuntimeError('At least one of `return_distances`/`return_indices` must be True')
    if image.dtype != bool:
        image = np.atleast_1d(np.where(image, 1, 0))
    if sampling is not None:
        sampling = _ni_support._normalize_sequence(sampling, image.ndim)
        sampling = np.asarray(sampling, dtype=np.float64)
        if not sampling.flags.contiguous:
            sampling = sampling.copy()

    if return_indices:
        ft = np.zeros((image.ndim,) + image.shape, dtype=np.int32)
        euclidean_feature_transform(image, sampling, ft)

    if return_distances:
        if sampling is not None:
            dt = edt(image, anisotropy=sampling.astype(np.float32), parallel=num_threads)
        else:
            dt = edt(image, parallel=num_threads)

    result = []
    if return_distances:
        result.append(dt)
    if return_indices:
        result.append(ft)

    if len(result) == 2:
        return tuple(result)

    if len(result) == 1:
        return result[0]

    return None
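
A short sketch with an illustrative anisotropic 3D mask, showing `sampling` and the indices-only mode:

```python
import numpy as np
from imops.morphology import distance_transform_edt

mask = np.zeros((16, 64, 64), dtype=bool)
mask[4:12, 16:48, 16:48] = True

# anisotropic voxels, e.g. 2 mm between slices and 1 mm in-plane
distances = distance_transform_edt(mask, sampling=(2.0, 1.0, 1.0))

# feature transform only: per-dimension indices of the closest background voxel
indices = distance_transform_edt(mask, return_distances=False, return_indices=True)
```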

imops.measure.label(label_image, background=None, connectivity=None, return_num=False, return_labels=False, return_sizes=False, dtype=None)

Fast version of skimage.measure.label which optionally returns the number of connected components, labels and sizes. If 2 or more outputs are requested, a NamedTuple is returned.

Parameters:

Name Type Description Default
label_image ndarray

image to label

required
background int

consider all pixels with this value as background pixels, and label them as 0. By default, 0-valued pixels are considered as background pixels

None
connectivity int

maximum number of orthogonal hops to consider a pixel/voxel as a neighbor. Accepted values are ranging from 1 to input.ndim. If None, a full connectivity of input.ndim is used

None
return_num bool

whether to return the number of connected components

False
return_labels bool

whether to return assigned labels

False
return_sizes bool

whether to return sizes of connected components (excluding background)

False
dtype type

if specified, must be one of np.uint16, np.uint32 or np.uint64. If not specified, it will be automatically determined. Most of the time, you should leave this off so that the smallest safe dtype will be used. However, in some applications you can save an up-conversion in the next operation by outputting the appropriately sized type instead. Has no effect for python3.6

None

Returns:

Name Type Description
labeled_image ndarray

array of np.uint16, np.uint32 or np.uint64 numbers depending on the number of connected components and dtype

num_components int

number of connected components excluding background. Returned if return_num is True

labels ndarray

components labels. Returned if return_labels is True

sizes ndarray

components sizes. Returned if return_sizes is True

Examples:

labeled = label(x)
labeled, num_components, sizes = label(x, return_num=True, return_sizes=True)
out = label(x, return_labels=True, return_sizes=True)
out.labeled_image, out.labels, out.sizes  # output fields can be accessed this way
Source code in imops/measure.py
def label(
    label_image: np.ndarray,
    background: int = None,
    connectivity: int = None,
    return_num: bool = False,
    return_labels: bool = False,
    return_sizes: bool = False,
    dtype: type = None,
) -> Union[np.ndarray, NamedTuple]:
    """
    Fast version of `skimage.measure.label` which optionally returns the number of connected components, labels and sizes.
    If 2 or more outputs are requested, a `NamedTuple` is returned.

    Parameters
    ----------
    label_image: np.ndarray
        image to label
    background: int
        consider all pixels with this value as background pixels, and label them as 0. By default, 0-valued pixels are
        considered as background pixels
    connectivity: int
        maximum number of orthogonal hops to consider a pixel/voxel as a neighbor. Accepted values are ranging from 1
        to input.ndim. If None, a full connectivity of input.ndim is used
    return_num: bool
        whether to return the number of connected components
    return_labels: bool
        whether to return assigned labels
    return_sizes: bool
        whether to return sizes of connected components (excluding background)
    dtype:
        if specified, must be one of np.uint16, np.uint32 or np.uint64. If not specified, it will be automatically
        determined. Most of the time, you should leave this off so that the smallest safe dtype will be used. However,
        in some applications you can save an up-conversion in the next operation by outputting the appropriately sized
        type instead. Has no effect for python3.6

    Returns
    -------
    labeled_image: np.ndarray
        array of np.uint16, np.uint32 or np.uint64 numbers depending on the number of connected components and
        `dtype`
    num_components: int
        number of connected components excluding background. Returned if `return_num` is True
    labels: np.ndarray
        components labels. Returned if `return_labels` is True
    sizes: np.ndarray
        components sizes. Returned if `return_sizes` is True

    Examples
    --------
    ```python
    labeled = label(x)
    labeled, num_components, sizes = label(x, return_num=True, return_sizes=True)
    out = label(x, return_labels=True, return_sizes=True)
    out.labeled_image, out.labels, out.sizes  # output fields can be accessed this way
    ```
    """
    ndim = label_image.ndim
    connectivity = connectivity or ndim

    if not 1 <= connectivity <= ndim:
        raise ValueError(f'Connectivity for {ndim}D image should be in [1, ..., {ndim}]. Got {connectivity}.')

    if ndim > 3:
        warn("Fast label is only supported for ndim<=3, Falling back to scikit-image's implementation.", stacklevel=2)
        labeled_image, num_components = skimage_label(
            label_image, background=background, return_num=True, connectivity=connectivity
        )
        if dtype is not None:
            labeled_image = labeled_image.astype(dtype, copy=False)
    else:
        if ndim == 1:
            label_image = label_image[None]

        if background:
            label_image = remap(
                label_image,
                {background: 0, 0: background},
                preserve_missing_labels=True,
                in_place=False,
            )

        labeled_image, num_components = connected_components(
            label_image,
            connectivity=_SKIMAGE2CC3D[(ndim, connectivity)],
            return_N=True,
            **{'out_dtype': dtype} if python_version()[:3] != '3.6' else {},
        )

        if ndim == 1:
            labeled_image = labeled_image[0]

    res = [('labeled_image', labeled_image)]

    if return_num:
        res.append(('num_components', num_components))
    if return_labels:
        res.append(('labels', np.array(range(1, num_components + 1))))
    if return_sizes:
        _, sizes = unique(labeled_image, return_counts=True)
        res.append(('sizes', sizes[1:] if 0 in labeled_image else sizes))

    if len(res) == 1:
        return labeled_image

    return namedtuple('Labeling', [subres[0] for subres in res])(*[subres[1] for subres in res])
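
A small sketch on a toy array illustrating how `connectivity` changes the result; the input values are illustrative:

```python
import numpy as np
from imops.measure import label

x = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
])

labeled, num = label(x, return_num=True)                    # full (diagonal) connectivity: one component
labeled1, num1 = label(x, connectivity=1, return_num=True)  # orthogonal hops only: three components
```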

imops.measure.center_of_mass(array, labels=None, index=None, num_threads=-1, backend=None)

Calculate the center of mass of the values.

Works faster for ndim <= 3

Parameters:

Name Type Description Default
array ndarray

data from which to calculate center-of-mass. The masses can either be positive or negative

required
labels ndarray

labels for objects in input, as generated by imops.measure.label. Dimensions must be the same as input. If specified, index also must be specified and have same dtype

None
index Union[int, Sequence[int]]

labels for which to calculate centers-of-mass. If specified, labels also must be specified and have same dtype

None
num_threads int

the number of threads to use for computation. Default = the cpu count. If negative value passed cpu count + num_threads + 1 threads will be used. If labels and index are specified, only 1 thread will be used

-1
backend BackendLike

which backend to use. cython and scipy are available, cython is used by default

None

Returns:

Name Type Description
center_of_mass tuple, or list of tuples

coordinates of centers-of-mass

Examples:

center = center_of_mass(np.ones((2, 2)))  # (0.5, 0.5)
Source code in imops/measure.py
def center_of_mass(
    array: np.ndarray,
    labels: np.ndarray = None,
    index: Union[int, Sequence[int]] = None,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> Union[Tuple[float, ...], List[Tuple[float, ...]]]:
    """
    Calculate the center of mass of the values.

    Works faster for ndim <= 3

    Parameters
    ----------
    array: np.ndarray
        data from which to calculate center-of-mass. The masses can either be positive or negative
    labels: np.ndarray
        labels for objects in input, as generated by `imops.measure.label`. Dimensions must be the same as input. If
        specified, `index` also must be specified and have same dtype
    index: Union[int, Sequence[int]]
        labels for which to calculate centers-of-mass. If specified, `labels` also must be specified and have same dtype
    num_threads: int
        the number of threads to use for computation. Default = the cpu count. If negative value passed
        cpu count + num_threads + 1 threads will be used. If `labels` and `index` are specified, only 1 thread will be
        used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    center_of_mass: tuple, or list of tuples
        coordinates of centers-of-mass

    Examples
    --------
    ```python
    center = center_of_mass(np.ones((2, 2)))  # (0.5, 0.5)
    ```
    """
    if (labels is None) ^ (index is None):
        raise ValueError('`labels` and `index` should be both specified or both not specified.')

    backend = resolve_backend(backend, warn_stacklevel=3)

    if backend.name not in ('Scipy', 'Cython'):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

    if backend.name == 'Scipy':
        return scipy_center_of_mass(array, labels, index)

    ndim = array.ndim
    if ndim > 3:
        warn("Fast center-of-mass is only supported for ndim<=3. Falling back to scipy's implementation.", stacklevel=2)
        return scipy_center_of_mass(array, labels, index)

    if labels is None:
        src_center_of_mass = _fast_center_of_mass if backend.fast else _center_of_mass
    else:
        is_sequence = isinstance(index, (Sequence, np.ndarray))
        index = np.array([index] if not is_sequence else index)

        if labels.shape != array.shape:
            raise ValueError(f'`array` and `labels` must be the same shape, got {array.shape} and {labels.shape}.')

        if labels.dtype != index.dtype:
            raise ValueError(f'`labels` and `index` must have same dtype, got {labels.dtype} and {index.dtype}.')

        if len(index) != len(unique(index.astype(int, copy=False))):
            raise ValueError('`index` should consist of unique values.')

        if num_threads > 1:
            warn('Using single-threaded implementation as `labels` and `index` are specified.', stacklevel=2)

        src_center_of_mass = _fast_labeled_center_of_mass if backend.fast else _labeled_center_of_mass

    if array.dtype not in ('float32', 'float64'):
        array = array.astype(np.float32)

    n_dummy = 3 - ndim
    if n_dummy:
        array = array[(None,) * n_dummy]

    if labels is None:
        return tuple(src_center_of_mass(array, num_threads))[n_dummy:]

    output = [tuple(x)[n_dummy:] for x in src_center_of_mass(array, labels[(None,) * n_dummy], index)]

    return output if is_sequence else output[0]
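
A sketch of the labeled mode, assuming a toy mask with two separate blobs; note that `index` must share the dtype of `labels`:

```python
import numpy as np
from imops.measure import label, center_of_mass

mask = np.zeros((8, 8), dtype=bool)
mask[1:3, 1:3] = True
mask[5:8, 5:8] = True

labeled = label(mask)
centers = center_of_mass(
    mask.astype(np.float32),
    labels=labeled,
    index=np.asarray([1, 2], dtype=labeled.dtype),  # same dtype as `labels`
)
```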

imops.numeric.pointwise_add(nums, summand, output=None, num_threads=_NUMERIC_DEFAULT_NUM_THREADS, backend=None)

Perform pointwise addition between array and array or scalar.

Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

Parameters:

Name Type Description Default
nums ndarray

n-dimensional array

required
summand Union[array, int, float]

array of the same shape or scalar

required
output ndarray

array of the same shape as input, into which the output is placed. By default, a new array is created

None
num_threads int

the number of threads to use for computation. Default = 4. If negative value passed cpu count + num_threads + 1 threads will be used

_NUMERIC_DEFAULT_NUM_THREADS
backend BackendLike

which backend to use. cython and scipy are available, cython is used by default

None

Returns:

Name Type Description
sum ndarray

result of summation

Examples:

sum = pointwise_add(x, 1, x)  # inplace addition
sum = pointwise_add(x, 1, backend='Scipy')  # just `np.add`
sum = pointwise_add(x.astype('float32'), x.astype('float16'))  # will fail because of different dtypes
Source code in imops/numeric.py
def pointwise_add(
    nums: np.ndarray,
    summand: Union[np.array, int, float],
    output: np.ndarray = None,
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Perform pointwise addition between array and array or scalar.

    Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

    Parameters
    ----------
    nums: np.ndarray
        n-dimensional array
    summand: np.ndarray | int | float
        array of the same shape or scalar
    output: np.ndarray
        array of the same shape as input, into which the output is placed. By default, a new
        array is created
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    sum: np.ndarray
        result of summation

    Examples
    --------
    ```python
    sum = pointwise_add(x, 1, x)  # inplace addition
    sum = pointwise_add(x, 1, backend='Scipy')  # just `np.add`
    sum = pointwise_add(x.astype('float32'), x.astype('float16'))  # will fail because of different dtypes
    ```
    """
    backend = resolve_backend(backend, warn_stacklevel=3)
    if backend.name not in ('Scipy', 'Cython'):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    dtype = nums.dtype

    if dtype not in _TYPES:
        raise ValueError(f'Input array dtype must be one of {", ".join(_STR_TYPES)}, got {dtype}.')

    if output is None:
        output = np.empty_like(nums, dtype=dtype)
    elif output.shape != nums.shape:
        raise ValueError(f'Input array and output array shapes must be the same, got {nums.shape} vs {output.shape}.')
    elif dtype != output.dtype:
        raise ValueError(f'Input array and output array dtypes must be the same, got {dtype} vs {output.dtype}.')

    summand_is_array = isinstance(summand, np.ndarray)
    if summand_is_array:
        if dtype != summand.dtype:
            raise ValueError(f'Input and summand arrays must have same dtypes, got {dtype} vs {summand.dtype}.')
    elif not isinstance(summand, (*_TYPES, *(int, float))):
        raise ValueError(f'Summand dtype must be one of {", ".join(_STR_TYPES)}, got {type(summand)}.')
    else:
        summand = dtype.type(summand)

    ndim = nums.ndim
    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

    if backend.name == 'Scipy' or ndim > 4:
        np.add(nums, summand, out=output)
        return output

    is_fp16 = dtype == np.float16
    src_pointwise_add = _choose_cython_pointwise_add(ndim, summand_is_array, is_fp16, backend.fast)

    n_dummy = 3 - ndim if ndim <= 3 else 0

    if n_dummy:
        nums = nums[(None,) * n_dummy]
        output = output[(None,) * n_dummy]
        if summand_is_array:
            summand = summand[(None,) * n_dummy]

    if is_fp16:
        output = src_pointwise_add(
            nums.view(np.uint16), summand.view(np.uint16), output.view(np.uint16), num_threads
        ).view(np.float16)
    else:
        output = src_pointwise_add(nums, summand, output, num_threads)

    if n_dummy:
        output = output[(0,) * n_dummy]

    return output
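
A minimal sketch with illustrative random inputs, showing both an array summand with a preallocated output buffer and a scalar summand:

```python
import numpy as np
from imops.numeric import pointwise_add

x = np.random.rand(4, 64, 64).astype(np.float32)
y = np.random.rand(4, 64, 64).astype(np.float32)
buffer = np.empty_like(x)

total = pointwise_add(x, y, output=buffer)  # array + array into a preallocated buffer
shifted = pointwise_add(x, 1.0)             # array + scalar, a new array is allocated
```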

imops.numeric.fill_(nums, value, num_threads=_NUMERIC_DEFAULT_NUM_THREADS, backend=None)

Fill the array with a scalar value.

Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

Parameters:

Name Type Description Default
nums ndarray

n-dimensional array

required
value Union[number, int, float]

scalar

required
num_threads int

the number of threads to use for computation. Default = 4. If negative value passed cpu count + num_threads + 1 threads will be used

_NUMERIC_DEFAULT_NUM_THREADS
backend BackendLike

which backend to use. cython and scipy are available, cython is used by default

None

Examples:

fill_(x, 1)
fill_(np.empty((2, 3, 4)), 42)
fill_(x.astype('uint16'), 3)  # will fail because of unsupported uint16 dtype
Source code in imops/numeric.py
def fill_(
    nums: np.ndarray,
    value: Union[np.number, int, float],
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> None:
    """
    Fill the array with a scalar value.

    Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

    Parameters
    ----------
    nums: np.ndarray
        n-dimensional array
    value: np.number | int | float
        scalar
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Examples
    --------
    ```python
    fill_(x, 1)
    fill_(np.empty((2, 3, 4)), 42)
    fill_(x.astype('uint16'), 3)  # will fail because of unsupported uint16 dtype
    ```
    """
    backend = resolve_backend(backend, warn_stacklevel=3)
    if backend.name not in ('Scipy', 'Cython'):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    ndim = nums.ndim
    dtype = nums.dtype

    if dtype not in _TYPES or backend.name == 'Scipy' or ndim > 4:
        nums.fill(value)
        return

    is_fp16 = dtype == np.float16
    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)
    src_fill_ = _choose_cython_fill_(ndim, backend.fast)
    value = dtype.type(value)

    n_dummy = 3 - ndim if ndim <= 3 else 0

    if n_dummy:
        nums = nums[(None,) * n_dummy]

    if is_fp16:
        src_fill_(nums.view(np.uint16), value.view(np.uint16), num_threads)
    else:
        src_fill_(nums, value, num_threads)

    if n_dummy:
        nums = nums[(0,) * n_dummy]

imops.numeric.full(shape, fill_value, dtype=None, order='C', num_threads=_NUMERIC_DEFAULT_NUM_THREADS, backend=None)

Return a new array of given shape and dtype, filled with fill_value.

Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

Parameters:

Name Type Description Default
shape Union[int, Sequence[int]]

desired shape

required
fill_value Union[number, int, float]

scalar to fill array with

required
dtype Union[type, str]

desired dtype to which fill_value will be casted. If not specified, np.array(fill_value).dtype will be used

None
order str

whether to store multidimensional data in C or F contiguous order in memory

'C'
num_threads int

the number of threads to use for computation. Default = 4. If negative value passed cpu count + num_threads + 1 threads will be used

_NUMERIC_DEFAULT_NUM_THREADS
backend BackendLike

which backend to use. cython and scipy are available, cython is used by default

None

Examples:

x = full((2, 3, 4), 1.0)  # same as `np.ones((2, 3, 4))`
x = full((2, 3, 4), 1.5, dtype=int)  # same as np.ones((2, 3, 4), dtype=int)
x = full((2, 3, 4), 1, dtype='uint16')  # will fail because of unsupported uint16 dtype
Source code in imops/numeric.py
def full(
    shape: Union[int, Sequence[int]],
    fill_value: Union[np.number, int, float],
    dtype: Union[type, str] = None,
    order: str = 'C',
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Return a new array of given shape and dtype, filled with `fill_value`.

    Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

    Parameters
    ----------
    shape: int | Sequence[int]
        desired shape
    fill_value: np.number | int | float
        scalar to fill array with
    dtype: type | str
        desired dtype to which `fill_value` will be casted. If not specified, `np.array(fill_value).dtype` will be used
    order: str
        whether to store multidimensional data in C or F contiguous order in memory
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Examples
    --------
    ```python
    x = full((2, 3, 4), 1.0)  # same as `np.ones((2, 3, 4))`
    x = full((2, 3, 4), 1.5, dtype=int)  # same as np.ones((2, 3, 4), dtype=int)
    x = full((2, 3, 4), 1, dtype='uint16')  # will fail because of unsupported uint16 dtype
    ```
    """
    dtype = dtype or np.array(fill_value).dtype

    nums = np.empty(shape, dtype=dtype, order=order)
    fill_value = nums.dtype.type(fill_value)

    fill_(nums, fill_value, num_threads, backend)

    return nums
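
A short sketch with illustrative shapes, showing explicit and inferred dtypes:

```python
import numpy as np
from imops.numeric import full

zeros = full((4, 256, 256), 0, dtype=np.float32)  # like np.zeros((4, 256, 256), dtype=np.float32)
ones = full((2, 3), 1.0)                          # dtype inferred from `fill_value`, here float
```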

imops.numeric.copy(nums, output=None, order='K', num_threads=_NUMERIC_DEFAULT_NUM_THREADS, backend=None)

Return copy of the given array.

Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

Parameters:

Name Type Description Default
nums ndarray

n-dimensional array

required
output ndarray

array of the same shape and dtype as input, into which the copy is placed. By default, a new array is created

None
order str

controls the memory layout of the copy. C means C-order, F means F-order, A means F if the input is Fortran contiguous, C otherwise. K means match the layout of the input as closely as possible

'K'
num_threads int

the number of threads to use for computation. Default = 4. If negative value passed cpu count + num_threads + 1 threads will be used

_NUMERIC_DEFAULT_NUM_THREADS
backend BackendLike

which backend to use. cython and scipy are available, cython is used by default

None

Returns:

Name Type Description
copy ndarray

copy of array

Examples:

copied = copy(x)
copied = copy(x, backend='Scipy')  # same as `np.copy`
copy(x, output=y)  # copied into `y`
Source code in imops/numeric.py
def copy(
    nums: np.ndarray,
    output: np.ndarray = None,
    order: str = 'K',
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Return copy of the given array.

    Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

    Parameters
    ----------
    nums: np.ndarray
        n-dimensional array
    output: np.ndarray
        array of the same shape and dtype as input, into which the copy is placed. By default, a new
        array is created
    order: str
        controls the memory layout of the copy. `C` means C-order, `F` means F-order, `A` means `F` if the input is
        Fortran contiguous, `C` otherwise. `K` means match the layout of the input as closely as possible
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    copy: np.ndarray
        copy of array

    Examples
    --------
    ```python
    copied = copy(x)
    copied = copy(x, backend='Scipy')  # same as `np.copy`
    copy(x, output=y)  # copied into `y`
    ```
    """
    backend = resolve_backend(backend, warn_stacklevel=3)
    if backend.name not in ('Scipy', 'Cython'):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    ndim = nums.ndim
    dtype = nums.dtype
    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

    if output is None:
        output = np.empty_like(nums, dtype=dtype, order=order)
    elif output.shape != nums.shape:
        raise ValueError(f'Input array and output array shapes must be the same, got {nums.shape} vs {output.shape}.')
    elif dtype != output.dtype:
        raise ValueError(f'Input array and output array dtypes must be the same, got {dtype} vs {output.dtype}.')

    if dtype not in _TYPES or backend.name == 'Scipy' or ndim > 4:
        output = np.copy(nums, order=order)
        return output

    is_fp16 = dtype == np.float16
    src_copy = _choose_cython_copy(ndim, is_fp16, backend.fast)

    n_dummy = 3 - ndim if ndim <= 3 else 0

    if n_dummy:
        nums = nums[(None,) * n_dummy]
        output = output[(None,) * n_dummy]

    if is_fp16:
        src_copy(nums.view(np.uint16), output.view(np.uint16), num_threads)
    else:
        src_copy(nums, output, num_threads)

    if n_dummy:
        nums = nums[(0,) * n_dummy]
        output = output[(0,) * n_dummy]

    return output
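
A minimal sketch with an illustrative random input, showing a fresh copy, a preallocated output buffer and the Scipy fallback:

```python
import numpy as np
from imops.numeric import copy

x = np.random.rand(4, 128, 128).astype(np.float32)
out = np.empty_like(x)

copied = copy(x)                   # new array
copy(x, output=out)                # copy into a preallocated buffer of the same shape and dtype
copied = copy(x, backend='Scipy')  # plain np.copy under the hood
```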

imops.radon.radon(image, axes=None, theta=180, return_fill=False, num_threads=-1, backend=None)

Fast implementation of Radon transform. Adapted from scikit-image.

Parameters:

Name Type Description Default
image ndarray

an n-dimensional array with at least 2 axes

required
axes Tuple[int, int]

the axes in the image along which the Radon transform will be applied. The image must have the same size along both axes

None
theta Union[int, Sequence[float]]

the angles for which the Radon transform will be computed. If an integer is passed, that many angles are evenly distributed between 0 and 180

180
return_fill bool

whether to return the value that fills the image outside the circle working area

False
num_threads int

the number of threads to be used for parallel computation. By default, equals the number of cpu cores

-1
backend BackendLike

the execution backend. Currently only "Cython" is available

None

Returns:

Name Type Description
sinogram ndarray

the result of the Radon transform

fill_value float

the value that fills the image outside the circle working area. Returned only if return_fill is True

Examples:

sinogram = radon(image)  # 2d image
sinogram, fill_value = radon(image, return_fill=True)  # 2d image with fill value
sinogram = radon(image, axes=(-2, -1))  # nd image
Source code in imops/radon.py
def radon(
    image: np.ndarray,
    axes: Tuple[int, int] = None,
    theta: Union[int, Sequence[float]] = 180,
    return_fill: bool = False,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> Union[np.ndarray, Tuple[np.ndarray, float]]:
    """
    Fast implementation of Radon transform. Adapted from scikit-image.

    Parameters
    ----------
    image: np.ndarray
        an n-dimensional array with at least 2 axes
    axes: tuple[int, int]
        the axes in the `image` along which the Radon transform will be applied.
        The `image` must have the same size along both `axes`
    theta: int | Sequence[float]
        the angles for which the Radon transform will be computed. If an integer is passed,
        that many angles are evenly distributed between 0 and 180
    return_fill: bool
        whether to return the value that fills the image outside the circle working area
    num_threads: int
        the number of threads to be used for parallel computation. By default, equals the number of cpu cores
    backend: str | Backend
        the execution backend. Currently only "Cython" is available

    Returns
    -------
    sinogram: np.ndarray
        the result of the Radon transform
    fill_value: float
        the value that fills the image outside the circle working area. Returned only if `return_fill` is True

    Examples
    --------
    ```python
    sinogram = radon(image)  # 2d image
    sinogram, fill_value = radon(image, return_fill=True)  # 2d image with fill value
    sinogram = radon(image, axes=(-2, -1))  # nd image
    ```
    """
    backend = resolve_backend(backend, warn_stacklevel=3)
    if backend.name not in ('Cython',):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    image, axes, extra = normalize_axes(image, axes)
    if image.shape[1] != image.shape[2]:
        raise ValueError(
            f'The image must be square along the provided axes ({axes}), but has shape: {image.shape[1:]}.'
        )

    if isinstance(theta, int):
        theta = np.linspace(0, 180, theta, endpoint=False)

    size = image.shape[1]
    radius = size // 2
    xs = np.arange(-radius, size - radius)
    squared = xs**2
    outside_circle = (squared[:, None] + squared[None, :]) > radius**2
    values = image[:, outside_circle]
    min_, max_ = values.min(), values.max()
    if max_ - min_ > 0.1:
        raise ValueError(
            f'The image must be constant outside the circle. ' f'Got values ranging from {min_} to {max_}.'
        )

    if min_ != 0 or max_ != 0:
        # FIXME: how to accurately pass `num_threads` and `backend` arguments to `copy`?
        image = copy(image, order='C')
        image[:, outside_circle] = 0

    # TODO: f(arange)?
    limits = ((squared[:, None] + squared[None, :]) > (radius + 2) ** 2).sum(0) // 2

    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

    radon3d_ = fast_radon3d if backend.fast else radon3d

    sinogram = radon3d_(image, np.deg2rad(theta, dtype=image.dtype), limits, num_threads)

    result = restore_axes(sinogram, axes, extra)
    if return_fill:
        result = result, min_

    return result
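
A minimal sketch on an illustrative square phantom that is constant (zero) outside the inscribed circle, as `radon` requires:

```python
import numpy as np
from imops.radon import radon

phantom = np.zeros((128, 128), dtype=np.float32)
phantom[48:80, 48:80] = 1.0  # object strictly inside the working circle

sinogram, fill_value = radon(phantom, theta=180, return_fill=True)
```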

imops.radon.inverse_radon(sinogram, axes=None, theta=None, fill_value=0, a=0, b=1, num_threads=-1, backend=None)

Fast implementation of inverse Radon transform. Adapted from scikit-image.

Parameters:

Name Type Description Default
sinogram ndarray

an n-dimensional array with at least 2 axes

required
axes Tuple[int, int]

the axes in the image along which the inverse Radon transform will be applied

None
theta Union[int, Sequence[float]]

the angles for which the inverse Radon transform will be computed. If an integer is passed, that many angles are evenly distributed between 0 and 180

None
fill_value float

the value that fills the image outside the circle working area. Can be returned by radon

0
a float

the first parameter of the sharpen filter

0
b float

the second parameter of the sharpen filter

1
num_threads int

the number of threads to be used for parallel computation. By default, equals the number of cpu cores

-1
backend BackendLike

the execution backend. Currently only "Cython" is available

None

Returns:

Name Type Description
image ndarray

the result of the inverse Radon transform

Examples:

image = inverse_radon(sinogram)  # 2d image
image = inverse_radon(sinogram, fill_value=-1000)  # 2d image with fill value
image = inverse_radon(sinogram, axes=(-2, -1))  # nd image
Source code in imops/radon.py
def inverse_radon(
    sinogram: np.ndarray,
    axes: Tuple[int, int] = None,
    theta: Union[int, Sequence[float]] = None,
    fill_value: float = 0,
    a: float = 0,
    b: float = 1,
    num_threads: int = -1,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Fast implementation of inverse Radon transform. Adapted from scikit-image.

    Parameters
    ----------
    sinogram: np.ndarray
        an n-dimensional array with at least 2 axes
    axes: tuple[int, int]
        the axes in the `image` along which the inverse Radon transform will be applied
    theta: int | Sequence[float]
        the angles for which the inverse Radon transform will be computed. If an integer is passed,
        that many angles are evenly distributed between 0 and 180
    fill_value: float
        the value that fills the image outside the circle working area. Can be returned by `radon`
    a: float
        the first parameter of the sharpen filter
    b: float
        the second parameter of the sharpen filter
    num_threads: int
        the number of threads to be used for parallel computation. By default, equals the number of cpu cores
    backend: str | Backend
        the execution backend. Currently only "Cython" is available

    Returns
    -------
    image: np.ndarray
        the result of the inverse Radon transform

    Examples
    --------
    ```python
    image = inverse_radon(sinogram)  # 2d image
    image = inverse_radon(sinogram, fill_value=-1000)  # 2d image with fill value
    image = inverse_radon(sinogram, axes=(-2, -1))  # nd image
    ```
    """
    backend = resolve_backend(backend, warn_stacklevel=3)
    if backend.name not in ('Cython',):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    sinogram, axes, extra = normalize_axes(sinogram, axes)

    if theta is None:
        theta = sinogram.shape[-1]
    if isinstance(theta, int):
        theta = np.linspace(0, 180, theta, endpoint=False)

    angles_count = len(theta)
    if angles_count != sinogram.shape[-1]:
        raise ValueError(
            f'The given `theta` (size {angles_count}) does not match the number of '
            f'projections in `sinogram` ({sinogram.shape[-1]}).'
        )
    output_size = sinogram.shape[1]
    sinogram = _sinogram_circle_to_square(sinogram)

    img_shape = sinogram.shape[1]
    # Resize image to next power of two (but no less than 64) for
    # Fourier analysis; speeds up Fourier and lessens artifacts
    # TODO: why *2?
    projection_size_padded = max(64, int(2 ** np.ceil(np.log2(2 * img_shape))))
    pad_width = ((0, 0), (0, projection_size_padded - img_shape), (0, 0))
    padded_sinogram = np.pad(sinogram, pad_width, mode='constant', constant_values=0)
    fourier_filter = _smooth_sharpen_filter(projection_size_padded, a, b)

    # Apply filter in Fourier domain
    fourier_img = fft(padded_sinogram, axis=1) * fourier_filter
    filtered_sinogram = np.real(ifft(fourier_img, axis=1)[:, :img_shape, :])

    radius = output_size // 2
    xs = np.arange(-radius, output_size - radius)
    squared = xs**2
    inside_circle = (squared[:, None] + squared[None, :]) <= radius**2

    dtype = sinogram.dtype
    filtered_sinogram = filtered_sinogram.astype(dtype, copy=False)
    theta, xs = np.deg2rad(theta, dtype=dtype), xs.astype(dtype, copy=False)

    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

    backprojection3d_ = fast_backprojection3d if backend.fast else backprojection3d

    reconstructed = np.asarray(
        backprojection3d_(filtered_sinogram, theta, xs, inside_circle, fill_value, img_shape, output_size, num_threads)
    )

    return restore_axes(reconstructed, axes, extra)
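
A round-trip sketch with an illustrative phantom: the fill value returned by `radon` is passed back to `inverse_radon`:

```python
import numpy as np
from imops.radon import radon, inverse_radon

phantom = np.zeros((128, 128), dtype=np.float32)
phantom[40:88, 40:88] = 1.0

sinogram, fill_value = radon(phantom, return_fill=True)
reconstruction = inverse_radon(sinogram, fill_value=fill_value)  # approximate reconstruction of the phantom
```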

imops.utils.isin(element, test_elements, num_threads=1)

Calculates element in test_elements, broadcasting over element only. Returns a boolean array of the same shape as element that is True where an element of element is in test_elements and False otherwise.

Parameters:

Name Type Description Default
element ndarray

n-dimensional array

required
test_elements ndarray

1-d array of the values against which to test each value of element

required
num_threads int

the number of threads to use for computation. Default = 1. If negative value passed cpu count + num_threads + 1 threads will be used

1

Returns:

Name Type Description
isin (ndarray, bool)

has the same shape as element. The values element[isin] are in test_elements

Examples:

element = 2*np.arange(4).reshape((2, 2))
test_elements = [1, 2, 4, 8]
mask = isin(element, test_elements)

Source code in imops/utils.py
def isin(element: np.ndarray, test_elements: np.ndarray, num_threads: int = 1) -> np.ndarray:
    """
    Calculates `element in test_elements`, broadcasting over `element` only.
    Returns a boolean array of the same shape as `element` that is True where
    an element of `element` is in `test_elements` and False otherwise.

    Parameters
    ----------
    element: np.ndarray
        n-dimensional array
    test_elements: np.ndarray
        1-d array of the values against which to test each value of element
    num_threads: int
        the number of threads to use for computation. Default = 1. If negative value passed
        cpu count + num_threads + 1 threads will be used

    Returns
    -------
    isin: np.ndarray, bool
        has the same shape as `element`. The values `element[isin]` are in `test_elements`

    Examples
    --------
    element = 2*np.arange(4).reshape((2, 2))
    test_elements = [1, 2, 4, 8]
    mask = isin(element, test_elements)
    """
    if element.dtype not in ('int16', 'int32', 'int64'):
        raise ValueError(f'Supported dtypes: int16, int32, int64, got {element.dtype}')

    num_threads = normalize_num_threads(num_threads, Cython(), warn_stacklevel=2)

    contiguous_element = np.ascontiguousarray(element)
    test_elements = np.asarray(test_elements, dtype=element.dtype)
    out = np.zeros_like(contiguous_element, dtype=bool)

    cython_isin(contiguous_element.ravel(), test_elements, out.ravel(), num_threads)

    return out
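
A minimal sketch with illustrative integer labels, e.g. building a mask of a few target label values:

```python
import numpy as np
from imops.utils import isin

labels = np.random.randint(0, 10, size=(3, 256, 256)).astype(np.int32)
targets = np.array([2, 5, 7], dtype=np.int32)

mask = isin(labels, targets, num_threads=4)  # boolean mask of the same shape as `labels`
```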