From ff8c8f860852360b308a82a8e66b41b4fe666b41 Mon Sep 17 00:00:00 2001 From: dkazanc Date: Tue, 24 Feb 2026 17:15:45 +0000 Subject: [PATCH 1/7] adding data types and parameters checks --- httomolibgpu/misc/utils.py | 7 +++++++ httomolibgpu/recon/rotation.py | 8 +++++++- tests/test_recon/test_rotation.py | 27 +++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/httomolibgpu/misc/utils.py b/httomolibgpu/misc/utils.py index 6a6b85bf..b1ab7bbc 100644 --- a/httomolibgpu/misc/utils.py +++ b/httomolibgpu/misc/utils.py @@ -144,3 +144,10 @@ def __naninfs_check( ) ) return data + +def __check_type_consistency(variable, expected_datatype: type, variable_name:str): + """compare variable types and raise error""" + if type(variable) != expected_datatype: + err_str = (f"{variable_name} given as {variable} must be provided as {expected_datatype.__name__}" + ) + raise ValueError(err_str) \ No newline at end of file diff --git a/httomolibgpu/recon/rotation.py b/httomolibgpu/recon/rotation.py index e02595ef..62e7d6c2 100644 --- a/httomolibgpu/recon/rotation.py +++ b/httomolibgpu/recon/rotation.py @@ -49,7 +49,7 @@ import math from typing import List, Literal, Optional, Tuple, Union -from httomolibgpu.misc.utils import data_checker +from httomolibgpu.misc.utils import data_checker, __check_type_consistency __all__ = [ "find_center_vo", @@ -463,6 +463,12 @@ def find_center_360( """ if data.ndim != 3: raise ValueError("A 3D array must be provided") + __check_type_consistency(win_width, int, 'win_width') + __check_type_consistency(denoise, bool, 'denoise') + __check_type_consistency(norm, bool, 'norm') + __check_type_consistency(use_overlap, bool, 'use_overlap') + if side not in ['left','right', None]: + raise ValueError("side must be provided as 'left','right' or None for automatic determination") data = data_checker( data, diff --git a/tests/test_recon/test_rotation.py b/tests/test_recon/test_rotation.py index 0c62a314..010b7fe1 100644 --- 
a/tests/test_recon/test_rotation.py +++ b/tests/test_recon/test_rotation.py @@ -108,6 +108,33 @@ def test_find_center_360_1D_raises(data): with pytest.raises(ValueError): find_center_360(cp.ones(10)) +def test_find_center_360_raises_win_width(data): + # wrong win_width passed + with pytest.raises(ValueError): + find_center_360(data, win_width = True) + +def test_find_center_360_raises_ind(data): + # wrong ind passed + with pytest.raises(ValueError): + find_center_360(data, ind = 'mid') + +@pytest.mark.parametrize( + "side", + [ + ("side", 1), + ("side", 'lefft'), + ("side", 'rightt'), + ], +) +def test_find_center_360_raises_side(data,side): + # wrong side passed + with pytest.raises(ValueError): + find_center_360(data, win_width=10, side=side) + +def test_find_center_360_raises_denoise(data): + # wrong denoise passed + with pytest.raises(ValueError): + find_center_360(data, denoise='str') def test_find_center_360_NaN_infs_raises(data, flats, darks): #: find_center_360 raises if the data with NaNs or Infs given From b7fcbadbbb8788c8221f8b2f3698b32f6726d279 Mon Sep 17 00:00:00 2001 From: dkazanc Date: Wed, 25 Feb 2026 17:14:49 +0000 Subject: [PATCH 2/7] work started on incorporating checks --- httomolibgpu/misc/corr.py | 39 ++++++++++++---------------- httomolibgpu/misc/utils.py | 40 ++++++++++++++++++++++++++--- httomolibgpu/recon/rotation.py | 42 +++++++++++++++++-------------- tests/test_misc/test_corr.py | 2 +- tests/test_recon/test_rotation.py | 5 ++++ 5 files changed, 81 insertions(+), 47 deletions(-) diff --git a/httomolibgpu/misc/corr.py b/httomolibgpu/misc/corr.py index 95d1b71a..6aab2ef7 100644 --- a/httomolibgpu/misc/corr.py +++ b/httomolibgpu/misc/corr.py @@ -20,15 +20,11 @@ # --------------------------------------------------------------------------- """Module for data correction. 
For more detailed information see :ref:`data_correction_module`.""" -import numpy as np -from typing import Union - from httomolibgpu import cupywrapper cp = cupywrapper.cp cupy_run = cupywrapper.cupy_run -from numpy import float32 from unittest.mock import Mock if cupy_run: @@ -36,12 +32,13 @@ else: load_cuda_module = Mock() +from httomolibgpu.misc.utils import __check_variable_type, __check_if_data_3D_array, __check_if_data_correct_type, __check_if_positive_nonzero + __all__ = [ "median_filter", "remove_outlier", ] - def median_filter( data: cp.ndarray, kernel_size: int = 3, @@ -55,7 +52,7 @@ def median_filter( data : cp.ndarray Input CuPy 3D array either float32 or uint16 data type. kernel_size : int, optional - The size of the filter's kernel (a diameter). + The size of the filter's kernel (a "diameter"). dif : float, optional Expected difference value between outlier value and the median value of the array, leave equal to 0 for classical median. @@ -70,20 +67,16 @@ def median_filter( ValueError If the input array is not three dimensional. 
""" - input_type = data.dtype - if input_type not in ["float32", "uint16"]: - raise ValueError("The input data should be either float32 or uint16 data type") - - if data.ndim == 3: - if 0 in data.shape: - raise ValueError("The length of one of dimensions is equal to zero") - else: - raise ValueError("The input array must be a 3D array") - - if kernel_size not in [3, 5, 7, 9, 11, 13]: - raise ValueError("Please select a correct kernel size: 3, 5, 7, 9, 11, 13") - + ### Data and parameters checks ### + methods_name = 'median_filter/remove_outlier' + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type(data, accepted_type=['float32', 'uint16'], methods_name=methods_name) + __check_variable_type(kernel_size, int, 'kernel_size', [3, 5, 7, 9, 11, 13], methods_name) + __check_variable_type(dif, float, 'dif', [], methods_name) + ################################### + dz, dy, dx = data.shape + input_type = data.dtype output = cp.copy(data, order="C") # 3d median or dezinger @@ -118,7 +111,7 @@ def remove_outlier( data : cp.ndarray Input CuPy 3D array either float32 or uint16 data type. kernel_size : int, optional - The size of the filter's kernel (a diameter). + The size of the filter's kernel (a "diameter"). dif : float, optional Expected difference value between the outlier value (central voxel) and the median value of the neighbourhood. Lower values lead to median filtering. @@ -133,8 +126,8 @@ def remove_outlier( ValueError Threshold value (dif) must be positive and nonzero. 
""" - - if dif <= 0.0: - raise ValueError("Threshold value (dif) must be positive and nonzero.") + ### Data and parameters checks ### + __check_if_positive_nonzero(dif, 'dif', positive=True, nonzero=True, methods_name='remove_outlier') + ################################### return median_filter(data, kernel_size, dif) diff --git a/httomolibgpu/misc/utils.py b/httomolibgpu/misc/utils.py index b1ab7bbc..96220a7b 100644 --- a/httomolibgpu/misc/utils.py +++ b/httomolibgpu/misc/utils.py @@ -145,9 +145,41 @@ def __naninfs_check( ) return data -def __check_type_consistency(variable, expected_datatype: type, variable_name:str): - """compare variable types and raise error""" +def __check_variable_type(variable, expected_datatype: type, variable_name:str, literals: list, methods_name: str): if type(variable) != expected_datatype: - err_str = (f"{variable_name} given as {variable} must be provided as {expected_datatype.__name__}" + # compare variable types and raise error + err_str = (f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must have a type {expected_datatype.__name__}." ) - raise ValueError(err_str) \ No newline at end of file + raise ValueError(err_str) + if literals: + # check if variable should be a literal + if variable not in literals: + err_str = (f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must be provided as {literals}." + ) + raise ValueError(err_str) + +def __check_if_data_3D_array(data: cp.ndarray, methods_name: str): + if data.ndim == 3: + if 0 in data.shape: + err_str = (f"The length of one of input data dimensions of method '{methods_name}' is equal to zero." 
+ ) + raise ValueError(err_str) + else: + raise ValueError(f"The input data for method '{methods_name}' must be a 3D CuPy array.") + +def __check_if_data_correct_type(data: cp.ndarray, accepted_type:list, methods_name: str): + if data.dtype not in accepted_type: + err_str = (f"The input data type of method '{methods_name}' must be of {accepted_type}." + ) + raise ValueError(err_str) + +def __check_if_positive_nonzero(variable, variable_name:str, positive: bool, nonzero: bool, methods_name: str): + if positive and variable < 0: + err_str = (f"Variable '{variable_name}' of '{methods_name}' must be positive." + ) + raise ValueError(err_str) + if nonzero and variable == 0: + err_str = (f"Variable '{variable_name}' of '{methods_name}' must be nonzero." + ) + raise ValueError(err_str) + \ No newline at end of file diff --git a/httomolibgpu/recon/rotation.py b/httomolibgpu/recon/rotation.py index 62e7d6c2..2f7bf178 100644 --- a/httomolibgpu/recon/rotation.py +++ b/httomolibgpu/recon/rotation.py @@ -49,7 +49,7 @@ import math from typing import List, Literal, Optional, Tuple, Union -from httomolibgpu.misc.utils import data_checker, __check_type_consistency +from httomolibgpu.misc.utils import data_checker, __check_variable_type, __check_if_data_3D_array, __check_if_data_correct_type __all__ = [ "find_center_vo", @@ -416,7 +416,7 @@ def _downsample(image, dsp_fact0, dsp_fact1): ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # # %%%%%%%%%%%%%%%%%%%%%%%%%find_center_360%%%%%%%%%%%%%%%%%%%%%%%%% -# --- Center of rotation (COR) estimation method ---# +# Center of rotation (COR) estimation method for 360 degrees scan # def find_center_360( data: cp.ndarray, ind: Optional[int] = None, @@ -435,7 +435,7 @@ def find_center_360( data : cp.ndarray 3D tomographic data as a Cupy array. ind : int, optional - Index of the slice to be used for estimate the CoR and the overlap. + Index of the slice to be used for estimate the CoR and the overlap. 
If 'None' is given, the zero slice will be used. win_width : int, optional Window width used for finding the overlap area. side : {None, left, right}, optional @@ -461,28 +461,32 @@ def find_center_360( Position of the window in the first image giving the best correlation metric. """ - if data.ndim != 3: - raise ValueError("A 3D array must be provided") - __check_type_consistency(win_width, int, 'win_width') - __check_type_consistency(denoise, bool, 'denoise') - __check_type_consistency(norm, bool, 'norm') - __check_type_consistency(use_overlap, bool, 'use_overlap') - if side not in ['left','right', None]: - raise ValueError("side must be provided as 'left','right' or None for automatic determination") + ### Data and parameters checks ### + methods_name = 'find_center_360' + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type(data, accepted_type=['float32', 'uint16'], methods_name=methods_name) + if ind is not None: + __check_variable_type(ind, int, 'ind', [], methods_name) + if side is not None: + __check_variable_type(side, str, 'side', ['left','right'], methods_name) + __check_variable_type(win_width, int, 'win_width', [], methods_name) + __check_variable_type(denoise, bool, 'denoise', [], methods_name) + __check_variable_type(norm, bool, 'norm', [], methods_name) + __check_variable_type(use_overlap, bool, 'use_overlap', [], methods_name) + ################################### - data = data_checker( - data, - infsnans_correct=True, - zeros_warning=False, - data_to_method_name="find_center_360", - ) - - # this method works with a 360-degree sinogram. 
if ind is None: _sino = data[:, 0, :] else: _sino = data[:, ind, :] + data = data_checker( + _sino, + infsnans_correct=True, + zeros_warning=False, + data_to_method_name=methods_name, + ) + nrow, ncol = _sino.shape nrow_180 = nrow // 2 + 1 sino_top = _sino[0:nrow_180, :] diff --git a/tests/test_misc/test_corr.py b/tests/test_misc/test_corr.py index 93f99df0..c5dd5177 100644 --- a/tests/test_misc/test_corr.py +++ b/tests/test_misc/test_corr.py @@ -83,7 +83,7 @@ def test_median_filter3d(data): assert filtered_data.flags.c_contiguous assert ( - median_filter(data.astype(cp.float32), kernel_size=5, dif=1.5).get().dtype + median_filter(data.astype(cp.float32), kernel_size=5, dif=0.0).get().dtype == np.float32 ) diff --git a/tests/test_recon/test_rotation.py b/tests/test_recon/test_rotation.py index 010b7fe1..2734af84 100644 --- a/tests/test_recon/test_rotation.py +++ b/tests/test_recon/test_rotation.py @@ -99,6 +99,11 @@ def test_find_center_360_data(data): assert side == "right" assert_allclose(overlap_pos, 111.906334, rtol=eps) +def test_find_center_360_data2(data): + eps = 1e-5 + cor, overlap, side, overlap_pos = find_center_360(data, ind=3, win_width = 5, side = 'right', norm=True, denoise=False) + assert_allclose(cor, 160, rtol=eps) + assert_allclose(overlap, 16.9026184, rtol=eps) def test_find_center_360_1D_raises(data): #: 360-degree sinogram must be a 3d array From d2b001c4c0f73a4b32d6edddfabb22eded7629bc Mon Sep 17 00:00:00 2001 From: dkazanc Date: Wed, 25 Feb 2026 17:15:12 +0000 Subject: [PATCH 3/7] linting --- httomolibgpu/misc/corr.py | 26 ++++++++++++++----- httomolibgpu/misc/utils.py | 43 +++++++++++++++++++------------ httomolibgpu/recon/rotation.py | 25 +++++++++++------- tests/test_recon/test_rotation.py | 23 ++++++++++++----- 4 files changed, 77 insertions(+), 40 deletions(-) diff --git a/httomolibgpu/misc/corr.py b/httomolibgpu/misc/corr.py index 6aab2ef7..7c11074b 100644 --- a/httomolibgpu/misc/corr.py +++ b/httomolibgpu/misc/corr.py @@ -32,13 +32,19 
@@ else: load_cuda_module = Mock() -from httomolibgpu.misc.utils import __check_variable_type, __check_if_data_3D_array, __check_if_data_correct_type, __check_if_positive_nonzero +from httomolibgpu.misc.utils import ( + __check_variable_type, + __check_if_data_3D_array, + __check_if_data_correct_type, + __check_if_positive_nonzero, +) __all__ = [ "median_filter", "remove_outlier", ] + def median_filter( data: cp.ndarray, kernel_size: int = 3, @@ -68,13 +74,17 @@ def median_filter( If the input array is not three dimensional. """ ### Data and parameters checks ### - methods_name = 'median_filter/remove_outlier' + methods_name = "median_filter/remove_outlier" __check_if_data_3D_array(data, methods_name) - __check_if_data_correct_type(data, accepted_type=['float32', 'uint16'], methods_name=methods_name) - __check_variable_type(kernel_size, int, 'kernel_size', [3, 5, 7, 9, 11, 13], methods_name) - __check_variable_type(dif, float, 'dif', [], methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_variable_type( + kernel_size, int, "kernel_size", [3, 5, 7, 9, 11, 13], methods_name + ) + __check_variable_type(dif, float, "dif", [], methods_name) ################################### - + dz, dy, dx = data.shape input_type = data.dtype output = cp.copy(data, order="C") @@ -127,7 +137,9 @@ def remove_outlier( Threshold value (dif) must be positive and nonzero. 
""" ### Data and parameters checks ### - __check_if_positive_nonzero(dif, 'dif', positive=True, nonzero=True, methods_name='remove_outlier') + __check_if_positive_nonzero( + dif, "dif", positive=True, nonzero=True, methods_name="remove_outlier" + ) ################################### return median_filter(data, kernel_size, dif) diff --git a/httomolibgpu/misc/utils.py b/httomolibgpu/misc/utils.py index 96220a7b..6336e28e 100644 --- a/httomolibgpu/misc/utils.py +++ b/httomolibgpu/misc/utils.py @@ -145,41 +145,50 @@ def __naninfs_check( ) return data -def __check_variable_type(variable, expected_datatype: type, variable_name:str, literals: list, methods_name: str): + +def __check_variable_type( + variable, + expected_datatype: type, + variable_name: str, + literals: list, + methods_name: str, +): if type(variable) != expected_datatype: # compare variable types and raise error - err_str = (f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must have a type {expected_datatype.__name__}." - ) + err_str = f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must have a type {expected_datatype.__name__}." raise ValueError(err_str) if literals: # check if variable should be a literal if variable not in literals: - err_str = (f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must be provided as {literals}." - ) + err_str = f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must be provided as {literals}." raise ValueError(err_str) + def __check_if_data_3D_array(data: cp.ndarray, methods_name: str): if data.ndim == 3: if 0 in data.shape: - err_str = (f"The length of one of input data dimensions of method '{methods_name}' is equal to zero." - ) + err_str = f"The length of one of input data dimensions of method '{methods_name}' is equal to zero." 
raise ValueError(err_str) else: - raise ValueError(f"The input data for method '{methods_name}' must be a 3D CuPy array.") + raise ValueError( + f"The input data for method '{methods_name}' must be a 3D CuPy array." + ) -def __check_if_data_correct_type(data: cp.ndarray, accepted_type:list, methods_name: str): + +def __check_if_data_correct_type( + data: cp.ndarray, accepted_type: list, methods_name: str +): if data.dtype not in accepted_type: - err_str = (f"The input data type of method '{methods_name}' must be of {accepted_type}." - ) + err_str = f"The input data type of method '{methods_name}' must be of {accepted_type}." raise ValueError(err_str) -def __check_if_positive_nonzero(variable, variable_name:str, positive: bool, nonzero: bool, methods_name: str): + +def __check_if_positive_nonzero( + variable, variable_name: str, positive: bool, nonzero: bool, methods_name: str +): if positive and variable < 0: - err_str = (f"Variable '{variable_name}' of '{methods_name}' must be positive." - ) + err_str = f"Variable '{variable_name}' of '{methods_name}' must be positive." raise ValueError(err_str) if nonzero and variable == 0: - err_str = (f"Variable '{variable_name}' of '{methods_name}' must be nonzero." - ) + err_str = f"Variable '{variable_name}' of '{methods_name}' must be nonzero." raise ValueError(err_str) - \ No newline at end of file diff --git a/httomolibgpu/recon/rotation.py b/httomolibgpu/recon/rotation.py index 2f7bf178..09c866a9 100644 --- a/httomolibgpu/recon/rotation.py +++ b/httomolibgpu/recon/rotation.py @@ -49,7 +49,12 @@ import math from typing import List, Literal, Optional, Tuple, Union -from httomolibgpu.misc.utils import data_checker, __check_variable_type, __check_if_data_3D_array, __check_if_data_correct_type +from httomolibgpu.misc.utils import ( + data_checker, + __check_variable_type, + __check_if_data_3D_array, + __check_if_data_correct_type, +) __all__ = [ "find_center_vo", @@ -462,17 +467,19 @@ def find_center_360( correlation metric. 
""" ### Data and parameters checks ### - methods_name = 'find_center_360' + methods_name = "find_center_360" __check_if_data_3D_array(data, methods_name) - __check_if_data_correct_type(data, accepted_type=['float32', 'uint16'], methods_name=methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) if ind is not None: - __check_variable_type(ind, int, 'ind', [], methods_name) + __check_variable_type(ind, int, "ind", [], methods_name) if side is not None: - __check_variable_type(side, str, 'side', ['left','right'], methods_name) - __check_variable_type(win_width, int, 'win_width', [], methods_name) - __check_variable_type(denoise, bool, 'denoise', [], methods_name) - __check_variable_type(norm, bool, 'norm', [], methods_name) - __check_variable_type(use_overlap, bool, 'use_overlap', [], methods_name) + __check_variable_type(side, str, "side", ["left", "right"], methods_name) + __check_variable_type(win_width, int, "win_width", [], methods_name) + __check_variable_type(denoise, bool, "denoise", [], methods_name) + __check_variable_type(norm, bool, "norm", [], methods_name) + __check_variable_type(use_overlap, bool, "use_overlap", [], methods_name) ################################### if ind is None: diff --git a/tests/test_recon/test_rotation.py b/tests/test_recon/test_rotation.py index 2734af84..273977a4 100644 --- a/tests/test_recon/test_rotation.py +++ b/tests/test_recon/test_rotation.py @@ -99,12 +99,16 @@ def test_find_center_360_data(data): assert side == "right" assert_allclose(overlap_pos, 111.906334, rtol=eps) + def test_find_center_360_data2(data): eps = 1e-5 - cor, overlap, side, overlap_pos = find_center_360(data, ind=3, win_width = 5, side = 'right', norm=True, denoise=False) + cor, overlap, side, overlap_pos = find_center_360( + data, ind=3, win_width=5, side="right", norm=True, denoise=False + ) assert_allclose(cor, 160, rtol=eps) assert_allclose(overlap, 16.9026184, rtol=eps) + def 
test_find_center_360_1D_raises(data): #: 360-degree sinogram must be a 3d array with pytest.raises(ValueError): @@ -113,33 +117,38 @@ def test_find_center_360_1D_raises(data): with pytest.raises(ValueError): find_center_360(cp.ones(10)) + def test_find_center_360_raises_win_width(data): # wrong win_width passed with pytest.raises(ValueError): - find_center_360(data, win_width = True) + find_center_360(data, win_width=True) + def test_find_center_360_raises_ind(data): # wrong ind passed with pytest.raises(ValueError): - find_center_360(data, ind = 'mid') + find_center_360(data, ind="mid") + @pytest.mark.parametrize( "side", [ ("side", 1), - ("side", 'lefft'), - ("side", 'rightt'), + ("side", "lefft"), + ("side", "rightt"), ], ) -def test_find_center_360_raises_side(data,side): +def test_find_center_360_raises_side(data, side): # wrong side passed with pytest.raises(ValueError): find_center_360(data, win_width=10, side=side) + def test_find_center_360_raises_denoise(data): # wrong denoise passed with pytest.raises(ValueError): - find_center_360(data, denoise='str') + find_center_360(data, denoise="str") + def test_find_center_360_NaN_infs_raises(data, flats, darks): #: find_center_360 raises if the data with NaNs or Infs given From 281771981d85f940078f6370183b09a6a87841a5 Mon Sep 17 00:00:00 2001 From: algol Date: Thu, 26 Feb 2026 15:40:33 +0000 Subject: [PATCH 4/7] data and parameters check added to all methods --- httomolibgpu/misc/corr.py | 18 +- httomolibgpu/misc/denoise.py | 61 ++++- httomolibgpu/misc/morph.py | 73 ++++-- httomolibgpu/misc/rescale.py | 34 ++- httomolibgpu/misc/utils.py | 16 +- httomolibgpu/prep/alignment.py | 48 +++- httomolibgpu/prep/normalize.py | 48 +++- httomolibgpu/prep/phase.py | 138 ++++++----- httomolibgpu/prep/stripe.py | 109 ++++++-- httomolibgpu/recon/algorithm.py | 300 +++++++++++++++++++---- httomolibgpu/recon/rotation.py | 55 ++++- tests/test_recon/test_algorithm.py | 2 +- zenodo-tests/test_prep/test_phase.py | 2 +- 
zenodo-tests/test_recon/test_rotation.py | 2 +- 14 files changed, 701 insertions(+), 205 deletions(-) diff --git a/httomolibgpu/misc/corr.py b/httomolibgpu/misc/corr.py index 7c11074b..71318c2d 100644 --- a/httomolibgpu/misc/corr.py +++ b/httomolibgpu/misc/corr.py @@ -31,6 +31,7 @@ from httomolibgpu.cuda_kernels import load_cuda_module else: load_cuda_module = Mock() +from typing import Union from httomolibgpu.misc.utils import ( __check_variable_type, @@ -48,7 +49,7 @@ def median_filter( data: cp.ndarray, kernel_size: int = 3, - dif: float = 0.0, + dif: Union[float, int] = 0.0, ) -> cp.ndarray: """ Applies 3D median filter to a 3D CuPy array. For more detailed information, see :ref:`method_median_filter`. @@ -57,9 +58,9 @@ def median_filter( ---------- data : cp.ndarray Input CuPy 3D array either float32 or uint16 data type. - kernel_size : int, optional + kernel_size : int The size of the filter's kernel (a "diameter"). - dif : float, optional + dif : float, int Expected difference value between outlier value and the median value of the array, leave equal to 0 for classical median. @@ -80,9 +81,9 @@ def median_filter( data, accepted_type=["float32", "uint16"], methods_name=methods_name ) __check_variable_type( - kernel_size, int, "kernel_size", [3, 5, 7, 9, 11, 13], methods_name + kernel_size, [int], "kernel_size", [3, 5, 7, 9, 11, 13], methods_name ) - __check_variable_type(dif, float, "dif", [], methods_name) + __check_variable_type(dif, [int, float], "dif", [], methods_name) ################################### dz, dy, dx = data.shape @@ -111,7 +112,7 @@ def median_filter( def remove_outlier( - data: cp.ndarray, kernel_size: int = 3, dif: float = 1000 + data: cp.ndarray, kernel_size: int = 3, dif: Union[float, int] = 1000 ) -> cp.ndarray: """Selectively applies 3D median filter to a 3D CuPy array to remove outliers. Also called a dezinger. For more detailed information, see :ref:`method_outlier_removal`. 
@@ -120,9 +121,9 @@ def remove_outlier( ---------- data : cp.ndarray Input CuPy 3D array either float32 or uint16 data type. - kernel_size : int, optional + kernel_size : int The size of the filter's kernel (a "diameter"). - dif : float, optional + dif : float, int Expected difference value between the outlier value (central voxel) and the median value of the neighbourhood. Lower values lead to median filtering. @@ -141,5 +142,4 @@ def remove_outlier( dif, "dif", positive=True, nonzero=True, methods_name="remove_outlier" ) ################################### - return median_filter(data, kernel_size, dif) diff --git a/httomolibgpu/misc/denoise.py b/httomolibgpu/misc/denoise.py index 4dded10f..1d7f4ee1 100644 --- a/httomolibgpu/misc/denoise.py +++ b/httomolibgpu/misc/denoise.py @@ -20,8 +20,6 @@ # --------------------------------------------------------------------------- """Module for data denoising. For more detailed information see :ref:`data_denoising_module`.""" -import numpy as np - from httomolibgpu import cupywrapper cp = cupywrapper.cp @@ -35,6 +33,13 @@ ROF_TV = Mock() PD_TV = Mock() +from typing import Union + +from httomolibgpu.misc.utils import ( + __check_variable_type, + __check_if_data_3D_array, + __check_if_data_correct_type, +) __all__ = [ "total_variation_ROF", @@ -44,7 +49,7 @@ def total_variation_ROF( data: cp.ndarray, - regularisation_parameter: float = 1e-05, + regularisation_parameter: Union[float, int] = 1e-05, iterations: int = 3000, time_marching_parameter: float = 0.001, gpu_id: int = 0, @@ -55,7 +60,6 @@ def total_variation_ROF( This is a gradient-based algorithm for a smoothed TV term which requires a small time marching parameter and a significant number of iterations. See more in :ref:`method_total_variation_ROF`. - Parameters ---------- data : cp.ndarray @@ -81,6 +85,26 @@ def total_variation_ROF( ValueError If the input array is not float32 data type. 
""" + ### Data and parameters checks ### + methods_name = "total_variation_ROF" + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32"], methods_name=methods_name + ) + __check_variable_type( + regularisation_parameter, + [float, int], + "regularisation_parameter", + [], + methods_name, + ) + __check_variable_type(iterations, [int], "iterations", [], methods_name) + __check_variable_type( + time_marching_parameter, [float], "time_marching_parameter", [], methods_name + ) + __check_variable_type(gpu_id, [int], "gpu_id", [], methods_name) + __check_variable_type(half_precision, [bool], "half_precision", [], methods_name) + ################################### return ROF_TV_cupy( data, @@ -94,11 +118,11 @@ def total_variation_ROF( def total_variation_PD( data: cp.ndarray, - regularisation_parameter: float = 1e-05, + regularisation_parameter: Union[float, int] = 1e-05, iterations: int = 1000, isotropic: bool = True, nonnegativity: bool = False, - lipschitz_const: float = 8.0, + lipschitz_const: Union[float, int] = 8.0, gpu_id: int = 0, half_precision: bool = False, ) -> cp.ndarray: @@ -109,7 +133,7 @@ def total_variation_PD( ---------- data : cp.ndarray Input CuPy 3D array of float32 data type. - regularisation_parameter : float + regularisation_parameter : float, int Regularisation parameter to control the level of smoothing. Defaults to 1e-05. iterations : int The number of iterations. Defaults to 1000. @@ -117,7 +141,7 @@ def total_variation_PD( Choose between isotropic or anisotropic TV norm. Defaults to isotropic. nonnegativity : bool Enable non-negativity in iterations. Defaults to False. - lipschitz_const : float + lipschitz_const : float,int Lipschitz constant to control convergence. Defaults to 8. gpu_id : int GPU device index to perform processing on. Defaults to 0. @@ -135,6 +159,27 @@ def total_variation_PD( If the input array is not float32 data type. 
""" + ### Data and parameters checks ### + methods_name = "total_variation_PD" + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32"], methods_name=methods_name + ) + __check_variable_type( + regularisation_parameter, + [float, int], + "regularisation_parameter", + [], + methods_name, + ) + __check_variable_type(iterations, [int], "iterations", [], methods_name) + __check_variable_type(isotropic, [bool], "isotropic", [], methods_name) + __check_variable_type(nonnegativity, [bool], "nonnegativity", [], methods_name) + __check_variable_type(lipschitz_const, [float], "lipschitz_const", [], methods_name) + __check_variable_type(gpu_id, [int], "gpu_id", [], methods_name) + __check_variable_type(half_precision, [bool], "half_precision", [], methods_name) + ################################### + methodTV = 0 if not isotropic: methodTV = 1 diff --git a/httomolibgpu/misc/morph.py b/httomolibgpu/misc/morph.py index c86280f6..3b30901f 100644 --- a/httomolibgpu/misc/morph.py +++ b/httomolibgpu/misc/morph.py @@ -33,7 +33,14 @@ else: interpn = Mock() -from typing import Literal +from typing import Literal, Union + +from httomolibgpu.misc.utils import ( + __check_variable_type, + __check_if_data_3D_array, + __check_if_data_correct_type, + __check_if_positive_nonzero, +) __all__ = [ "sino_360_to_180", @@ -42,7 +49,9 @@ def sino_360_to_180( - data: cp.ndarray, overlap: float = 0, side: Literal["left", "right"] = "left" + data: cp.ndarray, + overlap: Union[float, int] = 1, + side: Literal["left", "right"] = "left", ) -> cp.ndarray: """ Converts 0-360 degrees sinogram to a 0-180 sinogram. @@ -53,7 +62,7 @@ def sino_360_to_180( ---------- data : cp.ndarray Input 3D data. - overlap : float + overlap : float,int Overlapping number of pixels. Floats will be converted to integers. side : string 'left' if rotation center is close to the left of the @@ -63,26 +72,28 @@ def sino_360_to_180( cp.ndarray Output 3D data. 
""" - if data.ndim != 3: - raise ValueError("only 3D data is supported") - + ### Data and parameters checks ### + methods_name = "sino_360_to_180" + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_variable_type(overlap, [int, float], "overlap", [], methods_name) + __check_variable_type(side, [str], "side", ["left", "right"], methods_name) + __check_if_positive_nonzero( + overlap, "overlap", positive=True, nonzero=True, methods_name=methods_name + ) + ################################### + # dx, dy, dz = data.shape overlap = int(np.round(overlap)) if overlap >= dz - 1: raise ValueError("Overlap must be less than size of the horizontal detector") - if overlap <= 0: - raise ValueError("Only positive non-zero overlaps are allowed.") if overlap % 2 != 0: overlap += 1 - if side not in ["left", "right"]: - raise ValueError( - f'The value {side} is invalid, only "left" or "right" strings are accepted' - ) - n = dx // 2 - out = cp.empty((n, dy, 2 * dz - overlap), dtype=data.dtype) if side == "left": @@ -114,7 +125,12 @@ def sino_360_to_180( def data_resampler( - data: cp.ndarray, newshape: list, axis: int = 1, interpolation: str = "linear" + data: cp.ndarray, + newshape: list, + axis: int = 1, + interpolation: Literal[ + "linear", "nearest", "slinear", "cubic", "quintic", "pchip" + ] = "linear", ) -> cp.ndarray: """ Down/Up-resampler of the input data implemented through interpn function. @@ -131,7 +147,8 @@ def data_resampler( axis : int, optional Axis along which the scaling is applied. Defaults to 1. interpolation : str, optional - Selection of interpolation method. Defaults to 'linear'. + Selection of interpolation method. Defaults to 'linear'. Supported "linear", + "nearest", "slinear", "cubic", "quintic" and "pchip". 
Raises ---------- @@ -141,6 +158,24 @@ def data_resampler( ------- cp.ndarray: Up/Down-scaled 3D cupy array """ + + ### Data and parameters checks ### + methods_name = "data_resampler" + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_variable_type(newshape, [list], "newshape", [], methods_name) + __check_variable_type(axis, [int], "axis", [0, 1, 2], methods_name) + __check_variable_type( + interpolation, + [str], + "interpolation", + ["linear", "nearest", "slinear", "cubic", "quintic", "pchip"], + methods_name, + ) + ################################### + # expanded = False # if 2d data is given it is extended into a 3D array along the vertical dimension if data.ndim == 2: @@ -156,20 +191,18 @@ def data_resampler( step_x = M / newshape[0] step_y = Z / newshape[1] scaled_data = cp.empty((N, newshape[0], newshape[1]), dtype=cp.float32) - elif axis == 1: + if axis == 1: xaxis = cp.arange(N) - N / 2 yaxis = cp.arange(Z) - Z / 2 step_x = N / newshape[0] step_y = Z / newshape[1] scaled_data = cp.empty((newshape[0], M, newshape[1]), dtype=cp.float32) - elif axis == 2: + if axis == 2: xaxis = cp.arange(N) - N / 2 yaxis = cp.arange(M) - M / 2 step_x = N / newshape[0] step_y = M / newshape[1] scaled_data = cp.empty((newshape[0], newshape[1], Z), dtype=cp.float32) - else: - raise ValueError("Only 0,1,2 values for axes are supported") points = (xaxis, yaxis) diff --git a/httomolibgpu/misc/rescale.py b/httomolibgpu/misc/rescale.py index e701c8c9..2510a7fe 100644 --- a/httomolibgpu/misc/rescale.py +++ b/httomolibgpu/misc/rescale.py @@ -31,11 +31,17 @@ "rescale_to_int", ] +from httomolibgpu.misc.utils import ( + __check_variable_type, + __check_if_data_3D_array, + __check_if_data_correct_type, +) + def rescale_to_int( data: cp.ndarray, - perc_range_min: float = 0.0, - perc_range_max: float = 100.0, + perc_range_min: Union[float, int] = 0.0, + perc_range_max: 
Union[float, int] = 100.0, bits: Literal[8, 16, 32] = 8, glob_stats: Optional[Tuple[float, float, float, int]] = None, ) -> cp.ndarray: @@ -47,13 +53,13 @@ def rescale_to_int( ---------- data : cp.ndarray Input data as a cupy array - perc_range_min: float, optional + perc_range_min: float, int The lower cutoff point in the input data, in percent of the data range (defaults to 0). The lower bound is computed as min + perc_range_min/100*(max-min) - perc_range_max: float, optional + perc_range_max: float, int The upper cutoff point in the input data, in percent of the data range (defaults to 100). The upper bound is computed as min + perc_range_max/100*(max-min) - bits: Literal[8, 16, 32], optional + bits: Literal[8, 16, 32], The number of bits in the output integer range (defaults to 8). Allowed values are: - 8 -> uint8 @@ -70,6 +76,24 @@ def rescale_to_int( The original data, clipped to the range specified with the perc_range_min and perc_range_max, and scaled to the full range of the output integer type """ + + ### Data and parameters checks ### + methods_name = "rescale_to_int" + __check_if_data_correct_type( + data, accepted_type=["float32"], methods_name=methods_name + ) + __check_variable_type( + perc_range_min, [int, float], "perc_range_min", [], methods_name + ) + __check_variable_type( + perc_range_max, [int, float], "perc_range_max", [], methods_name + ) + __check_variable_type(bits, [int], "bits", [8, 16, 32], methods_name) + __check_variable_type( + glob_stats, [tuple, type(None)], "glob_stats", [], methods_name + ) + ################################### + # if bits == 8: output_dtype: Union[type[np.uint8], type[np.uint16], type[np.uint32]] = np.uint8 elif bits == 16: diff --git a/httomolibgpu/misc/utils.py b/httomolibgpu/misc/utils.py index 6336e28e..18ff73dc 100644 --- a/httomolibgpu/misc/utils.py +++ b/httomolibgpu/misc/utils.py @@ -148,36 +148,34 @@ def __naninfs_check( def __check_variable_type( variable, - expected_datatype: type, + 
expected_datatype: list, variable_name: str, literals: list, methods_name: str, ): - if type(variable) != expected_datatype: + if type(variable) not in expected_datatype: # compare variable types and raise error - err_str = f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must have a type {expected_datatype.__name__}." + err_str = f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must have a type {expected_datatype}." raise ValueError(err_str) - if literals: + if literals and variable is not None: # check if variable should be a literal if variable not in literals: err_str = f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must be provided as {literals}." raise ValueError(err_str) -def __check_if_data_3D_array(data: cp.ndarray, methods_name: str): +def __check_if_data_3D_array(data, methods_name: str): if data.ndim == 3: if 0 in data.shape: err_str = f"The length of one of input data dimensions of method '{methods_name}' is equal to zero." raise ValueError(err_str) else: raise ValueError( - f"The input data for method '{methods_name}' must be a 3D CuPy array." + f"The input data for method '{methods_name}' must be a 3D array." ) -def __check_if_data_correct_type( - data: cp.ndarray, accepted_type: list, methods_name: str -): +def __check_if_data_correct_type(data, accepted_type: list, methods_name: str): if data.dtype not in accepted_type: err_str = f"The input data type of method '{methods_name}' must be of {accepted_type}." 
raise ValueError(err_str) diff --git a/httomolibgpu/prep/alignment.py b/httomolibgpu/prep/alignment.py index fe90dd2f..96dc14c6 100644 --- a/httomolibgpu/prep/alignment.py +++ b/httomolibgpu/prep/alignment.py @@ -20,7 +20,6 @@ # --------------------------------------------------------------------------- """Modules for data correction""" -import numpy as np from httomolibgpu import cupywrapper cp = cupywrapper.cp @@ -33,12 +32,17 @@ else: map_coordinates = Mock() -from typing import Dict, List, Tuple +from typing import Literal, List __all__ = [ "distortion_correction_proj_discorpy", ] +from httomolibgpu.misc.utils import ( + __check_variable_type, + __check_if_data_correct_type, +) + # CuPy implementation of distortion correction from Discorpy # https://github.com/DiamondLightSource/discorpy/blob/67743842b60bf5dd45b21b8460e369d4a5e94d67/discorpy/post/postprocessing.py#L111-L148 @@ -51,7 +55,16 @@ def distortion_correction_proj_discorpy( shift_xy: List[int] = [0, 0], step_xy: List[int] = [1, 1], order: int = 3, - mode: str = "constant", + mode: Literal[ + "constant", + "nearest", + "mirror", + "reflect", + "wrap", + "grid-mirror", + "grid-wrap", + "grid-constant", + ] = "constant", ): """Unwarp a stack of images using a backward model. See :cite:`vo2015radial`. @@ -75,13 +88,40 @@ def distortion_correction_proj_discorpy( mode : str, optional Points outside the boundaries of the input are filled according to the given mode - ('constant', 'nearest', 'mirror', 'reflect', 'wrap', 'grid-mirror', 'grid-wrap', 'grid-constant' or 'opencv'). + ('constant', 'nearest', 'mirror', 'reflect', 'wrap', 'grid-mirror', 'grid-wrap', 'grid-constant'). Returns ------- cp.ndarray 3D array. Distortion-corrected array. 
""" + ### Data and parameters checks ### + methods_name = "distortion_correction_proj_discorpy" + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_variable_type(metadata_path, [str], "metadata_path", [], methods_name) + __check_variable_type(shift_xy, [list], "shift_xy", [], methods_name) + __check_variable_type(step_xy, [list], "step_xy", [], methods_name) + __check_variable_type(order, [int], "order", [], methods_name) + __check_variable_type( + mode, + [str], + "mode", + [ + "constant", + "nearest", + "mirror", + "reflect", + "wrap", + "grid-mirror", + "grid-wrap", + "grid-constant", + ], + methods_name, + ) + ################################### + # Check if it's a stack of 2D images, or only a single 2D image if len(data.shape) == 2: data = cp.expand_dims(data, axis=0) diff --git a/httomolibgpu/prep/normalize.py b/httomolibgpu/prep/normalize.py index e82e13ae..8a1a3a3c 100644 --- a/httomolibgpu/prep/normalize.py +++ b/httomolibgpu/prep/normalize.py @@ -32,7 +32,13 @@ else: mean = Mock() +from typing import Union from numpy import float32 +from httomolibgpu.misc.utils import ( + __check_variable_type, + __check_if_data_3D_array, + __check_if_data_correct_type, +) __all__ = ["dark_flat_field_correction", "minus_log"] @@ -41,9 +47,9 @@ def dark_flat_field_correction( data: cp.ndarray, flats: cp.ndarray, darks: cp.ndarray, - flats_multiplier: float = 1.0, - darks_multiplier: float = 1.0, - cutoff: float = 10.0, + flats_multiplier: Union[float, int] = 1.0, + darks_multiplier: Union[float, int] = 1.0, + cutoff: Union[float, int] = 10.0, ) -> cp.ndarray: """ Perform dark/flat field correction of raw projection data. @@ -56,11 +62,11 @@ def dark_flat_field_correction( 3D flat field data as a CuPy array. darks : cp.ndarray 3D dark field data as a CuPy array. - flats_multiplier: float + flats_multiplier: float,int A multiplier to apply to flats, can work as an intensity compensation constant. 
- darks_multiplier: float + darks_multiplier: float,int A multiplier to apply to darks, can work as an intensity compensation constant. - cutoff : float + cutoff : float,int Permitted maximum value for the normalised data. Returns @@ -68,7 +74,28 @@ def dark_flat_field_correction( cp.ndarray dark/flat field corrected 3D tomographic data as a CuPy array. """ - _check_valid_input_normalise(data, flats, darks) + ### Data and parameters checks ### + methods_name = "dark_flat_field_correction" + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_if_data_correct_type( + flats, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_if_data_correct_type( + darks, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_variable_type( + flats_multiplier, [int, float], "flats_multiplier", [], methods_name + ) + __check_variable_type( + darks_multiplier, [int, float], "darks_multiplier", [], methods_name + ) + __check_variable_type(cutoff, [int, float], "cutoff", [], methods_name) + ################################### + + _check_valid_input_flats_darks(flats, darks) dark0 = cp.empty(darks.shape[1:], dtype=float32) flat0 = cp.empty(flats.shape[1:], dtype=float32) @@ -124,11 +151,8 @@ def minus_log(data: cp.ndarray) -> cp.ndarray: return -cp.log(data) -def _check_valid_input_normalise(data, flats, darks) -> None: - """Helper function to check the validity of inputs to normalisation functions""" - if data.ndim != 3: - raise ValueError("Input data must be a 3D stack of projections") - +def _check_valid_input_flats_darks(flats, darks) -> None: + """Helper function to check the validity of darks and flats""" if flats.ndim not in (2, 3): raise ValueError("Input flats must be 2D or 3D data only") diff --git a/httomolibgpu/prep/phase.py b/httomolibgpu/prep/phase.py index 41f8468a..9dc53854 100644 --- a/httomolibgpu/prep/phase.py +++ 
b/httomolibgpu/prep/phase.py @@ -38,10 +38,15 @@ ifft2 = Mock() fftshift = Mock() -from numpy import float32 -from typing import Literal, Optional, Tuple +from typing import Literal, Optional, Tuple, Union import math +from httomolibgpu.misc.utils import ( + __check_variable_type, + __check_if_data_3D_array, + __check_if_data_correct_type, +) + __all__ = [ "paganin_filter", "paganin_filter_savu_legacy", @@ -52,11 +57,11 @@ # different unit standards and also control of the filter driven by 'delta/beta' ratio # as opposed to 'alpha' in the TomoPy's implementation. def paganin_filter( - tomo: cp.ndarray, - pixel_size: float = 1.28, - distance: float = 1.0, - energy: float = 53.0, - ratio_delta_beta: float = 250, + data: cp.ndarray, + pixel_size: Union[float, int] = 1.28, + distance: Union[float, int] = 1.0, + energy: Union[float, int] = 53.0, + ratio_delta_beta: Union[float, int] = 250, calculate_padding_value_method: Literal[ "next_power_of_2", "next_fast_length", "use_pad_x_y" ] = "next_power_of_2", @@ -69,15 +74,15 @@ def paganin_filter( Parameters ---------- - tomo : cp.ndarray + data : cp.ndarray 3D array of f/d corrected tomographic projections. - pixel_size : float + pixel_size : float,int Detector pixel size (resolution) in micron units. - distance : float + distance : float,int Propagation distance of the wavefront from sample to detector in metre units. - energy : float + energy : float,int Beam energy in keV. - ratio_delta_beta : float + ratio_delta_beta : float,int The ratio of delta/beta, where delta is the phase shift and real part of the complex material refractive index and beta is the absorption. calculate_padding_value_method: str Method to calculate the padded size of the input data. Accepted values are 'next_power_of_2', 'next_fast_length' and 'use_pad_x_y`. @@ -91,31 +96,52 @@ def paganin_filter( cp.ndarray The 3D array of Paganin phase-filtered projection images. 
""" + ### Data and parameters checks ### + methods_name = "paganin_filter" + __check_variable_type(pixel_size, [int, float], "pixel_size", [], methods_name) + __check_variable_type(distance, [int, float], "distance", [], methods_name) + __check_variable_type(energy, [int, float], "energy", [], methods_name) + __check_variable_type( + ratio_delta_beta, [int, float], "ratio_delta_beta", [], methods_name + ) + __check_variable_type( + calculate_padding_value_method, + [str], + "calculate_padding_value_method", + ["next_power_of_2", "next_fast_length", "use_pad_x_y"], + methods_name, + ) + __check_variable_type(pad_x_y, [list, type(None)], "pad_x_y", [], methods_name) + __check_variable_type( + calc_peak_gpu_mem, [bool], "calc_peak_gpu_mem", [], methods_name + ) + ################################### + mem_stack = _DeviceMemStack() if calc_peak_gpu_mem else None - # Check the input data is valid - if not mem_stack and tomo.ndim != 3: - raise ValueError( - f"Invalid number of dimensions in data: {tomo.ndim}," - " please provide a stack of 2D projections." 
+    if not mem_stack:
+        # Check the input data is valid
+        __check_if_data_3D_array(data, methods_name)
+        __check_if_data_correct_type(
+            data, accepted_type=["float32", "uint16"], methods_name=methods_name
         )
 
     if mem_stack:
-        mem_stack.malloc(np.prod(tomo) * np.float32().itemsize)
-    dz_orig, dy_orig, dx_orig = tomo.shape if not mem_stack else tomo
+        mem_stack.malloc(np.prod(data) * np.float32().itemsize)
+    dz_orig, dy_orig, dx_orig = data.shape if not mem_stack else data
 
-    padded_tomo, pad_tup = _pad_projections(
-        tomo, calculate_padding_value_method, pad_x_y, mem_stack
+    padded_data, pad_tup = _pad_projections(
+        data, calculate_padding_value_method, pad_x_y, mem_stack
     )
-    dz, dy, dx = padded_tomo.shape if not mem_stack else padded_tomo
+    dz, dy, dx = padded_data.shape if not mem_stack else padded_data
 
-    # 3D FFT of tomo data
+    # 3D FFT of the data
     if mem_stack:
-        mem_stack.malloc(np.prod(padded_tomo) * np.complex64().itemsize)
-        mem_stack.free(np.prod(padded_tomo) * np.float32().itemsize)
-        fft_input = cp.empty(padded_tomo, dtype=cp.complex64)
+        mem_stack.malloc(np.prod(padded_data) * np.complex64().itemsize)
+        mem_stack.free(np.prod(padded_data) * np.float32().itemsize)
+        fft_input = cp.empty(padded_data, dtype=cp.complex64)
     else:
-        padded_tomo = cp.asarray(padded_tomo, dtype=cp.complex64)
-        fft_input = padded_tomo
+        padded_data = cp.asarray(padded_data, dtype=cp.complex64)
+        fft_input = padded_data
 
     fft_plan = get_fft_plan(fft_input, axes=(-2, -1))
     if mem_stack:
@@ -123,8 +149,8 @@
         mem_stack.free(fft_plan.work_area.mem.size)
     else:
         with fft_plan:
-            fft_tomo = fft2(padded_tomo, axes=(-2, -1), overwrite_x=True)
-            del padded_tomo
+            fft_data = fft2(padded_data, axes=(-2, -1), overwrite_x=True)
+            del padded_data
 
     del fft_input
     del fft_plan
@@ -168,12 +194,12 @@
     phase_filter = phase_filter / phase_filter.max()  # normalisation
 
     # Filter projections
-    fft_tomo *= phase_filter
+    fft_data *= phase_filter
     del phase_filter
 
     # Apply filter
and take inverse FFT ifft_input = ( - fft_tomo if not mem_stack else cp.empty(padded_tomo, dtype=cp.complex64) + fft_data if not mem_stack else cp.empty(padded_data, dtype=cp.complex64) ) ifft_plan = get_fft_plan(ifft_input, axes=(-2, -1)) if mem_stack: @@ -181,8 +207,8 @@ def paganin_filter( mem_stack.free(ifft_plan.work_area.mem.size) else: with ifft_plan: - ifft_filtered_tomo = ifft2(fft_tomo, axes=(-2, -1), overwrite_x=True).real - del fft_tomo + ifft_filtered_data = ifft2(fft_data, axes=(-2, -1), overwrite_x=True).real + del fft_data del ifft_plan del ifft_input @@ -194,28 +220,28 @@ def paganin_filter( ) if mem_stack: - mem_stack.malloc(np.prod(tomo) * np.float32().itemsize) # astype(cp.float32) + mem_stack.malloc(np.prod(data) * np.float32().itemsize) # astype(cp.float32) mem_stack.free( - np.prod(padded_tomo) * np.complex64().itemsize - ) # ifft_filtered_tomo + np.prod(padded_data) * np.complex64().itemsize + ) # ifft_filtered_data mem_stack.malloc( - np.prod(tomo) * np.float32().itemsize - ) # return _log_kernel(tomo) + np.prod(data) * np.float32().itemsize + ) # return _log_kernel(data) return mem_stack.highwater # crop the padded filtered data: - tomo = ifft_filtered_tomo[slc_indices].astype(cp.float32) - del ifft_filtered_tomo + data = ifft_filtered_data[slc_indices].astype(cp.float32) + del ifft_filtered_data # taking the negative log _log_kernel = cp.ElementwiseKernel( - "C tomo", + "C data", "C out", - "out = -log(tomo)", + "out = -log(data)", name="log_kernel", ) - return _log_kernel(tomo) + return _log_kernel(data) def _calculate_alpha(energy, distance_micron, ratio_delta_beta): @@ -296,7 +322,7 @@ def _calculate_pad_size( def _pad_projections( - tomo: cp.ndarray, + data: cp.ndarray, calculate_padding_value_method: Literal[ "next_power_of_2", "next_fast_length", "use_pad_x_y" ], @@ -309,7 +335,7 @@ def _pad_projections( Parameters ---------- - tomo : cp.ndarray + data : cp.ndarray 3d projection data calculate_padding_value_method: str Method to 
calculate the padded size of the input data. Accepted values are 'next_power_of_2', 'next_fast_length' and 'use_pad_x_y`. @@ -323,21 +349,21 @@ def _pad_projections( ndarray: padded 3d projection data tuple: a tuple with padding dimensions """ - full_shape_tomo = cp.shape(tomo) if not mem_stack else tomo + full_shape_data = cp.shape(data) if not mem_stack else data pad_list = _calculate_pad_size( - full_shape_tomo, calculate_padding_value_method, pad_x_y + full_shape_data, calculate_padding_value_method, pad_x_y ) if mem_stack: - padded_tomo = [ - sh + pad[0] + pad[1] for sh, pad in zip(full_shape_tomo, pad_list) + padded_data = [ + sh + pad[0] + pad[1] for sh, pad in zip(full_shape_data, pad_list) ] - mem_stack.malloc(np.prod(padded_tomo) * np.float32().itemsize) + mem_stack.malloc(np.prod(padded_data) * np.float32().itemsize) else: - padded_tomo = cp.pad(tomo, tuple(pad_list), "edge") + padded_data = cp.pad(data, tuple(pad_list), "edge") - return padded_tomo, tuple(pad_list) + return padded_data, tuple(pad_list) def _wavelength_micron(energy: float) -> float: @@ -370,7 +396,7 @@ def _reciprocal_coord(pixel_size: float, num_grid: int) -> np.ndarray: def paganin_filter_savu_legacy( - tomo: cp.ndarray, + data: cp.ndarray, pixel_size: float = 1.28, distance: float = 1.0, energy: float = 53.0, @@ -384,7 +410,7 @@ def paganin_filter_savu_legacy( Parameters ---------- - tomo : cp.ndarray + data : cp.ndarray 3D array of f/d corrected tomographic projections. pixel_size : float Detector pixel size (resolution) in micron units. 
@@ -404,7 +430,7 @@ def paganin_filter_savu_legacy( """ return paganin_filter( - tomo, + data, pixel_size, distance, energy, diff --git a/httomolibgpu/prep/stripe.py b/httomolibgpu/prep/stripe.py index 3418b89a..7e3a935c 100644 --- a/httomolibgpu/prep/stripe.py +++ b/httomolibgpu/prep/stripe.py @@ -42,8 +42,13 @@ ifft2 = Mock() fftshift = Mock() +from typing import Optional, Tuple, Union, Literal -from typing import Optional, Tuple, Union +from httomolibgpu.misc.utils import ( + __check_variable_type, + __check_if_data_3D_array, + __check_if_data_correct_type, +) __all__ = [ "remove_stripe_based_sorting", @@ -56,8 +61,8 @@ def remove_stripe_based_sorting( data: Union[cp.ndarray, np.ndarray], - size: int = 11, - dim: int = 1, + size: Optional[int] = 11, + dim: Literal[1, 2] = 1, ) -> Union[cp.ndarray, np.ndarray]: """ Remove full and partial stripe artifacts from sinogram using Nghia Vo's @@ -73,9 +78,9 @@ def remove_stripe_based_sorting( data : ndarray 3D tomographic data as a CuPy or NumPy array. size : int, optional - Window size of the median filter. - dim : {1, 2}, optional - Dimension of the window. + Window size of the median filter. When None, size is estimated based on the input data size + dim : int + Dimension of the window. {1, 2} Returns ------- @@ -83,6 +88,15 @@ def remove_stripe_based_sorting( Corrected 3D tomographic data as a CuPy or NumPy array. 
""" + ### Data and parameters checks ### + methods_name = "remove_stripe_based_sorting" + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_variable_type(size, [int, type(None)], "size", [], methods_name) + __check_variable_type(dim, [int], "dim", [1, 2], methods_name) + ################################### if size is None: if data.shape[2] > 2000: @@ -119,7 +133,7 @@ def _rs_sort(sinogram, size, dim): def remove_stripe_ti( data: Union[cp.ndarray, np.ndarray], - beta: float = 0.1, + beta: Union[float, int] = 0.1, ) -> Union[cp.ndarray, np.ndarray]: """ Removes stripes with the method of V. Titarenko (TomoCuPy implementation). @@ -129,7 +143,7 @@ def remove_stripe_ti( ---------- data : ndarray 3D stack of projections as a CuPy array. - beta : float, optional + beta : float, int filter parameter, lower values increase the filter strength. Default is 0.1. @@ -138,6 +152,14 @@ def remove_stripe_ti( ndarray 3D array of de-striped projections. """ + ### Data and parameters checks ### + methods_name = "remove_stripe_ti" + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_variable_type(beta, [int, float], "beta", [], methods_name) + ################################### _, _, dx_orig = data.shape if (dx_orig % 2) != 0: @@ -624,7 +646,7 @@ def _repair_memory_fragmentation_if_needed(fragmentation_threshold: float = 0.2) def remove_stripe_fw( data: cp.ndarray, - sigma: float = 2, + sigma: Union[float, int] = 2, wname: str = "db5", level: Optional[int] = None, calc_peak_gpu_mem: bool = False, @@ -637,13 +659,13 @@ def remove_stripe_fw( ---------- data : ndarray 3D tomographic data as a CuPy array. - sigma : float + sigma : float, int Damping parameter in Fourier space. 
    wname : str
-        Type of the wavelet filter: select from 'db5', 'db7', 'haar', 'sym5', 'sym16' 'bior4.4'.
+        Type of the wavelet filter: select from 'db5', 'db7', 'haar', 'sym5', 'sym16', 'bior4.4'. See more in PyWavelets documentation.
     level : int, optional
         Number of discrete wavelet transform levels.
-    calc_peak_gpu_mem: str:
+    calc_peak_gpu_mem : bool
         Parameter to support memory estimation in HTTomo. Irrelevant to the method itself and can be ignored by user.
 
     Returns
     -------
     ndarray
         Stripe-corrected 3D tomographic data as a CuPy array.
     """
+    ### Data and parameters checks ###
+    methods_name = "remove_stripe_fw"
+    if not calc_peak_gpu_mem:
+        __check_if_data_3D_array(data, methods_name)
+        __check_if_data_correct_type(
+            data, accepted_type=["float32"], methods_name=methods_name
+        )
+    __check_variable_type(sigma, [int, float], "sigma", [], methods_name)
+    __check_variable_type(wname, [str], "wname", [], methods_name)
+    __check_variable_type(level, [int, type(None)], "level", [], methods_name)
+    ###################################
 
     if level is None:
         if calc_peak_gpu_mem:
@@ -783,10 +816,10 @@
 # *************************************************************************** #
 def remove_all_stripe(
     data: cp.ndarray,
-    snr: float = 3.0,
+    snr: Union[float, int] = 3.0,
     la_size: int = 61,
     sm_size: int = 21,
-    dim: int = 1,
+    dim: Literal[1, 2] = 1,
 ) -> cp.ndarray:
     """
     Remove all types of stripe artifacts from sinogram using Nghia Vo's
@@ -796,14 +829,14 @@
     ----------
     data : ndarray
         3D tomographic data as a CuPy array.
-    snr : float, optional
+    snr : float, int
         Ratio used to locate large stripes. Greater is less sensitive.
-    la_size : int, optional
+    la_size : int
        Window size of the median filter to remove large stripes.
-    sm_size : int, optional
+    sm_size : int
        Window size of the median filter to remove small-to-medium stripes.
-    dim : {1, 2}, optional
+    dim : {1, 2}
        Dimension of the window.
Returns @@ -813,6 +846,18 @@ def remove_all_stripe( """ + ### Data and parameters checks ### + methods_name = "remove_all_stripe" + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32"], methods_name=methods_name + ) + __check_variable_type(snr, [int, float], "snr", [], methods_name) + __check_variable_type(la_size, [int], "la_size", [], methods_name) + __check_variable_type(sm_size, [int], "sm_size", [], methods_name) + __check_variable_type(dim, [int], "dim", [1, 2], methods_name) + ################################### + matindex = _create_matindex(data.shape[2], data.shape[0]) for m in range(data.shape[1]): sino = data[:, m, :] @@ -956,22 +1001,22 @@ def raven_filter( data : cp.ndarray Input CuPy 3D array either float32 or uint16 data type. - pad_y : int, optional + pad_y : int Pad the top and bottom of projections. - pad_x : int, optional + pad_x : int Pad the left and right of projections. - pad_method : str, optional + pad_method : str Numpy pad method to use. - uvalue : int, optional + uvalue : int Cut-off frequency. To control the strength of filter, e.g., strong=10, moderate=20, weak=50. - nvalue : int, optional + nvalue : int The shape of filter. - vvalue : int, optional + vvalue : int Number of image-rows around the zero-frequency to be applied the filter. Returns @@ -984,9 +1029,19 @@ def raven_filter( ValueError If the input array is not three dimensional. 
""" - if data.dtype != cp.float32: - raise ValueError("The input data should be float32 data type") - + ### Data and parameters checks ### + methods_name = "raven_filter" + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32"], methods_name=methods_name + ) + __check_variable_type(pad_y, [int], "pad_y", [], methods_name) + __check_variable_type(pad_x, [int], "pad_x", [], methods_name) + __check_variable_type(pad_method, [str], "pad_method", [], methods_name) + __check_variable_type(uvalue, [int], "uvalue", [], methods_name) + __check_variable_type(nvalue, [int], "nvalue", [], methods_name) + __check_variable_type(vvalue, [int], "vvalue", [], methods_name) + ################################### # Padding of the sinogram data = cp.pad(data, ((pad_y, pad_y), (0, 0), (pad_x, pad_x)), mode=pad_method) diff --git a/httomolibgpu/recon/algorithm.py b/httomolibgpu/recon/algorithm.py index 83c9c6c4..ed074488 100644 --- a/httomolibgpu/recon/algorithm.py +++ b/httomolibgpu/recon/algorithm.py @@ -38,7 +38,14 @@ RecToolsIRCuPy = Mock() from numpy import float32 -from typing import Optional, Type, Union +from typing import Optional, Type, Union, Literal + + +from httomolibgpu.misc.utils import ( + __check_variable_type, + __check_if_data_3D_array, + __check_if_data_correct_type, +) __all__ = [ "FBP2d_astra", @@ -63,7 +70,7 @@ def FBP2d_astra( filter_parameter: Optional[float] = None, filter_d: Optional[float] = None, recon_size: Optional[int] = None, - recon_mask_radius: float = 0.95, + recon_mask_radius: Optional[float] = None, gpu_id: int = 0, ) -> np.ndarray: """ @@ -89,12 +96,11 @@ def FBP2d_astra( filter_d: float, optional D parameter value for 'shepp-logan', 'cosine', 'hamming' and 'hann' filter types. recon_size : int, optional - The [recon_size, recon_size] shape of the reconstructed slice in pixels. - By default (None), the reconstructed size will be the dimension of the horizontal detector. 
- recon_mask_radius: float + The squared size of the reconstructed slice. By default (None), the reconstructed size will be the dimension of the horizontal detector. + recon_mask_radius: float, optional The radius of the circular mask that applies to the reconstructed slice in order to crop out some undesirable artifacts. The values outside the given diameter will be set to zero. - To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required. + To implement the cropping one can use the range [0.7-1.0] or set to None (2.0) when no cropping is needed. gpu_id : int A GPU device index to perform operation on. @@ -103,6 +109,32 @@ def FBP2d_astra( np.ndarray The FBP reconstructed volume as a numpy array. """ + ### Data and parameters checks ### + methods_name = "FBP2d_astra" + __common_data_parameters_check( + data, + angles, + methods_name, + center, + detector_pad, + recon_size, + recon_mask_radius, + gpu_id, + ) + __check_variable_type( + filter_type, + [str], + "filter_type", + ["none", "shepp-logan", "tukey", "gaussian", "blackman", "kaiser"], + methods_name, + ) + __check_variable_type( + filter_parameter, [float, int, type(None)], "filter_parameter", [], methods_name + ) + __check_variable_type( + filter_d, [float, int, type(None)], "filter_d", [], methods_name + ) + ################################### data_shape = np.shape(data) if recon_size is None: @@ -161,12 +193,11 @@ def FBP3d_tomobar( filter_freq_cutoff : float Cutoff frequency parameter for the SINC filter, the lower values may produce better contrast but noisy reconstruction. The filter change will also affect the dynamic range of the reconstructed image. recon_size : int, optional - The [recon_size, recon_size] shape of the reconstructed slice in pixels. - By default (None), the reconstructed size will be the dimension of the horizontal detector. + The squared size of the reconstructed slice. 
By default (None), the reconstructed size will be the dimension of the horizontal detector. recon_mask_radius: float, optional The radius of the circular mask that applies to the reconstructed slice in order to crop out some undesirable artifacts. The values outside the given diameter will be set to zero. - To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required. + To implement the cropping one can use the range [0.7-1.0] or set to None (2.0) when no cropping is needed. gpu_id : int A GPU device index to perform operation on. @@ -175,6 +206,22 @@ def FBP3d_tomobar( cp.ndarray FBP reconstructed volume as a CuPy array. """ + ### Data and parameters checks ### + methods_name = "FBP3d_tomobar" + __common_data_parameters_check( + data, + angles, + methods_name, + center, + detector_pad, + recon_size, + recon_mask_radius, + gpu_id, + ) + __check_variable_type( + filter_freq_cutoff, [float, int], "filter_freq_cutoff", [], methods_name + ) + ################################### RecToolsCP = _instantiate_direct_recon_class( data, angles, center, detector_pad, recon_size, gpu_id @@ -199,11 +246,11 @@ def LPRec3d_tomobar( filter_type: str = "shepp", filter_freq_cutoff: float = 1.0, recon_size: Optional[int] = None, - recon_mask_radius: float = 0.95, - power_of_2_oversampling: Optional[bool] = True, - power_of_2_cropping: Optional[bool] = False, - min_mem_usage_filter: Optional[bool] = True, - min_mem_usage_ifft2: Optional[bool] = True, + recon_mask_radius: Optional[float] = 0.95, + power_of_2_oversampling: bool = True, + power_of_2_cropping: bool = False, + min_mem_usage_filter: bool = True, + min_mem_usage_ifft2: bool = True, ) -> cp.ndarray: """ Fourier direct inversion in 3D on unequally spaced (also called as Log-Polar) grids using @@ -226,18 +273,40 @@ def LPRec3d_tomobar( filter_freq_cutoff : float Cutoff frequency parameter for a filter. 
recon_size : int, optional - The [recon_size, recon_size] shape of the reconstructed slice in pixels. - By default (None), the reconstructed size will be the dimension of the horizontal detector. - recon_mask_radius: float + The squared size of the reconstructed slice. By default (None), the reconstructed size will be the dimension of the horizontal detector. + recon_mask_radius: float, optional The radius of the circular mask that applies to the reconstructed slice in order to crop out some undesirable artifacts. The values outside the given diameter will be set to zero. - To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required. + To implement the cropping one can use the range [0.7-1.0] or set to None (2.0) when no cropping is needed. Returns ------- cp.ndarray The Log-polar Fourier reconstructed volume as a CuPy array. """ + ### Data and parameters checks ### + methods_name = "LPRec3d_tomobar" + __common_data_parameters_check( + data, + angles, + methods_name, + center, + detector_pad, + recon_size, + recon_mask_radius, + gpu_id=0, + ) + __check_variable_type( + filter_type, + [str], + "filter_type", + ["none", "ramp", "shepp", "cosine", "cosine2", "hamming", "hann", "parzen"], + methods_name, + ) + __check_variable_type( + filter_freq_cutoff, [float, int], "filter_freq_cutoff", [], methods_name + ) + ################################### RecToolsCP = _instantiate_direct_recon_class( data, angles, center, detector_pad, recon_size, 0 @@ -265,7 +334,7 @@ def SIRT3d_tomobar( center: Optional[float] = None, detector_pad: Union[bool, int] = False, recon_size: Optional[int] = None, - recon_mask_radius: float = 0.95, + recon_mask_radius: Optional[float] = 0.95, iterations: int = 300, nonnegativity: bool = True, gpu_id: int = 0, @@ -288,10 +357,10 @@ def SIRT3d_tomobar( recon_size : int, optional The [recon_size, recon_size] shape of the reconstructed slice in pixels. 
By default (None), the reconstructed size will be the dimension of the horizontal detector. - recon_mask_radius: float + recon_mask_radius: float, optional The radius of the circular mask that applies to the reconstructed slice in order to crop out some undesirable artifacts. The values outside the given diameter will be set to zero. - To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required. + To implement the cropping one can use the range [0.7-1.0] or set to None (2.0) when no cropping is needed. iterations : int The number of SIRT iterations. nonnegativity : bool @@ -304,6 +373,20 @@ def SIRT3d_tomobar( cp.ndarray The SIRT reconstructed volume as a CuPy array. """ + ### Data and parameters checks ### + methods_name = "SIRT3d_tomobar" + __common_data_parameters_check( + data, + angles, + methods_name, + center, + detector_pad, + recon_size, + recon_mask_radius, + gpu_id, + ) + __common_iterative_basic_parameters_check(methods_name, iterations, nonnegativity) + ################################### RecToolsCP = _instantiate_iterative_recon_class( data, @@ -336,7 +419,7 @@ def CGLS3d_tomobar( center: Optional[float] = None, detector_pad: Union[bool, int] = False, recon_size: Optional[int] = None, - recon_mask_radius: float = 0.95, + recon_mask_radius: Optional[float] = 0.95, iterations: int = 20, nonnegativity: bool = True, gpu_id: int = 0, @@ -357,12 +440,11 @@ def CGLS3d_tomobar( Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction. Set to True to perform an automated padding or specify a certain value as an integer. recon_size : int, optional - The [recon_size, recon_size] shape of the reconstructed slice in pixels. - By default (None), the reconstructed size will be the dimension of the horizontal detector. - recon_mask_radius: float + The squared size of the reconstructed slice. By default (None), the reconstructed size will be the dimension of the horizontal detector. 
+ recon_mask_radius: float, optional The radius of the circular mask that applies to the reconstructed slice in order to crop out some undesirable artifacts. The values outside the given diameter will be set to zero. - To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required. + To implement the cropping one can use the range [0.7-1.0] or set to None (2.0) when no cropping is needed. iterations : int The number of CGLS iterations. nonnegativity : bool @@ -375,6 +457,20 @@ def CGLS3d_tomobar( cp.ndarray The CGLS reconstructed volume as a CuPy array. """ + ### Data and parameters checks ### + methods_name = "CGLS3d_tomobar" + __common_data_parameters_check( + data, + angles, + methods_name, + center, + detector_pad, + recon_size, + recon_mask_radius, + gpu_id, + ) + __common_iterative_basic_parameters_check(methods_name, iterations, nonnegativity) + ################################### RecToolsCP = _instantiate_iterative_recon_class( data, angles, center, detector_pad, recon_size, gpu_id, datafidelity="LS" @@ -401,10 +497,10 @@ def FISTA3d_tomobar( center: Optional[float] = None, detector_pad: Union[bool, int] = False, recon_size: Optional[int] = None, - recon_mask_radius: float = 0.95, + recon_mask_radius: Optional[float] = 0.95, iterations: int = 20, subsets_number: int = 6, - regularisation_type: str = "PD_TV", + regularisation_type: Literal["ROF_TV", "PD_TV"] = "PD_TV", regularisation_parameter: float = 0.000001, regularisation_iterations: int = 50, regularisation_half_precision: bool = True, @@ -428,12 +524,11 @@ def FISTA3d_tomobar( Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction. Set to True to perform an automated padding or specify a certain value as an integer. recon_size : int, optional - The [recon_size, recon_size] shape of the reconstructed slice in pixels. - By default (None), the reconstructed size will be the dimension of the horizontal detector. 
- recon_mask_radius: float + The squared size of the reconstructed slice. By default (None), the reconstructed size will be the dimension of the horizontal detector. + recon_mask_radius: float, optional The radius of the circular mask that applies to the reconstructed slice in order to crop out some undesirable artifacts. The values outside the given diameter will be set to zero. - To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required. + To implement the cropping one can use the range [0.7-1.0] or set to None (2.0) when no cropping is needed. iterations : int The number of FISTA algorithm iterations. subsets_number: int @@ -456,6 +551,29 @@ def FISTA3d_tomobar( cp.ndarray The FISTA reconstructed volume as a CuPy array. """ + ### Data and parameters checks ### + methods_name = "FISTA3d_tomobar" + __common_data_parameters_check( + data, + angles, + methods_name, + center, + detector_pad, + recon_size, + recon_mask_radius, + gpu_id, + ) + __common_iterative_basic_parameters_check(methods_name, iterations, nonnegativity) + __common_iterative_parameters_check( + methods_name, + subsets_number, + regularisation_type, + regularisation_parameter, + regularisation_iterations, + regularisation_half_precision, + ) + ################################### + RecToolsCP = _instantiate_iterative_recon_class( data, angles, center, detector_pad, recon_size, gpu_id, datafidelity="LS" ) @@ -493,10 +611,10 @@ def ADMM3d_tomobar( center: Optional[float] = None, detector_pad: Union[bool, int] = False, recon_size: Optional[int] = None, - recon_mask_radius: float = 0.95, + recon_mask_radius: Optional[float] = 0.95, iterations: int = 3, subsets_number: int = 24, - initialisation: Optional[str] = "FBP", + initialisation: Literal["FBP", "CGLS", None] = "FBP", ADMM_rho_const: float = 1.0, ADMM_relax_par: float = 1.7, regularisation_type: str = "PD_TV", @@ -523,12 +641,11 @@ def ADMM3d_tomobar( Detector width padding with edge values to remove 
circle/arc type artifacts in the reconstruction. Set to True to perform an automated padding or specify a certain value as an integer. recon_size : int, optional - The [recon_size, recon_size] shape of the reconstructed slice in pixels. - By default (None), the reconstructed size will be the dimension of the horizontal detector. - recon_mask_radius: float + The squared size of the reconstructed slice. By default (None), the reconstructed size will be the dimension of the horizontal detector. + recon_mask_radius: float, optional The radius of the circular mask that applies to the reconstructed slice in order to crop out some undesirable artifacts. The values outside the given diameter will be set to zero. - To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required. + To implement the cropping one can use the range [0.7-1.0] or set to None (2.0) when no cropping is needed. iterations : int The number of ADMM algorithm iterations. The recommended range is between 3 to 5 with initialisation and more than 10 without. Assuming that the subsets_number is reasonably large (>12). @@ -558,10 +675,37 @@ def ADMM3d_tomobar( cp.ndarray The ADMM reconstructed volume as a CuPy array. 
""" - if initialisation not in ["FBP", "CGLS", None]: - raise ValueError( - "The acceptable values for initialisation are 'FBP','CGLS' and None" - ) + ### Data and parameters checks ### + methods_name = "ADMM3d_tomobar" + __common_data_parameters_check( + data, + angles, + methods_name, + center, + detector_pad, + recon_size, + recon_mask_radius, + gpu_id, + ) + __common_iterative_basic_parameters_check(methods_name, iterations, nonnegativity) + __common_iterative_parameters_check( + methods_name, + subsets_number, + regularisation_type, + regularisation_parameter, + regularisation_iterations, + regularisation_half_precision, + ) + ################################### + __check_variable_type( + initialisation, + [str, type(None)], + "initialisation", + ["FBP", "CGLS"], + methods_name, + ) + __check_variable_type(ADMM_rho_const, [float], "ADMM_rho_const", [], methods_name) + __check_variable_type(ADMM_relax_par, [float], "ADMM_relax_par", [], methods_name) if initialisation is not None: if detector_pad == True: @@ -777,3 +921,75 @@ def __estimate_detectorHoriz_padding(detX_size) -> int: padded_value_exact = int(np.sqrt(2 * (det_half**2))) - det_half padded_add_margin = padded_value_exact // 2 return padded_value_exact + padded_add_margin + + +def __common_data_parameters_check( + data, + angles, + methods_name, + center, + detector_pad, + recon_size, + recon_mask_radius, + gpu_id, +): + ### Data and parameters checks ### + __check_if_data_3D_array(data, methods_name) + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + if len(angles) != data.shape[0]: + err_str = f"The angles length {len(angles)} is not equal to the input data angles dimension {data.shape[0]} for method '{methods_name}'." 
+ raise ValueError(err_str) + __check_variable_type(center, [float, int, type(None)], "center", [], methods_name) + __check_variable_type(detector_pad, [bool, int], "detector_pad", [], methods_name) + __check_variable_type(recon_size, [int, type(None)], "recon_size", [], methods_name) + __check_variable_type( + recon_mask_radius, + [float, int, type(None)], + "recon_mask_radius", + [], + methods_name, + ) + __check_variable_type(gpu_id, [int], "gpu_id", [], methods_name) + ################################### + + +def __common_iterative_basic_parameters_check(methods_name, iterations, nonnegativity): + __check_variable_type(iterations, [int], "iterations", [], methods_name) + __check_variable_type(nonnegativity, [bool], "nonnegativity", [], methods_name) + + +def __common_iterative_parameters_check( + methods_name, + subsets_number, + regularisation_type, + regularisation_parameter, + regularisation_iterations, + regularisation_half_precision, +): + __check_variable_type(subsets_number, [int], "subsets_number", [], methods_name) + __check_variable_type( + regularisation_type, + [str], + "regularisation_type", + ["ROF_TV", "PD_TV"], + methods_name, + ) + __check_variable_type( + regularisation_parameter, + [float, int], + "regularisation_parameter", + [], + methods_name, + ) + __check_variable_type( + regularisation_iterations, [int], "regularisation_iterations", [], methods_name + ) + __check_variable_type( + regularisation_half_precision, + [bool], + "regularisation_half_precision", + [], + methods_name, + ) diff --git a/httomolibgpu/recon/rotation.py b/httomolibgpu/recon/rotation.py index 09c866a9..1c1e6dbb 100644 --- a/httomolibgpu/recon/rotation.py +++ b/httomolibgpu/recon/rotation.py @@ -110,6 +110,28 @@ def find_center_vo( float32 Rotation axis location with a subpixel precision. 
""" + ### Data and parameters checks ### + methods_name = "find_center_vo" + __check_if_data_correct_type( + data, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_variable_type(ind, [int, type(None)], "ind", [], methods_name) + __check_variable_type(average_radius, [int], "average_radius", [], methods_name) + __check_variable_type( + cor_initialisation_value, + [float, int, type(None)], + "cor_initialisation_value", + [], + methods_name, + ) + __check_variable_type(smin, [int], "smin", [], methods_name) + __check_variable_type(smax, [int], "smax", [], methods_name) + __check_variable_type(srad, [int, float], "srad", [], methods_name) + __check_variable_type(step, [int, float], "step", [], methods_name) + __check_variable_type(ratio, [int, float], "ratio", [], methods_name) + __check_variable_type(drop, [int], "drop", [], methods_name) + ################################### + # if 2d sinogram is given it is extended into a 3D array along the vertical dimension if data.ndim == 2: data = cp.expand_dims(data, 1) @@ -444,8 +466,7 @@ def find_center_360( win_width : int, optional Window width used for finding the overlap area. side : {None, left, right}, optional - Chose between "left", "right" or "None" which corresponds to fully - automated determination of the side. + Chose between "left", "right" and "None" corresponds to fully automated determination of the side. denoise : bool, optional Apply the Gaussian filter if True. 
norm : bool, optional @@ -472,14 +493,14 @@ def find_center_360( __check_if_data_correct_type( data, accepted_type=["float32", "uint16"], methods_name=methods_name ) - if ind is not None: - __check_variable_type(ind, int, "ind", [], methods_name) - if side is not None: - __check_variable_type(side, str, "side", ["left", "right"], methods_name) - __check_variable_type(win_width, int, "win_width", [], methods_name) - __check_variable_type(denoise, bool, "denoise", [], methods_name) - __check_variable_type(norm, bool, "norm", [], methods_name) - __check_variable_type(use_overlap, bool, "use_overlap", [], methods_name) + __check_variable_type(ind, [int, type(None)], "ind", [], methods_name) + __check_variable_type( + side, [str, type(None)], "side", ["left", "right"], methods_name + ) + __check_variable_type(win_width, [int], "win_width", [], methods_name) + __check_variable_type(denoise, [bool], "denoise", [], methods_name) + __check_variable_type(norm, [bool], "norm", [], methods_name) + __check_variable_type(use_overlap, [bool], "use_overlap", [], methods_name) ################################### if ind is None: @@ -798,6 +819,20 @@ def find_center_pc( np.float32 Rotation axis location. 
""" + ### Data and parameters checks ### + methods_name = "find_center_pc" + __check_if_data_correct_type( + proj1, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_if_data_correct_type( + proj2, accepted_type=["float32", "uint16"], methods_name=methods_name + ) + __check_variable_type(tol, [int, float], "tol", [], methods_name) + __check_variable_type( + rotc_guess, [int, float, type(None)], "rotc_guess", [], methods_name + ) + ################################### + # proj1 = data_checker( proj1, infsnans_correct=True, diff --git a/tests/test_recon/test_algorithm.py b/tests/test_recon/test_algorithm.py index 91c85448..2e0045ab 100644 --- a/tests/test_recon/test_algorithm.py +++ b/tests/test_recon/test_algorithm.py @@ -93,7 +93,7 @@ def test_reconstruct_FBP3d_tomobar_2(data, flats, darks, ensure_clean_memory): np.linspace(5.0 * np.pi / 360.0, 180.0 * np.pi / 360.0, data.shape[0]), 15.5, filter_freq_cutoff=1.1, - recon_mask_radius=None, + recon_mask_radius=2.0, ) recon_data = recon_data.get() diff --git a/zenodo-tests/test_prep/test_phase.py b/zenodo-tests/test_prep/test_phase.py index 55cf3f0e..67b68956 100644 --- a/zenodo-tests/test_prep/test_phase.py +++ b/zenodo-tests/test_prep/test_phase.py @@ -18,7 +18,7 @@ ) def test_paganin_filter_i12_dataset3(i12_dataset3, test_case, ensure_clean_memory): padding_method, pad_x_y, test_max, test_min = test_case - inputdata = cp.empty((3, 2050, 2560)) + inputdata = cp.empty((3, 2050, 2560)).astype(cp.float32) inputdata[0, :, :] = i12_dataset3[0] inputdata[1, :, :] = i12_dataset3[1] inputdata[2, :, :] = i12_dataset3[2] diff --git a/zenodo-tests/test_recon/test_rotation.py b/zenodo-tests/test_recon/test_rotation.py index 4a9a5d11..d6c74959 100644 --- a/zenodo-tests/test_recon/test_rotation.py +++ b/zenodo-tests/test_recon/test_rotation.py @@ -111,7 +111,7 @@ def test_center_pc_i12_dataset3(i12_dataset3, ensure_clean_memory): darks = i12_dataset3[3] del i12_dataset3 - projdata = cp.empty((2, 
np.shape(proj1)[0], np.shape(proj1)[1])) + projdata = cp.empty((2, np.shape(proj1)[0], np.shape(proj1)[1])).astype(cp.float32) projdata[0, :, :] = proj1 projdata[1, :, :] = proj2 From bf08466af67c898e5d03ed7f667e8aef32aa7b5f Mon Sep 17 00:00:00 2001 From: algol Date: Thu, 26 Feb 2026 16:34:55 +0000 Subject: [PATCH 5/7] extension of data types --- httomolibgpu/misc/utils.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/httomolibgpu/misc/utils.py b/httomolibgpu/misc/utils.py index 18ff73dc..feb6784c 100644 --- a/httomolibgpu/misc/utils.py +++ b/httomolibgpu/misc/utils.py @@ -20,6 +20,7 @@ # --------------------------------------------------------------------------- """Various utilities for data inspection and correction""" +import numpy as np from httomolibgpu import cupywrapper from typing import Optional @@ -153,14 +154,21 @@ def __check_variable_type( literals: list, methods_name: str, ): + datatype_int_extended = [np.int8, np.int16, np.int32, np.int64] + datatype_float_extended = [np.float16, np.float32, np.float64] + if int in expected_datatype: + expected_datatype.extend(datatype_int_extended) + if float in expected_datatype: + expected_datatype.extend(datatype_float_extended) + if type(variable) not in expected_datatype: # compare variable types and raise error - err_str = f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must have a type {expected_datatype}." + err_str = f"Variable '{variable_name}' of '{methods_name}' method given as '{variable}' (type '{type(variable)}') must have a type {expected_datatype}." raise ValueError(err_str) if literals and variable is not None: # check if variable should be a literal if variable not in literals: - err_str = f"Variable '{variable_name}' of '{methods_name}' method given as {variable} must be provided as {literals}." + err_str = f"Variable '{variable_name}' of '{methods_name}' method given as '{variable}' must be provided as {literals}." 
raise ValueError(err_str) From c6f72a02cf8e79e458bd24191b6e4d3666ae5301 Mon Sep 17 00:00:00 2001 From: algol Date: Thu, 26 Feb 2026 17:59:19 +0000 Subject: [PATCH 6/7] adding the filter --- httomolibgpu/recon/algorithm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/httomolibgpu/recon/algorithm.py b/httomolibgpu/recon/algorithm.py index ed074488..c461665f 100644 --- a/httomolibgpu/recon/algorithm.py +++ b/httomolibgpu/recon/algorithm.py @@ -125,7 +125,7 @@ def FBP2d_astra( filter_type, [str], "filter_type", - ["none", "shepp-logan", "tukey", "gaussian", "blackman", "kaiser"], + ["none", "ram-lak", "shepp-logan", "tukey", "gaussian", "blackman", "kaiser"], methods_name, ) __check_variable_type( From 97f9716ed4eb9886c7d042cab9f13b7dc2668231 Mon Sep 17 00:00:00 2001 From: algol Date: Thu, 26 Feb 2026 23:07:52 +0000 Subject: [PATCH 7/7] reverting mask radius to 0.95 --- httomolibgpu/recon/algorithm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/httomolibgpu/recon/algorithm.py b/httomolibgpu/recon/algorithm.py index c461665f..3a266990 100644 --- a/httomolibgpu/recon/algorithm.py +++ b/httomolibgpu/recon/algorithm.py @@ -70,7 +70,7 @@ def FBP2d_astra( filter_parameter: Optional[float] = None, filter_d: Optional[float] = None, recon_size: Optional[int] = None, - recon_mask_radius: Optional[float] = None, + recon_mask_radius: Optional[float] = 0.95, gpu_id: int = 0, ) -> np.ndarray: """