diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000..b737c85f3
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,6 @@
+repos:
+  - repo: https://github.com/psf/black
+    # Version can be updated by running "pre-commit autoupdate"
+    rev: 23.10.1
+    hooks:
+      - id: black
diff --git a/doc/conf.py b/doc/conf.py
index 089ec6613..6305cbfc1 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -91,11 +91,13 @@

 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-html_logo = '_static/hyperspy_logo.png'
+html_logo = "_static/hyperspy_logo.png"

 # -- Options for sphinx_favicon extension -----------------------------------

-favicons = ["hyperspy.ico", ]
+favicons = [
+    "hyperspy.ico",
+]

 # Check links to API when building documentation
 nitpicky = False
@@ -110,10 +112,10 @@
 if Version(numpydoc.__version__) >= Version("1.6.0rc0"):
     numpydoc_validation_checks = {"all", "ES01", "EX01", "GL02", "GL03", "SA01", "SS06"}

-autoclass_content = 'both'
+autoclass_content = "both"

 autodoc_default_options = {
-    'show-inheritance': True,
+    "show-inheritance": True,
 }

 toc_object_entries_show_parents = "hide"
diff --git a/examples/model_fitting/EELS_curve_fitting.py b/examples/model_fitting/EELS_curve_fitting.py
index 9bc1e7de0..d35979758 100644
--- a/examples/model_fitting/EELS_curve_fitting.py
+++ b/examples/model_fitting/EELS_curve_fitting.py
@@ -14,10 +14,8 @@

 s.add_elements(("Mn", "O"))
 s.set_microscope_parameters(
-    beam_energy=300,
-    convergence_angle=24.6,
-    collection_angle=13.6
-    )
+    beam_energy=300, convergence_angle=24.6, collection_angle=13.6
+)

 m = s.create_model(ll=ll)
 m.enable_fine_structure()
diff --git a/examples/model_fitting/plot_residual.py b/examples/model_fitting/plot_residual.py
index e499356fb..86354d34a 100644
--- a/examples/model_fitting/plot_residual.py
+++ b/examples/model_fitting/plot_residual.py
@@ -9,28 +9,28 @@
 import numpy as np
 import hyperspy.api as hs

-#%%
+# %%
 # Create a signal:
 data = np.arange(1000, dtype=np.int64).reshape((10, 100))
 s = hs.signals.Signal1D(data)

-#%%
+# %%
 # Add noise:
 s.add_poissonian_noise(random_state=0)

-#%%
+# %%
 # Create model:
 m = s.create_model()
 line = hs.model.components1D.Expression("a * x + b", name="Affine")
 m.append(line)

-#%%
+# %%
 # Fit for all navigation positions:
 m.multifit()

-#%%
+# %%
 # Plot the fitted model with residual:
 m.plot(plot_residual=True)

-#%%
+# %%
 # sphinx_gallery_thumbnail_number = 2
diff --git a/exspy/__init__.py b/exspy/__init__.py
index b6019c657..181d85404 100644
--- a/exspy/__init__.py
+++ b/exspy/__init__.py
@@ -1,4 +1,3 @@
-
 from . import components
 from . import data
 from .
import models @@ -11,7 +10,7 @@ "components", "data", "preferences", - "material" + "material", "models", "signals", ] diff --git a/exspy/_defaults_parser.py b/exspy/_defaults_parser.py index 87879aaad..41b2a7b22 100644 --- a/exspy/_defaults_parser.py +++ b/exspy/_defaults_parser.py @@ -27,7 +27,7 @@ config_path = Path("~/.exspy").expanduser() config_path.mkdir(parents=True, exist_ok=True) -defaults_file = Path(config_path, 'exspyrc') +defaults_file = Path(config_path, "exspyrc") _logger = logging.getLogger(__name__) @@ -37,52 +37,56 @@ def guess_gos_path(): # If DM is installed, use the GOS tables from the default # installation # location in windows - program_files = os.environ['PROGRAMFILES'] - gos = 'Gatan\\DigitalMicrograph\\EELS Reference Data\\H-S GOS Tables' + program_files = os.environ["PROGRAMFILES"] + gos = "Gatan\\DigitalMicrograph\\EELS Reference Data\\H-S GOS Tables" gos_path = Path(program_files, gos) # Else, use the default location in the .hyperspy forlder - if not gos_path.is_dir() and 'PROGRAMFILES(X86)' in os.environ: - program_files = os.environ['PROGRAMFILES(X86)'] + if not gos_path.is_dir() and "PROGRAMFILES(X86)" in os.environ: + program_files = os.environ["PROGRAMFILES(X86)"] gos_path = Path(program_files, gos) if not gos_path.is_dir(): - gos_path = Path(config_path, 'EELS_GOS') + gos_path = Path(config_path, "EELS_GOS") else: - gos_path = Path(config_path, 'EELS_GOS') + gos_path = Path(config_path, "EELS_GOS") return gos_path class EELSConfig(t.HasTraits): eels_gos_files_path = t.Directory( guess_gos_path(), - label='Hartree-Slater GOS directory', - desc='The GOS files are used to create the EELS edge components') + label="Hartree-Slater GOS directory", + desc="The GOS files are used to create the EELS edge components", + ) class EDSConfig(t.HasTraits): - eds_mn_ka = t.CFloat(130., - label='Energy resolution at Mn Ka (eV)', - desc='default value for FWHM of the Mn Ka peak in eV,' - 'This value is used as a first approximation' - 'of the energy resolution of the detector.') + eds_mn_ka = t.CFloat( + 130.0, + label="Energy resolution at Mn Ka (eV)", + desc="default value for FWHM of the Mn Ka peak in eV," + "This value is used as a first approximation" + "of the energy resolution of the detector.", + ) eds_tilt_stage = t.CFloat( - 0., - label='Stage tilt', - desc='default value for the stage tilt in degree.') + 0.0, label="Stage tilt", desc="default value for the stage tilt in degree." + ) eds_detector_azimuth = t.CFloat( - 0., - label='Azimuth angle', - desc='default value for the azimuth angle in degree. If the azimuth' - ' is zero, the detector is perpendicular to the tilt axis.') + 0.0, + label="Azimuth angle", + desc="default value for the azimuth angle in degree. 
If the azimuth" + " is zero, the detector is perpendicular to the tilt axis.", + ) eds_detector_elevation = t.CFloat( - 35., - label='Elevation angle', - desc='default value for the elevation angle in degree.') + 35.0, + label="Elevation angle", + desc="default value for the elevation angle in degree.", + ) template = { - 'EELS': EELSConfig(), - 'EDS': EDSConfig(), + "EELS": EELSConfig(), + "EDS": EDSConfig(), } @@ -97,9 +101,9 @@ def config2template(template, config): for section, traited_class in template.items(): config_dict = {} for name, value in config.items(section): - if value == 'True': + if value == "True": value = True - elif value == 'False': + elif value == "False": value = False config_dict[name] = value traited_class.trait_set(True, **config_dict) @@ -128,7 +132,7 @@ def config2template(template, config): rewrite = True if not defaults_file_exists or rewrite is True: - _logger.info('Writing the config file') + _logger.info("Writing the config file") with open(defaults_file, "w") as df: config.write(df) @@ -144,10 +148,10 @@ class Preferences(t.HasTraits): def save(self): config = configparser.ConfigParser(allow_no_value=True) template2config(template, config) - config.write(open(defaults_file, 'w')) + config.write(open(defaults_file, "w")) preferences = Preferences( - EELS=template['EELS'], - EDS=template['EDS'], + EELS=template["EELS"], + EDS=template["EDS"], ) diff --git a/exspy/components/__init__.py b/exspy/components/__init__.py index 1d6f55ec2..a7dccc8ff 100644 --- a/exspy/components/__init__.py +++ b/exspy/components/__init__.py @@ -19,7 +19,7 @@ "SEE", "Vignetting", "VolumePlasmonDrude", - ] +] def __dir__(): diff --git a/exspy/components/eels_arctan.py b/exspy/components/eels_arctan.py index bc7924d50..333d4edea 100644 --- a/exspy/components/eels_arctan.py +++ b/exspy/components/eels_arctan.py @@ -21,7 +21,6 @@ class EELSArctan(Expression): - r"""Arctan function component for EELS (with minimum at zero). .. math:: @@ -52,7 +51,7 @@ class EELSArctan(Expression): """ - def __init__(self, A=1., k=1., x0=1., module=["numpy", "scipy"], **kwargs): + def __init__(self, A=1.0, k=1.0, x0=1.0, module=["numpy", "scipy"], **kwargs): # To be able to still read old file versions that contain this argument if "minimum_at_zero" in kwargs: del kwargs["minimum_at_zero"] diff --git a/exspy/components/eels_cl_edge.py b/exspy/components/eels_cl_edge.py index 0fa75a1d2..2d82b788d 100644 --- a/exspy/components/eels_cl_edge.py +++ b/exspy/components/eels_cl_edge.py @@ -17,7 +17,6 @@ # along with exSpy. If not, see . - import functools import logging import math @@ -141,7 +140,9 @@ class EELSCLEdge(Component): fine_structure_components : set, default ``set()`` A set containing components to model the fine structure region of the EELS ionization edge. 
- """.format(_GOSH_DOI) + """.format( + _GOSH_DOI + ) _fine_structure_smoothing = 0.3 _fine_structure_coeff_free = True @@ -200,11 +201,9 @@ def __init__(self, element_subshell, GOS="gosh", gos_file_path=None): self._whitelist["fine_structure_spline_onset"] = None self._whitelist["fine_structure_spline_active"] = None self._whitelist["_fine_structure_coeff_free"] = None - self.effective_angle.events.value_changed.connect( - self._integrate_GOS, []) + self.effective_angle.events.value_changed.connect(self._integrate_GOS, []) self.onset_energy.events.value_changed.connect(self._integrate_GOS, []) - self.onset_energy.events.value_changed.connect( - self._calculate_knots, []) + self.onset_energy.events.value_changed.connect(self._calculate_knots, []) self._fine_structure_spline_onset = 0 self.events.active_changed.connect(self._set_active_fine_structure_components) @@ -249,25 +248,25 @@ def fine_structure_width(self, arg): @property def E0(self): return self.__E0 - + @E0.setter def E0(self, arg): self.__E0 = arg self._calculate_effective_angle() - + @property def collection_angle(self): return self.__collection_angle - + @collection_angle.setter def collection_angle(self, arg): self.__collection_angle = arg self._calculate_effective_angle() - + @property def convergence_angle(self): return self.__convergence_angle - + @convergence_angle.setter def convergence_angle(self, arg): self.__convergence_angle = arg @@ -306,7 +305,7 @@ def fine_structure_smoothing(self, value): if 0 <= value <= 1: self._fine_structure_smoothing = value self._set_fine_structure_coeff() - if self.fine_structure_active and self.model: + if self.fine_structure_active and self.model: self.model.update_plot() else: raise ValueError("The value must be a number between 0 and 1") @@ -324,7 +323,7 @@ def fine_structure_spline_onset(self, value): if not np.allclose(value, self._fine_structure_spline_onset): self._fine_structure_spline_onset = value self._set_fine_structure_coeff() - if self.fine_structure_active and self.model: + if self.fine_structure_active and self.model: self.model.update_plot() @property @@ -341,16 +340,22 @@ def fine_structure_spline_active(self, value): self._fine_structure_coeff_free = self.fine_structure_coeff.free self.fine_structure_coeff.free = False self._fine_structure_spline_active = value - if self.fine_structure_active and self.model: + if self.fine_structure_active and self.model: self.model.update_plot() def _set_fine_structure_coeff(self): if self.energy_scale is None: return - self.fine_structure_coeff._number_of_elements = int( - round(self.fine_structure_smoothing * - (self.fine_structure_width - self.fine_structure_spline_onset) / - self.energy_scale)) + 4 + self.fine_structure_coeff._number_of_elements = ( + int( + round( + self.fine_structure_smoothing + * (self.fine_structure_width - self.fine_structure_spline_onset) + / self.energy_scale + ) + ) + + 4 + ) self.fine_structure_coeff.bmin = None self.fine_structure_coeff.bmax = None self._calculate_knots() @@ -376,10 +381,10 @@ def fix_fine_structure(self): def free_fine_structure(self): """Frees the parameters of the fine structure - If there are fine structure components, only the + If there are fine structure components, only the parameters that have been previously fixed with ``fix_fine_structure`` will be set free. - + The spline parameters set free only if ``fine_structure_spline_active`` is ``True``. 
@@ -468,7 +473,8 @@ def function(self, E): if np.any(bifs): cts[bifs] = splev( E[bifs], - (self.__knots, self.fine_structure_coeff.value + (0,) * 4, 3)) + (self.__knots, self.fine_structure_coeff.value + (0,) * 4, 3), + ) # The cross-section is set to 0 in the fine structure region itab = (E < Emax) & (E >= ifsx2) else: @@ -530,6 +536,8 @@ def get_fine_structure_as_signal1D(self): def as_dictionary(self, fullcopy=True): dic = super().as_dictionary(fullcopy=fullcopy) - dic["fine_structure_components"] = [t.name for t in self.fine_structure_components] + dic["fine_structure_components"] = [ + t.name for t in self.fine_structure_components + ] dic["_whitelist"]["fine_structure_components"] = "" return dic diff --git a/exspy/components/eels_double_power_law.py b/exspy/components/eels_double_power_law.py index 93b440237..dadf12f2f 100644 --- a/exspy/components/eels_double_power_law.py +++ b/exspy/components/eels_double_power_law.py @@ -24,7 +24,6 @@ class DoublePowerLaw(Expression): - r"""Double power law component for EELS spectra. .. math:: @@ -63,9 +62,18 @@ class DoublePowerLaw(Expression): For x <= left_cutoff, the function returns 0. Default value is 0.0. """ - def __init__(self, A=1e-5, r=3., origin=0., shift=20., ratio=1., - left_cutoff=0.0, module="numexpr", compute_gradients=False, - **kwargs): + def __init__( + self, + A=1e-5, + r=3.0, + origin=0.0, + shift=20.0, + ratio=1.0, + left_cutoff=0.0, + module="numexpr", + compute_gradients=False, + **kwargs + ): super().__init__( expression="where(x > left_cutoff, \ A * (ratio * (x - origin - shift) ** -r \ @@ -81,24 +89,22 @@ def __init__(self, A=1e-5, r=3., origin=0., shift=20., ratio=1., autodoc=False, module=module, compute_gradients=compute_gradients, - linear_parameter_list=['A'], + linear_parameter_list=["A"], check_parameter_linearity=False, **kwargs, ) - + # Boundaries - self.A.bmin = 0. + self.A.bmin = 0.0 self.A.bmax = None - self.r.bmin = 1. - self.r.bmax = 5. 
+ self.r.bmin = 1.0 + self.r.bmax = 5.0 self.isbackground = True self.convolved = True def function_nd(self, axis): - """%s - - """ + """%s""" return super().function_nd(axis) function_nd.__doc__ %= FUNCTION_ND_DOCSTRING @@ -108,25 +114,45 @@ def grad_A(self, x): return self.function(x) / self.A.value def grad_r(self, x): - return np.where(x > self.left_cutoff.value, -self.A.value * - self.ratio.value * (x - self.origin.value - - self.shift.value) ** (-self.r.value) * - np.log(x - self.origin.value - self.shift.value) - - self.A.value * (x - self.origin.value) ** - (-self.r.value) * np.log(x - self.origin.value), 0) + return np.where( + x > self.left_cutoff.value, + -self.A.value + * self.ratio.value + * (x - self.origin.value - self.shift.value) ** (-self.r.value) + * np.log(x - self.origin.value - self.shift.value) + - self.A.value + * (x - self.origin.value) ** (-self.r.value) + * np.log(x - self.origin.value), + 0, + ) def grad_origin(self, x): - return np.where(x > self.left_cutoff.value, self.A.value * self.r.value - * self.ratio.value * (x - self.origin.value - self.shift.value) - ** (-self.r.value - 1) + self.A.value * self.r.value - * (x - self.origin.value) ** (-self.r.value - 1), 0) + return np.where( + x > self.left_cutoff.value, + self.A.value + * self.r.value + * self.ratio.value + * (x - self.origin.value - self.shift.value) ** (-self.r.value - 1) + + self.A.value + * self.r.value + * (x - self.origin.value) ** (-self.r.value - 1), + 0, + ) def grad_shift(self, x): - return np.where(x > self.left_cutoff.value, self.A.value * self.r.value - * self.ratio.value * (x - self.origin.value - - self.shift.value) ** (-self.r.value - 1), 0) + return np.where( + x > self.left_cutoff.value, + self.A.value + * self.r.value + * self.ratio.value + * (x - self.origin.value - self.shift.value) ** (-self.r.value - 1), + 0, + ) def grad_ratio(self, x): - return np.where(x > self.left_cutoff.value, self.A.value * - (x - self.origin.value - self.shift.value) ** - (-self.r.value), 0) + return np.where( + x > self.left_cutoff.value, + self.A.value + * (x - self.origin.value - self.shift.value) ** (-self.r.value), + 0, + ) diff --git a/exspy/components/eels_vignetting.py b/exspy/components/eels_vignetting.py index 718a74b3f..b5b7c9cd3 100644 --- a/exspy/components/eels_vignetting.py +++ b/exspy/components/eels_vignetting.py @@ -30,24 +30,28 @@ class Vignetting(Component): """ def __init__(self): - Component.__init__(self, - ['optical_center', - 'height', - 'period', - 'left_slope', - 'right_slope', - 'left', - 'right', - 'sigma']) + Component.__init__( + self, + [ + "optical_center", + "height", + "period", + "left_slope", + "right_slope", + "left", + "right", + "sigma", + ], + ) self.left.value = np.nan self.right.value = np.nan self.side_vignetting = False self.fix_side_vignetting() self.gaussian = Gaussian() self.gaussian.centre.free, self.gaussian.A.free = False, False - self.sigma.value = 1. - self.gaussian.A.value = 1. - self.period.value = 1. + self.sigma.value = 1.0 + self.gaussian.A.value = 1.0 + self.period.value = 1.0 self.extension_nch = 100 self._position = self.optical_center @@ -62,20 +66,20 @@ def function(self, x): r = self.right.value ex = self.extension_nch if self.side_vignetting is True: - x = x.tolist() - x = list(range(-ex, 0)) + x + \ - list(range(int(x[-1]) + 1, int(x[-1]) + ex + 1)) + x = ( + list(range(-ex, 0)) + + x + + list(range(int(x[-1]) + 1, int(x[-1]) + ex + 1)) + ) x = np.array(x) v1 = A * np.cos((x - x0) / (2 * np.pi * period)) ** 4 - v2 = np.where(x < l, - 1. 
- (l - x) * la, - np.where(x < r, - 1., - 1. - (x - r) * ra)) + v2 = np.where( + x < l, 1.0 - (l - x) * la, np.where(x < r, 1.0, 1.0 - (x - r) * ra) + ) self.gaussian.sigma.value = sigma self.gaussian.origin.value = (x[-1] + x[0]) / 2 - result = np.convolve(self.gaussian.function(x), v1 * v2, 'same') + result = np.convolve(self.gaussian.function(x), v1 * v2, "same") return result[ex:-ex] else: return A * np.cos((x - x0) / (2 * np.pi * period)) ** 4 diff --git a/exspy/components/pes_core_line_shape.py b/exspy/components/pes_core_line_shape.py index a11c89b55..4f4b62e18 100644 --- a/exspy/components/pes_core_line_shape.py +++ b/exspy/components/pes_core_line_shape.py @@ -33,11 +33,10 @@ def _calculate_shirley_background(values): class PESCoreLineShape(Component): - """ - """ + """ """ - def __init__(self, A=1., FWHM=1., origin=0., ab=0.0, shirley=0.0): - Component.__init__(self, ['A', 'FWHM', 'origin', 'ab', 'shirley']) + def __init__(self, A=1.0, FWHM=1.0, origin=0.0, ab=0.0, shirley=0.0): + Component.__init__(self, ["A", "FWHM", "origin", "ab", "shirley"]) self.ab.value = 0 self.ab.free = False self.A.value = A @@ -46,7 +45,7 @@ def __init__(self, A=1., FWHM=1., origin=0., ab=0.0, shirley=0.0): self._position = self.origin # Boundaries - self.A.bmin = 0. + self.A.bmin = 0.0 self.A.bmax = None self.FWHM.bmin = None self.FWHM.bmax = None @@ -62,7 +61,7 @@ def __init__(self, A=1., FWHM=1., origin=0., ab=0.0, shirley=0.0): # Options self.Shirley = False - self._whitelist['Shirley'] = None + self._whitelist["Shirley"] = None @property def Shirley(self): @@ -86,23 +85,24 @@ def _function(self, x, A, origin, FWHM, ab, shirley): return f def function(self, x): - return self._function(x, self.A.value, - self.origin.value, - self.FWHM.value, - self.ab.value, - self.shirley.value) + return self._function( + x, + self.A.value, + self.origin.value, + self.FWHM.value, + self.ab.value, + self.shirley.value, + ) def function_nd(self, axis): - """%s - - """ + """%s""" if self._is_navigation_multidimensional: x = axis[np.newaxis, :] - A = self.A.map['values'][..., np.newaxis] - origin = self.origin.map['values'][..., np.newaxis] - FWHM = self.FWHM.map['values'][..., np.newaxis] - ab = self.ab.map['values'][..., np.newaxis] - shirley = self.shirley.map['values'][..., np.newaxis] + A = self.A.map["values"][..., np.newaxis] + origin = self.origin.map["values"][..., np.newaxis] + FWHM = self.FWHM.map["values"][..., np.newaxis] + ab = self.ab.map["values"][..., np.newaxis] + shirley = self.shirley.map["values"][..., np.newaxis] return self._function(x, A, origin, FWHM, ab, shirley) else: return self.function(axis) @@ -117,16 +117,26 @@ def grad_FWHM(self, x): a1 = self.origin.value a2 = self.FWHM.value a3 = self.ab.value - return (2 * math.log(2) * a0 * (x + a3 - a1) ** 2 * - np.exp(-(math.log(2) * (x + a3 - a1) ** 2) / a2 ** 2)) / a2 ** 3 + return ( + 2 + * math.log(2) + * a0 + * (x + a3 - a1) ** 2 + * np.exp(-(math.log(2) * (x + a3 - a1) ** 2) / a2**2) + ) / a2**3 def grad_origin(self, x): a0 = self.A.value a1 = self.origin.value a2 = self.FWHM.value a3 = self.ab.value - return (2 * math.log(2) * a0 * (x + a3 - a1) * - np.exp(-(math.log(2) * (x + a3 - a1) ** 2) / a2 ** 2)) / a2 ** 2 + return ( + 2 + * math.log(2) + * a0 + * (x + a3 - a1) + * np.exp(-(math.log(2) * (x + a3 - a1) ** 2) / a2**2) + ) / a2**2 def grad_ab(self, x): return -self.grad_origin(x) diff --git a/exspy/components/pes_see.py b/exspy/components/pes_see.py index 698638d44..3e65f48db 100644 --- a/exspy/components/pes_see.py +++ 
b/exspy/components/pes_see.py @@ -26,7 +26,6 @@ class SEE(Expression): - r"""Secondary electron emission component for Photoemission Spectroscopy. .. math:: @@ -62,11 +61,13 @@ class SEE(Expression): """ - def __init__(self, A=1., Phi=1., B=0., module="numexpr", - compute_gradients=False, **kwargs): - if kwargs.pop('sigma', False): - _logger.warning('The `sigma` parameter was broken and it has been ' - 'removed.') + def __init__( + self, A=1.0, Phi=1.0, B=0.0, module="numexpr", compute_gradients=False, **kwargs + ): + if kwargs.pop("sigma", False): + _logger.warning( + "The `sigma` parameter was broken and it has been " "removed." + ) super().__init__( expression="where(x > Phi, A * (x - Phi) / (x - Phi + B) ** 4, 0)", @@ -78,34 +79,39 @@ def __init__(self, A=1., Phi=1., B=0., module="numexpr", module=module, autodoc=False, compute_gradients=compute_gradients, - linear_parameter_list=['A'], + linear_parameter_list=["A"], check_parameter_linearity=False, **kwargs, ) # Boundaries - self.A.bmin = 0. + self.A.bmin = 0.0 self.A.bmax = None self.convolved = True def grad_A(self, x): - """ - """ - return np.where(x > self.Phi.value, (x - self.Phi.value) / - (x - self.Phi.value + self.B.value) ** 4, 0) + """ """ + return np.where( + x > self.Phi.value, + (x - self.Phi.value) / (x - self.Phi.value + self.B.value) ** 4, + 0, + ) def grad_Phi(self, x): - """ - """ + """ """ return np.where( - x > self.Phi.value, - (4 * (x - self.Phi.value) * self.A.value) / - (self.B.value + x - self.Phi.value) ** 5 - - self.A.value / (self.B.value + x - self.Phi.value) ** 4, 0) + x > self.Phi.value, + (4 * (x - self.Phi.value) * self.A.value) + / (self.B.value + x - self.Phi.value) ** 5 + - self.A.value / (self.B.value + x - self.Phi.value) ** 4, + 0, + ) def grad_B(self, x): return np.where( - x > self.Phi.value, - -(4 * (x - self.Phi.value) * self.A.value) / - (self.B.value + x - self.Phi.value) ** 5, 0) + x > self.Phi.value, + -(4 * (x - self.Phi.value) * self.A.value) + / (self.B.value + x - self.Phi.value) ** 5, + 0, + ) diff --git a/exspy/components/pes_voigt.py b/exspy/components/pes_voigt.py index 9262ae3f8..de17fe2e2 100644 --- a/exspy/components/pes_voigt.py +++ b/exspy/components/pes_voigt.py @@ -75,6 +75,7 @@ def voigt(x, FWHM=1, gamma=1, center=0, scale=1): """ # wofz function = w(z) = Fad[d][e][y]eva function = exp(-z**2)erfc(-iz) from scipy.special import wofz + sigma = FWHM / 2.3548200450309493 z = (np.asarray(x) - center + 1j * gamma) / (sigma * math.sqrt(2)) V = wofz(z) / (math.sqrt(2 * np.pi) * sigma) @@ -82,7 +83,6 @@ def voigt(x, FWHM=1, gamma=1, center=0, scale=1): class PESVoigt(Component): - r"""Voigt component for photoemission spectroscopy data analysis. 
Voigt profile component with support for shirley background, @@ -124,15 +124,19 @@ class PESVoigt(Component): """ def __init__(self): - Component.__init__(self, ( - 'area', - 'centre', - 'FWHM', - 'gamma', - 'resolution', - 'shirley_background', - 'non_isochromaticity', - 'transmission_function')) + Component.__init__( + self, + ( + "area", + "centre", + "FWHM", + "gamma", + "resolution", + "shirley_background", + "non_isochromaticity", + "transmission_function", + ), + ) self._position = self.centre self.FWHM.value = 1 self.gamma.value = 0 @@ -159,16 +163,20 @@ def function(self, x): if self.resolution.value == 0: FWHM = self.FWHM.value else: - FWHM = math.sqrt(self.FWHM.value ** 2 + self.resolution.value ** 2) + FWHM = math.sqrt(self.FWHM.value**2 + self.resolution.value**2) gamma = self.gamma.value k = self.shirley_background.value - f = voigt(x, - FWHM=FWHM, gamma=gamma, center=centre - ab, scale=area) + f = voigt(x, FWHM=FWHM, gamma=gamma, center=centre - ab, scale=area) if self.spin_orbit_splitting: ratio = self.spin_orbit_branching_ratio shift = self.spin_orbit_splitting_energy - f2 = voigt(x, FWHM=FWHM, gamma=gamma, - center=centre - ab - shift, scale=area * ratio) + f2 = voigt( + x, + FWHM=FWHM, + gamma=gamma, + center=centre - ab - shift, + scale=area * ratio, + ) f += f2 if self.shirley_background.active: cf = np.cumsum(f) @@ -220,8 +228,9 @@ def estimate_parameters(self, signal, E1, E2, only_current=False): """ super()._estimate_parameters(signal) axis = signal.axes_manager.signal_axes[0] - centre, height, sigma = _estimate_gaussian_parameters(signal, E1, E2, - only_current) + centre, height, sigma = _estimate_gaussian_parameters( + signal, E1, E2, only_current + ) scaling_factor = _get_scaling_factor(signal, axis, centre) if only_current is True: @@ -234,13 +243,13 @@ def estimate_parameters(self, signal, E1, E2, only_current=False): else: if self.area.map is None: self._create_arrays() - self.area.map['values'][:] = height * sigma * sqrt2pi + self.area.map["values"][:] = height * sigma * sqrt2pi if axis.is_binned: - self.area.map['values'][:] /= scaling_factor - self.area.map['is_set'][:] = True - self.FWHM.map['values'][:] = sigma * sigma2fwhm - self.FWHM.map['is_set'][:] = True - self.centre.map['values'][:] = centre - self.centre.map['is_set'][:] = True + self.area.map["values"][:] /= scaling_factor + self.area.map["is_set"][:] = True + self.FWHM.map["values"][:] = sigma * sigma2fwhm + self.FWHM.map["is_set"][:] = True + self.centre.map["values"][:] = centre + self.centre.map["is_set"][:] = True self.fetch_stored_values() return True diff --git a/exspy/components/volume_plasmon_drude.py b/exspy/components/volume_plasmon_drude.py index ef488e916..f39d4dda4 100644 --- a/exspy/components/volume_plasmon_drude.py +++ b/exspy/components/volume_plasmon_drude.py @@ -22,7 +22,6 @@ class VolumePlasmonDrude(hs.model.components1D.Expression): - r""" Drude volume plasmon energy loss function component, the energy loss function is defined as: @@ -55,8 +54,15 @@ class VolumePlasmonDrude(hs.model.components1D.Expression): for details, including original equations. 
""" - def __init__(self, intensity=1., plasmon_energy=15., fwhm=1.5, - module="numexpr", compute_gradients=False, **kwargs): + def __init__( + self, + intensity=1.0, + plasmon_energy=15.0, + fwhm=1.5, + module="numexpr", + compute_gradients=False, + **kwargs + ): super().__init__( expression="where(x > 0, intensity * (pe2 * x * fwhm) \ / ((x ** 2 - pe2) ** 2 + (x * fwhm) ** 2), 0); \ @@ -69,7 +75,7 @@ def __init__(self, intensity=1., plasmon_energy=15., fwhm=1.5, module=module, autodoc=False, compute_gradients=compute_gradients, - linear_parameter_list=['intensity'], + linear_parameter_list=["intensity"], check_parameter_linearity=False, **kwargs, ) @@ -82,11 +88,22 @@ def grad_plasmon_energy(self, x): return np.where( x > 0, - 2 * x * fwhm * plasmon_energy * intensity * ( - (x ** 4 + (x * fwhm) ** 2 - plasmon_energy ** 4) / - (x ** 4 + x ** 2 * (fwhm ** 2 - 2 * plasmon_energy ** 2) + - plasmon_energy ** 4) ** 2), - 0) + 2 + * x + * fwhm + * plasmon_energy + * intensity + * ( + (x**4 + (x * fwhm) ** 2 - plasmon_energy**4) + / ( + x**4 + + x**2 * (fwhm**2 - 2 * plasmon_energy**2) + + plasmon_energy**4 + ) + ** 2 + ), + 0, + ) # Partial derivative with respect to the plasmon linewidth delta_E_p def grad_fwhm(self, x): @@ -96,12 +113,24 @@ def grad_fwhm(self, x): return np.where( x > 0, - x * plasmon_energy * intensity * ( - (x ** 4 - x ** 2 * (2 * plasmon_energy ** 2 + fwhm ** 2) + - plasmon_energy ** 4) / - (x ** 4 + x ** 2 * (fwhm ** 2 - 2 * plasmon_energy ** 2) + - plasmon_energy ** 4) ** 2), - 0) + x + * plasmon_energy + * intensity + * ( + ( + x**4 + - x**2 * (2 * plasmon_energy**2 + fwhm**2) + + plasmon_energy**4 + ) + / ( + x**4 + + x**2 * (fwhm**2 - 2 * plasmon_energy**2) + + plasmon_energy**4 + ) + ** 2 + ), + 0, + ) def grad_intensity(self, x): return self.function(x) / self.intensity.value diff --git a/exspy/conftest.py b/exspy/conftest.py index b2d114d4b..12e1ae351 100644 --- a/exspy/conftest.py +++ b/exspy/conftest.py @@ -21,6 +21,7 @@ # Capture error when toolkit is already previously set which typically # occurs when building the doc locally from traits.etsconfig.api import ETSConfig + ETSConfig.toolkit = "null" except ValueError: # in case ETSConfig.toolkit was already set previously. @@ -38,9 +39,9 @@ @pytest.fixture(autouse=True) def add_np(doctest_namespace): - doctest_namespace['np'] = np - doctest_namespace['plt'] = plt - doctest_namespace['hs'] = hs + doctest_namespace["np"] = np + doctest_namespace["plt"] = plt + doctest_namespace["hs"] = hs @pytest.fixture @@ -51,8 +52,10 @@ def pdb_cmdopt(request): def setup_module(mod, pdb_cmdopt): if pdb_cmdopt: import dask + dask.set_options(get=dask.local.get_sync) + from matplotlib.testing.conftest import mpl_test_settings @@ -64,5 +67,5 @@ def pytest_configure(config): config.addinivalue_line( "markers", "mpl_image_compare: dummy marker registration to allow running " - "without the pytest-mpl plugin." + "without the pytest-mpl plugin.", ) diff --git a/exspy/data/__init__.py b/exspy/data/__init__.py index b50d0cc07..da919842d 100644 --- a/exspy/data/__init__.py +++ b/exspy/data/__init__.py @@ -43,8 +43,7 @@ def __dir__(): return sorted(__all__) -_ADD_NOISE_DOCSTRING = \ -"""add_noise : bool +_ADD_NOISE_DOCSTRING = """add_noise : bool If True, add noise to the signal. See note to seed the noise to generate reproducible noise. 
 random_state : None or int or RandomState instance, default None
@@ -52,8 +51,7 @@
 """

-_RETURNS_DOCSTRING = \
-"""Returns
+_RETURNS_DOCSTRING = """Returns
 -------
 :py:class:`~hyperspy._signals.eels.EELSSpectrum`
 """
@@ -99,7 +97,7 @@ def EDS_TEM_FePt_nanoparticles():
     return hs.load(file_path, mode="r", reader="hspy")


-def EELS_low_loss(add_noise=True, random_state=None, navigation_shape=(10, )):
+def EELS_low_loss(add_noise=True, random_state=None, navigation_shape=(10,)):
     """
     Get an artificial low loss electron energy loss spectrum.

@@ -133,18 +131,18 @@
     s = hs.signals.Signal1D(np.ones(navigation_shape[::-1] + x.shape))
     plasmon._axes_manager = s.axes_manager
     plasmon._create_arrays()
-    plasmon.centre.map['values'][:] = random_state.uniform(
+    plasmon.centre.map["values"][:] = random_state.uniform(
         low=14.5, high=15.5, size=navigation_shape[::-1]
-        )
-    plasmon.centre.map['is_set'][:] = True
-    plasmon.A.map['values'][:] = random_state.uniform(
+    )
+    plasmon.centre.map["is_set"][:] = True
+    plasmon.A.map["values"][:] = random_state.uniform(
         low=50, high=70, size=navigation_shape[::-1]
-        )
-    plasmon.A.map['is_set'][:] = True
-    plasmon.sigma.map['values'][:] = random_state.uniform(
+    )
+    plasmon.A.map["is_set"][:] = True
+    plasmon.sigma.map["values"][:] = random_state.uniform(
         low=1.8, high=2.2, size=navigation_shape[::-1]
-        )
-    plasmon.sigma.map['is_set'][:] = True
+    )
+    plasmon.sigma.map["is_set"][:] = True

     data = np.broadcast_to(zero_loss.function(x), navigation_shape[::-1] + x.shape)
     data = data + plasmon.function_nd(x)
@@ -153,14 +151,16 @@
         data = data + random_state.uniform(size=len(x))

     from exspy.signals import EELSSpectrum
+
     s = EELSSpectrum(data)
     s.axes_manager[-1].offset = x[0]
     s.axes_manager[-1].scale = x[1] - x[0]
-    s.metadata.General.title = 'Artifical low loss EEL spectrum'
-    s.axes_manager[-1].name = 'Electron energy loss'
-    s.axes_manager[-1].units = 'eV'
+    s.metadata.General.title = "Artificial low loss EEL spectrum"
+    s.axes_manager[-1].name = "Electron energy loss"
+    s.axes_manager[-1].units = "eV"
     s.set_microscope_parameters(
-        beam_energy=200, convergence_angle=26, collection_angle=20)
+        beam_energy=200, convergence_angle=26, collection_angle=20
+    )
     return s


@@ -169,11 +169,8 @@


 def EELS_MnFe(
-        add_powerlaw=True,
-        add_noise=True,
-        random_state=None,
-        navigation_shape=(10, )
-        ):
+    add_powerlaw=True, add_noise=True, random_state=None, navigation_shape=(10,)
+):
     """
     Get an artificial core loss electron energy loss spectrum.
@@ -222,7 +219,7 @@ def EELS_MnFe( Fe_l3 = hs.model.components1D.Gaussian(A=150, centre=708, sigma=4) Fe_l2 = hs.model.components1D.Gaussian(A=50, centre=730, sigma=3) - if len(navigation_shape) == 0 or navigation_shape == (1, ): + if len(navigation_shape) == 0 or navigation_shape == (1,): Mn = 0.5 Fe = 0.5 else: @@ -230,8 +227,8 @@ def EELS_MnFe( Fe = np.array([0, 0, 0.25, 0.5, 1]) Mn_interpolate = interpolate.interp1d(np.arange(0, len(Mn)), Mn) Fe_interpolate = interpolate.interp1d(np.arange(0, len(Fe)), Fe) - Mn = Mn_interpolate(np.linspace(0, len(Mn)-1, navigation_shape[0])) - Fe = Fe_interpolate(np.linspace(0, len(Fe)-1, navigation_shape[0])) + Mn = Mn_interpolate(np.linspace(0, len(Mn) - 1, navigation_shape[0])) + Fe = Fe_interpolate(np.linspace(0, len(Fe) - 1, navigation_shape[0])) def get_data(component, element_distribution): data_ = np.broadcast_to(component.function(x), navigation_shape + x.shape) @@ -244,26 +241,31 @@ def get_data(component, element_distribution): Fe_l3_data = get_data(Fe_l3, Fe) Fe_l2_data = get_data(Fe_l2, Fe) - data = arctan_Mn_data + Mn_l3_data + Mn_l2_data + arctan_Fe_data + Fe_l3_data + Fe_l2_data + data = ( + arctan_Mn_data + + Mn_l3_data + + Mn_l2_data + + arctan_Fe_data + + Fe_l3_data + + Fe_l2_data + ) if add_noise: data += random_state.uniform(size=navigation_shape + x.shape) * 0.7 if add_powerlaw: powerlaw = hs.model.components1D.PowerLaw(A=10e8, r=2.9, origin=0) - data = data + np.broadcast_to( - powerlaw.function(x), navigation_shape + x.shape - ) + data = data + np.broadcast_to(powerlaw.function(x), navigation_shape + x.shape) s = exspy.signals.EELSSpectrum(data) s.axes_manager[-1].offset = x[0] s.axes_manager[-1].scale = x[1] - x[0] - s.metadata.General.title = 'Artifical core loss EEL spectrum' - s.axes_manager[-1].name = 'Electron energy loss' - s.axes_manager[-1].units = 'eV' + s.metadata.General.title = "Artifical core loss EEL spectrum" + s.axes_manager[-1].name = "Electron energy loss" + s.axes_manager[-1].units = "eV" s.set_microscope_parameters( beam_energy=200, convergence_angle=26, collection_angle=20 - ) + ) s.add_elements(["Fe", "Mn"]) return s.squeeze() diff --git a/exspy/docstrings/model.py b/exspy/docstrings/model.py index dd0e5ff93..2a90ae712 100644 --- a/exspy/docstrings/model.py +++ b/exspy/docstrings/model.py @@ -22,16 +22,16 @@ """ from exspy.misc.eels.gosh_gos import _GOSH_DOI -GOS_PARAMETER = \ - """GOS : 'hydrogenic', 'gosh', 'Hartree-Slater'. +GOS_PARAMETER = """GOS : 'hydrogenic', 'gosh', 'Hartree-Slater'. The GOS to use. Default is ``'gosh'``. gos_file_path : str, None Only with GOS='gosh'. Specify the file path of the gosh file - to use. If None, use the file from doi:{}""".format(_GOSH_DOI) + to use. If None, use the file from doi:{}""".format( + _GOSH_DOI +) -EELSMODEL_PARAMETERS = \ - """ll : None or EELSSpectrum +EELSMODEL_PARAMETERS = """ll : None or EELSSpectrum If an EELSSpectrum is provided, it will be assumed that it is a low-loss EELS spectrum, and it will be used to simulate the effect of multiple scattering by convolving it with the EELS @@ -49,4 +49,6 @@ {} dictionary : None or dict A dictionary to be used to recreate a model. 
Usually generated using - :meth:`~.model.BaseModel.as_dictionary`""".format(GOS_PARAMETER) \ No newline at end of file + :meth:`~.model.BaseModel.as_dictionary`""".format( + GOS_PARAMETER +) diff --git a/exspy/misc/eds/eds.py b/exspy/misc/eds/eds.py index ddefccd77..cf4db0b11 100644 --- a/exspy/misc/eds/eds.py +++ b/exspy/misc/eds/eds.py @@ -23,18 +23,18 @@ get_xray_lines_near_energy, take_off_angle, xray_range, - zeta_to_edx_cross_section - ) + zeta_to_edx_cross_section, +) __all__ = [ - 'edx_cross_section_to_zeta', - 'electron_range', - 'get_xray_lines_near_energy', - 'take_off_angle', - 'xray_range', - 'zeta_to_edx_cross_section', - ] + "edx_cross_section_to_zeta", + "electron_range", + "get_xray_lines_near_energy", + "take_off_angle", + "xray_range", + "zeta_to_edx_cross_section", +] def __dir__(): diff --git a/exspy/misc/eds/utils.py b/exspy/misc/eds/utils.py index 2afe96dea..559ea7818 100644 --- a/exspy/misc/eds/utils.py +++ b/exspy/misc/eds/utils.py @@ -25,12 +25,11 @@ from functools import reduce -eV2keV = 1000. +eV2keV = 1000.0 sigma2fwhm = 2 * math.sqrt(2 * math.log(2)) -_ABSORPTION_CORRECTION_DOCSTRING = \ -"""absorption_correction : numpy.ndarray or None +_ABSORPTION_CORRECTION_DOCSTRING = """absorption_correction : numpy.ndarray or None If None (default), absorption correction is ignored, otherwise, the array must contain values between 0 and 1 to correct the intensities based on estimated absorption. @@ -44,10 +43,10 @@ def _get_element_and_line(xray_line): By example, if xray_line = 'Mn_Ka' this function returns ('Mn', 'Ka') """ - lim = xray_line.find('_') + lim = xray_line.find("_") if lim == -1: raise ValueError(f"Invalid xray-line: {xray_line}") - return xray_line[:lim], xray_line[lim + 1:] + return xray_line[:lim], xray_line[lim + 1 :] def _get_energy_xray_line(xray_line): @@ -57,8 +56,7 @@ def _get_energy_xray_line(xray_line): By example, if xray_line = 'Mn_Ka' this function returns 5.8987 """ element, line = _get_element_and_line(xray_line) - return elements_db[element]['Atomic_properties']['Xray_lines'][ - line]['energy (keV)'] + return elements_db[element]["Atomic_properties"]["Xray_lines"][line]["energy (keV)"] def _get_xray_lines_family(xray_line): @@ -67,23 +65,23 @@ def _get_xray_lines_family(xray_line): By example, if xray_line = 'Mn_Ka' this function returns 'Mn_K' """ - return xray_line[:xray_line.find('_') + 2] + return xray_line[: xray_line.find("_") + 2] def _parse_only_lines(only_lines): if isinstance(only_lines, str): pass - elif hasattr(only_lines, '__iter__'): + elif hasattr(only_lines, "__iter__"): if any(isinstance(line, str) is False for line in only_lines): return only_lines else: return only_lines only_lines = list(only_lines) for only_line in only_lines: - if only_line == 'a': - only_lines.extend(['Ka', 'La', 'Ma']) - elif only_line == 'b': - only_lines.extend(['Kb', 'Lb1', 'Mb']) + if only_line == "a": + only_lines.extend(["Ka", "La", "Ma"]) + elif only_line == "b": + only_lines.extend(["Kb", "Lb1", "Mb"]) return only_lines @@ -108,21 +106,20 @@ def get_xray_lines_near_energy(energy, width=0.2, only_lines=None): """ only_lines = _parse_only_lines(only_lines) valid_lines = [] - E_min, E_max = energy - width / 2., energy + width / 2. 
+ E_min, E_max = energy - width / 2.0, energy + width / 2.0 for element, el_props in elements_db.items(): # Not all elements in the DB have the keys, so catch KeyErrors try: - lines = el_props['Atomic_properties']['Xray_lines'] + lines = el_props["Atomic_properties"]["Xray_lines"] except KeyError: continue for line, l_props in lines.items(): if only_lines and line not in only_lines: continue - line_energy = l_props['energy (keV)'] + line_energy = l_props["energy (keV)"] if E_min <= line_energy <= E_max: # Store line in Element_Line format, and energy difference - valid_lines.append((element + "_" + line, - abs(line_energy - energy))) + valid_lines.append((element + "_" + line, abs(line_energy - energy))) # Sort by energy difference, but return only the line names return [line for line, _ in sorted(valid_lines, key=lambda x: x[1])] @@ -158,14 +155,14 @@ def get_FWHM_at_Energy(energy_resolution_MnKa, E): """ FWHM_ref = energy_resolution_MnKa - E_ref = _get_energy_xray_line('Mn_Ka') + E_ref = _get_energy_xray_line("Mn_Ka") FWHM_e = 2.5 * (E - E_ref) * eV2keV + FWHM_ref * FWHM_ref - return math.sqrt(FWHM_e) / 1000. # In mrad + return math.sqrt(FWHM_e) / 1000.0 # In mrad -def xray_range(xray_line, beam_energy, density='auto'): +def xray_range(xray_line, beam_energy, density="auto"): """Return the maximum range of X-ray generation according to the Anderson-Hasler parameterization. @@ -205,19 +202,15 @@ def xray_range(xray_line, beam_energy, density='auto'): """ element, line = _get_element_and_line(xray_line) - if density == 'auto': - density = elements_db[ - element][ - 'Physical_properties'][ - 'density (g/cm^3)'] + if density == "auto": + density = elements_db[element]["Physical_properties"]["density (g/cm^3)"] Xray_energy = _get_energy_xray_line(xray_line) # Note: magic numbers here are from Andersen-Hasler parameterization. See # docstring for associated references. - return 0.064 / density * (np.power(beam_energy, 1.68) - - np.power(Xray_energy, 1.68)) + return 0.064 / density * (np.power(beam_energy, 1.68) - np.power(Xray_energy, 1.68)) -def electron_range(element, beam_energy, density='auto', tilt=0): +def electron_range(element, beam_energy, density="auto", tilt=0): """Returns the maximum electron range for a pure bulk material according to the Kanaya-Okayama parameterziation. @@ -252,15 +245,20 @@ def electron_range(element, beam_energy, density='auto', tilt=0): """ - if density == 'auto': - density = elements_db[ - element]['Physical_properties']['density (g/cm^3)'] - Z = elements_db[element]['General_properties']['Z'] - A = elements_db[element]['General_properties']['atomic_weight'] + if density == "auto": + density = elements_db[element]["Physical_properties"]["density (g/cm^3)"] + Z = elements_db[element]["General_properties"]["Z"] + A = elements_db[element]["General_properties"]["atomic_weight"] # Note: magic numbers here are from Kanaya-Okayama parameterization. See # docstring for associated references. 
- return (0.0276 * A / np.power(Z, 0.89) / density * - np.power(beam_energy, 1.67) * math.cos(math.radians(tilt))) + return ( + 0.0276 + * A + / np.power(Z, 0.89) + / density + * np.power(beam_energy, 1.67) + * math.cos(math.radians(tilt)) + ) def take_off_angle(tilt_stage, azimuth_angle, elevation_angle, beta_tilt=0.0): @@ -326,11 +324,14 @@ def take_off_angle(tilt_stage, azimuth_angle, elevation_angle, beta_tilt=0.0): ) ) -def xray_lines_model(elements, - beam_energy=200, - weight_percents=None, - energy_resolution_MnKa=130, - energy_axis=None): + +def xray_lines_model( + elements, + beam_energy=200, + weight_percents=None, + energy_resolution_MnKa=130, + energy_axis=None, +): """ Generate a model of X-ray lines using a Gaussian distribution for each peak. @@ -359,46 +360,56 @@ def xray_lines_model(elements, """ from exspy.signals.eds_tem import EDSTEMSpectrum from hyperspy import components1d + if energy_axis is None: - energy_axis = {'name': 'E', 'scale': 0.01, 'units': 'keV', - 'offset': -0.1, 'size': 1024} - s = EDSTEMSpectrum(np.zeros(energy_axis['size']), axes=[energy_axis]) + energy_axis = { + "name": "E", + "scale": 0.01, + "units": "keV", + "offset": -0.1, + "size": 1024, + } + s = EDSTEMSpectrum(np.zeros(energy_axis["size"]), axes=[energy_axis]) s.set_microscope_parameters( - beam_energy=beam_energy, - energy_resolution_MnKa=energy_resolution_MnKa) + beam_energy=beam_energy, energy_resolution_MnKa=energy_resolution_MnKa + ) s.add_elements(elements) - counts_rate = 1. - live_time = 1. + counts_rate = 1.0 + live_time = 1.0 if weight_percents is None: - weight_percents = [100. / len(elements)] * len(elements) + weight_percents = [100.0 / len(elements)] * len(elements) m = s.create_model() if len(elements) == len(weight_percents): - for (element, weight_percent) in zip(elements, weight_percents): - for line, properties in elements_db[ - element]['Atomic_properties']['Xray_lines'].items(): - line_energy = properties['energy (keV)'] - ratio_line = properties['weight'] - if s._get_xray_lines_in_spectral_range( - [element + '_' + line])[1] == []: + for element, weight_percent in zip(elements, weight_percents): + for line, properties in elements_db[element]["Atomic_properties"][ + "Xray_lines" + ].items(): + line_energy = properties["energy (keV)"] + ratio_line = properties["weight"] + if s._get_xray_lines_in_spectral_range([element + "_" + line])[1] == []: g = components1d.Gaussian() g.centre.value = line_energy - g.sigma.value = get_FWHM_at_Energy( - energy_resolution_MnKa, line_energy) / sigma2fwhm - g.A.value = live_time * counts_rate * \ - weight_percent / 100 * ratio_line + g.sigma.value = ( + get_FWHM_at_Energy(energy_resolution_MnKa, line_energy) + / sigma2fwhm + ) + g.A.value = ( + live_time * counts_rate * weight_percent / 100 * ratio_line + ) m.append(g) else: - raise ValueError("The number of elements specified is not the same " - "as the number of weight_percents") + raise ValueError( + "The number of elements specified is not the same " + "as the number of weight_percents" + ) s.data = m.as_signal().data return s -def quantification_cliff_lorimer(intensities, - kfactors, - absorption_correction=None, - mask=None): +def quantification_cliff_lorimer( + intensities, kfactors, absorption_correction=None, mask=None +): """ Quantification using Cliff-Lorimer @@ -437,32 +448,35 @@ def quantification_cliff_lorimer(intensities, if len(index) > 1: ref_index, ref_index2 = index[:2] intens[:, i] = _quantification_cliff_lorimer( - intens[:, i], kfactors, absorption_correction[:, i], - 
ref_index, ref_index2) + intens[:, i], + kfactors, + absorption_correction[:, i], + ref_index, + ref_index2, + ) else: intens[:, i] = np.zeros_like(intens[:, i]) if len(index) == 1: - intens[index[0], i] = 1. - + intens[index[0], i] = 1.0 + intens = intens.reshape(dim) if mask is not None: from hyperspy.signals import BaseSignal + if isinstance(mask, BaseSignal): mask = mask.data for i in range(dim[0]): - intens[i][(mask==True)] = 0 + intens[i][(mask == True)] = 0 return intens -quantification_cliff_lorimer.__doc__ %= (_ABSORPTION_CORRECTION_DOCSTRING) + +quantification_cliff_lorimer.__doc__ %= _ABSORPTION_CORRECTION_DOCSTRING -def _quantification_cliff_lorimer(intensities, - kfactors, - absorption_correction, - ref_index=0, - ref_index2=1 - ): +def _quantification_cliff_lorimer( + intensities, kfactors, absorption_correction, ref_index=0, ref_index2=1 +): """ Quantification using Cliff-Lorimer @@ -487,24 +501,28 @@ def _quantification_cliff_lorimer(intensities, shape as intensities. """ if len(intensities) != len(kfactors): - raise ValueError('The number of kfactors must match the size of the ' - 'first axis of intensities.') + raise ValueError( + "The number of kfactors must match the size of the " + "first axis of intensities." + ) - ab = np.zeros_like(intensities, dtype='float') - composition = np.ones_like(intensities, dtype='float') + ab = np.zeros_like(intensities, dtype="float") + composition = np.ones_like(intensities, dtype="float") # ab = Ia/Ib / kab other_index = list(range(len(kfactors))) other_index.pop(ref_index) for i in other_index: - ab[i] = (intensities[ref_index] * absorption_correction[ref_index]) \ - / (intensities[i] * absorption_correction[i]) \ - *( kfactors[ref_index] / kfactors[i]) + ab[i] = ( + (intensities[ref_index] * absorption_correction[ref_index]) + / (intensities[i] * absorption_correction[i]) + * (kfactors[ref_index] / kfactors[i]) + ) # Ca = ab /(1 + ab + ab/ac + ab/ad + ...) 
for i in other_index: if i == ref_index2: composition[ref_index] += ab[ref_index2] else: - composition[ref_index] += (ab[ref_index2] / ab[i]) + composition[ref_index] += ab[ref_index2] / ab[i] composition[ref_index] = ab[ref_index2] / composition[ref_index] # Cb = Ca / ab for i in other_index: @@ -512,10 +530,7 @@ def _quantification_cliff_lorimer(intensities, return composition -def quantification_zeta_factor(intensities, - zfactors, - dose, - absorption_correction=None): +def quantification_zeta_factor(intensities, zfactors, dose, absorption_correction=None): """ Quantification using the zeta-factor method @@ -540,19 +555,21 @@ def quantification_zeta_factor(intensities, """ if absorption_correction is None: # default to ones - absorption_correction = np.ones_like(intensities, dtype='float') + absorption_correction = np.ones_like(intensities, dtype="float") - sumzi = np.zeros_like(intensities[0], dtype='float') - composition = np.zeros_like(intensities, dtype='float') + sumzi = np.zeros_like(intensities[0], dtype="float") + composition = np.zeros_like(intensities, dtype="float") for intensity, zfactor, acf in zip(intensities, zfactors, absorption_correction): sumzi = sumzi + (intensity * zfactor * acf) - for i, (intensity, zfactor, acf) in enumerate(zip(intensities, zfactors, absorption_correction)): + for i, (intensity, zfactor, acf) in enumerate( + zip(intensities, zfactors, absorption_correction) + ): composition[i] = intensity * zfactor * acf / sumzi mass_thickness = sumzi / dose return composition, mass_thickness -quantification_zeta_factor.__doc__ %= (_ABSORPTION_CORRECTION_DOCSTRING) +quantification_zeta_factor.__doc__ %= _ABSORPTION_CORRECTION_DOCSTRING def get_abs_corr_zeta(weight_percent, mass_thickness, take_off_angle): @@ -571,21 +588,23 @@ def get_abs_corr_zeta(weight_percent, mass_thickness, take_off_angle): from exspy.misc import material toa_rad = np.radians(take_off_angle) - csc_toa = 1.0/np.sin(toa_rad) + csc_toa = 1.0 / np.sin(toa_rad) # convert from cm^2/g to m^2/kg - mac = stack( - material.mass_absorption_mixture(weight_percent=weight_percent), - show_progressbar=False - ) * 0.1 + mac = ( + stack( + material.mass_absorption_mixture(weight_percent=weight_percent), + show_progressbar=False, + ) + * 0.1 + ) expo = mac.data * mass_thickness.data * csc_toa - acf = expo/(1.0 - np.exp(-(expo))) + acf = expo / (1.0 - np.exp(-(expo))) return acf -def quantification_cross_section(intensities, - cross_sections, - dose, - absorption_correction=None): +def quantification_cross_section( + intensities, cross_sections, dose, absorption_correction=None +): """ Quantification using EDX cross sections Calculate the atomic compostion and the number of atoms per pixel @@ -613,7 +632,6 @@ def quantification_cross_section(intensities, shape as the intensity input. """ - if absorption_correction is None: # default to ones absorption_correction = np.ones_like(intensities, dtype=float) @@ -627,11 +645,13 @@ def quantification_cross_section(intensities, return composition, number_of_atoms -quantification_cross_section.__doc__ %= (_ABSORPTION_CORRECTION_DOCSTRING) + +quantification_cross_section.__doc__ %= _ABSORPTION_CORRECTION_DOCSTRING -def get_abs_corr_cross_section(composition, number_of_atoms, take_off_angle, - probe_area): +def get_abs_corr_cross_section( + composition, number_of_atoms, take_off_angle, probe_area +): """ Calculate absorption correction terms. 
@@ -648,26 +668,29 @@ def get_abs_corr_cross_section(composition, number_of_atoms, take_off_angle, Av = constants.Avogadro elements = [intensity.metadata.Sample.elements[0] for intensity in number_of_atoms] atomic_weights = np.array( - [elements_db[element]['General_properties']['atomic_weight'] - for element in elements]) + [ + elements_db[element]["General_properties"]["atomic_weight"] + for element in elements + ] + ) number_of_atoms = stack(number_of_atoms, show_progressbar=False).data - #calculate the total_mass in kg/m^2, or mass thickness. - total_mass = np.zeros_like(number_of_atoms[0], dtype = 'float') + # calculate the total_mass in kg/m^2, or mass thickness. + total_mass = np.zeros_like(number_of_atoms[0], dtype="float") for i, (weight) in enumerate(atomic_weights): - total_mass += (number_of_atoms[i] * weight / Av / 1E3 / probe_area / 1E-18) + total_mass += number_of_atoms[i] * weight / Av / 1e3 / probe_area / 1e-18 # determine mass absorption coefficients and convert from cm^2/g to m^2/kg. to_stack = material.mass_absorption_mixture( weight_percent=material.atomic_to_weight(composition) - ) + ) mac = stack(to_stack, show_progressbar=False) * 0.1 acf = np.zeros_like(number_of_atoms) - csc_toa = 1/math.sin(toa_rad) - #determine an absorption coeficient per element per pixel. + csc_toa = 1 / math.sin(toa_rad) + # determine an absorption coeficient per element per pixel. for i, (weight) in enumerate(atomic_weights): - expo = (mac.data[i] * total_mass * csc_toa) - acf[i] = expo/(1 - np.exp(-expo)) + expo = mac.data[i] * total_mass * csc_toa + acf[i] = expo / (1 - np.exp(-expo)) return acf @@ -690,12 +713,12 @@ def edx_cross_section_to_zeta(cross_sections, elements): """ if len(elements) != len(cross_sections): raise ValueError( - 'The number of elements must match the number of cross sections.') + "The number of elements must match the number of cross sections." + ) zeta_factors = [] for i, element in enumerate(elements): - atomic_weight = elements_db[element]['General_properties'][ - 'atomic_weight'] - zeta = atomic_weight / (cross_sections[i] * constants.Avogadro * 1E-25) + atomic_weight = elements_db[element]["General_properties"]["atomic_weight"] + zeta = atomic_weight / (cross_sections[i] * constants.Avogadro * 1e-25) zeta_factors.append(zeta) return zeta_factors @@ -719,11 +742,11 @@ def zeta_to_edx_cross_section(zfactors, elements): """ if len(elements) != len(zfactors): raise ValueError( - 'The number of elements must match the number of cross sections.') + "The number of elements must match the number of cross sections." 
+ ) cross_sections = [] for i, element in enumerate(elements): - atomic_weight = elements_db[element]['General_properties'][ - 'atomic_weight'] - xsec = atomic_weight / (zfactors[i] * constants.Avogadro * 1E-25) + atomic_weight = elements_db[element]["General_properties"]["atomic_weight"] + xsec = atomic_weight / (zfactors[i] * constants.Avogadro * 1e-25) cross_sections.append(xsec) return cross_sections diff --git a/exspy/misc/eels/__init__.py b/exspy/misc/eels/__init__.py index f25030c02..76a0d5eb9 100644 --- a/exspy/misc/eels/__init__.py +++ b/exspy/misc/eels/__init__.py @@ -2,6 +2,4 @@ from exspy.misc.eels.gosh_gos import GoshGOS from exspy.misc.eels.hartree_slater_gos import HartreeSlaterGOS -__all__ = ['HydrogenicGOS', - 'GoshGOS', - 'HartreeSlaterGOS'] +__all__ = ["HydrogenicGOS", "GoshGOS", "HartreeSlaterGOS"] diff --git a/exspy/misc/eels/base_gos.py b/exspy/misc/eels/base_gos.py index 24593d8f4..1c6471b1b 100644 --- a/exspy/misc/eels/base_gos.py +++ b/exspy/misc/eels/base_gos.py @@ -26,7 +26,7 @@ from hyperspy.misc.export_dictionary import ( export_to_dictionary, load_from_dictionary, - ) +) from hyperspy.misc.math_tools import get_linear_interpolation @@ -35,30 +35,25 @@ class BaseGOS: - def read_elements(self): element = self.element subshell = self.subshell # Convert to the "GATAN" nomenclature if (element in elements) is not True: - raise ValueError( - f"The given element {element} is not in the database." - ) - elif subshell not in elements[element]['Atomic_properties']['Binding_energies']: - subshells = ", ".join(list(elements[element]['Atomic_properties']['Binding_energies'].keys())) + raise ValueError(f"The given element {element} is not in the database.") + elif subshell not in elements[element]["Atomic_properties"]["Binding_energies"]: + subshells = ", ".join( + list(elements[element]["Atomic_properties"]["Binding_energies"].keys()) + ) raise ValueError( f"The given subshell {subshell} is not in the database. 
The " f"available subshells are:\n{subshells}" - ) + ) - self.onset_energy = \ - elements[ - element][ - 'Atomic_properties'][ - 'Binding_energies'][ - subshell][ - 'onset_energy (eV)'] - self.Z = elements[element]['General_properties']['Z'] + self.onset_energy = elements[element]["Atomic_properties"]["Binding_energies"][ + subshell + ]["onset_energy (eV)"] + self.Z = elements[element]["General_properties"]["Z"] self.element_dict = elements[element] def get_parametrized_qaxis(self, k1, k2, n): @@ -78,24 +73,28 @@ def get_qaxis_and_gos(self, ienergy, qmin, qmax): qgosi = np.hstack((qgosi, gosqmax)) else: index = self.qaxis.searchsorted(qmax) - g1, g2 = qgosi[index - 1:index + 1] - q1, q2 = self.qaxis[index - 1: index + 1] + g1, g2 = qgosi[index - 1 : index + 1] + q1, q2 = self.qaxis[index - 1 : index + 1] gosqmax = get_linear_interpolation((q1, g1), (q2, g2), qmax) qaxis = np.hstack((self.qaxis[:index], qmax)) - qgosi = np.hstack((qgosi[:index, ], gosqmax)) + qgosi = np.hstack((qgosi[:index,], gosqmax)) if qmin > 0: index = self.qaxis.searchsorted(qmin) - g1, g2 = qgosi[index - 1:index + 1] - q1, q2 = qaxis[index - 1:index + 1] + g1, g2 = qgosi[index - 1 : index + 1] + q1, q2 = qaxis[index - 1 : index + 1] gosqmin = get_linear_interpolation((q1, g1), (q2, g2), qmin) qaxis = np.hstack((qmin, qaxis[index:])) - qgosi = np.hstack((gosqmin, qgosi[index:],)) + qgosi = np.hstack( + ( + gosqmin, + qgosi[index:], + ) + ) return qaxis, qgosi.clip(0) class TabulatedGOS(BaseGOS): - def __init__(self, element_subshell): """ Parameters @@ -104,14 +103,14 @@ def __init__(self, element_subshell): For example, 'Ti_L3' for the GOS of the titanium L3 subshell """ - self.subshell_factor = 1. + self.subshell_factor = 1.0 if isinstance(element_subshell, dict): - self.element = element_subshell['element'] - self.subshell = element_subshell['subshell'] + self.element = element_subshell["element"] + self.subshell = element_subshell["subshell"] self.read_elements() self._load_dictionary(element_subshell) else: - self.element, self.subshell = element_subshell.split('_') + self.element, self.subshell = element_subshell.split("_") self.read_elements() self.read_gos_data() @@ -120,8 +119,7 @@ def _load_dictionary(self, dictionary): self.energy_axis = self.rel_energy_axis + self.onset_energy def as_dictionary(self, fullcopy=True): - """Export the GOS as a dictionary. 
- """ + """Export the GOS as a dictionary.""" dic = {} export_to_dictionary(self, self._whitelist, dic, fullcopy) return dic @@ -133,16 +131,14 @@ def integrateq(self, onset_energy, angle, E0): # Calculate the cross section at each energy position of the # tabulated GOS gamma = 1 + E0 / 511.06 - T = 511060 * (1 - 1 / gamma ** 2) / 2 + T = 511060 * (1 - 1 / gamma**2) / 2 for i in range(0, self.gos_array.shape[0]): E = self.energy_axis[i] + energy_shift # Calculate the limits of the q integral - qa0sqmin = (E ** 2) / (4 * R * T) + (E ** 3) / ( - 8 * gamma ** 3 * R * T ** 2) + qa0sqmin = (E**2) / (4 * R * T) + (E**3) / (8 * gamma**3 * R * T**2) p02 = T / (R * (1 - 2 * T / 511060)) pp2 = p02 - E / R * (gamma - E / 1022120) - qa0sqmax = qa0sqmin + 4 * np.sqrt(p02 * pp2) * \ - (math.sin(angle / 2)) ** 2 + qa0sqmax = qa0sqmin + 4 * np.sqrt(p02 * pp2) * (math.sin(angle / 2)) ** 2 qmin = math.sqrt(qa0sqmin) / a0 qmax = math.sqrt(qa0sqmax) / a0 # Perform the integration in a log grid @@ -151,7 +147,6 @@ def integrateq(self, onset_energy, angle, E0): qint[i] = integrate.simps(gos, logsqa0qaxis) E = self.energy_axis + energy_shift # Energy differential cross section in (barn/eV/atom) - qint *= (4.0 * np.pi * a0 ** 2.0 * R ** 2 / E / T * - self.subshell_factor) * 1e28 + qint *= (4.0 * np.pi * a0**2.0 * R**2 / E / T * self.subshell_factor) * 1e28 self.qint = qint return interpolate.make_interp_spline(E, qint, k=3) diff --git a/exspy/misc/eels/eelsdb.py b/exspy/misc/eels/eelsdb.py index f994937cd..e4c003efb 100644 --- a/exspy/misc/eels/eelsdb.py +++ b/exspy/misc/eels/eelsdb.py @@ -31,12 +31,26 @@ _logger = logging.getLogger(__name__) -def eelsdb(spectrum_type=None, title=None, author=None, element=None, formula=None, - edge=None, min_energy=None, max_energy=None, resolution=None, - min_energy_compare="gt", max_energy_compare="lt", - resolution_compare="lt", max_n=-1, monochromated=None, order=None, - order_direction="ASC", verify_certificate=True, - show_progressbar=None): +def eelsdb( + spectrum_type=None, + title=None, + author=None, + element=None, + formula=None, + edge=None, + min_energy=None, + max_energy=None, + resolution=None, + min_energy_compare="gt", + max_energy_compare="lt", + resolution_compare="lt", + max_n=-1, + monochromated=None, + order=None, + order_direction="ASC", + verify_certificate=True, + show_progressbar=None, +): r"""Download spectra from the EELS Data Base. Parameters @@ -134,11 +148,16 @@ def eelsdb(spectrum_type=None, title=None, author=None, element=None, formula=No """ # Verify arguments if spectrum_type is not None and spectrum_type not in { - 'coreloss', 'lowloss', 'zeroloss', 'xrayabs'}: - raise ValueError("spectrum_type must be one of \'coreloss\', \'lowloss\', " - "\'zeroloss\', \'xrayabs\'.") - valid_edges = [ - 'K', 'L1', 'L2,3', 'M2,3', 'M4,5', 'N2,3', 'N4,5', 'O2,3', 'O4,5'] + "coreloss", + "lowloss", + "zeroloss", + "xrayabs", + }: + raise ValueError( + "spectrum_type must be one of 'coreloss', 'lowloss', " + "'zeroloss', 'xrayabs'." + ) + valid_edges = ["K", "L1", "L2,3", "M2,3", "M4,5", "N2,3", "N4,5", "O2,3", "O4,5"] valid_order_keys = [ "spectrumType", "spectrumMin", @@ -184,22 +203,22 @@ def eelsdb(spectrum_type=None, title=None, author=None, element=None, formula=No "ref_page", "ref_year", "ref_title", - "otherURLs"] + "otherURLs", + ] if edge is not None and edge not in valid_edges: raise ValueError("`edge` must be one of %s." % ", ".join(valid_edges)) if order is not None and order not in valid_order_keys: - raise ValueError("`order` must be one of %s." 
% ", ".join( - valid_order_keys)) + raise ValueError("`order` must be one of %s." % ", ".join(valid_order_keys)) if order_direction is not None and order_direction not in ["ASC", "DESC"]: - raise ValueError("`order_direction` must be \"ASC\" or \"DESC\".") + raise ValueError('`order_direction` must be "ASC" or "DESC".') for kwarg, label in ( - (resolution_compare, "resolution_compare"), - (min_energy_compare, "min_energy_compare"), - (max_energy_compare, "max_energy_compare")): + (resolution_compare, "resolution_compare"), + (min_energy_compare, "min_energy_compare"), + (max_energy_compare, "max_energy_compare"), + ): if kwarg not in ("lt", "gt", "eq"): - raise ValueError("`%s` must be \"lt\", \"eq\" or \"gt\"." % - label) + raise ValueError('`%s` must be "lt", "eq" or "gt".' % label) if monochromated is not None: monochromated = 1 if monochromated else 0 params = { @@ -228,7 +247,9 @@ def eelsdb(spectrum_type=None, title=None, author=None, element=None, formula=No if show_progressbar is None: show_progressbar = preferences.General.show_progressbar - request = requests.get('https://api.eelsdb.eu/spectra', params=params, verify=verify_certificate) + request = requests.get( + "https://api.eelsdb.eu/spectra", params=params, verify=verify_certificate + ) spectra = [] jsons = request.json() if "message" in jsons: @@ -236,26 +257,24 @@ def eelsdb(spectrum_type=None, title=None, author=None, element=None, formula=No raise IOError( "Please report the following error to the exSpy developers: " f"{jsons['message']}." - ) + ) for json_spectrum in progressbar(jsons, disable=not show_progressbar): - download_link = json_spectrum['download_link'] - if download_link.split('.')[-1].lower() != 'msa': + download_link = json_spectrum["download_link"] + if download_link.split(".")[-1].lower() != "msa": _logger.exception( "The source file is not a msa file, please report this error " "to https://eelsdb.eu/about with the following details:\n" f"Title: {json_spectrum['title']}\nid: {json_spectrum['id']}\n" f"Download link: {download_link}\n" f"Permalink: {json_spectrum['permalink']}" - ) + ) continue msa_string = requests.get(download_link, verify=verify_certificate).text try: s = dict2signal(parse_msa_string(msa_string)[0]) emsa = s.original_metadata - s._original_metadata = type(s.original_metadata)( - {'json': json_spectrum} - ) + s._original_metadata = type(s.original_metadata)({"json": json_spectrum}) s.original_metadata.emsa = emsa spectra.append(s) @@ -265,14 +284,16 @@ def eelsdb(spectrum_type=None, title=None, author=None, element=None, formula=No _logger.exception( "Failed to load the spectrum.\n" "Title: %s id: %s.\n" - "Please report this error to https://eelsdb.eu/about \n" % - (json_spectrum["title"], json_spectrum["id"])) + "Please report this error to https://eelsdb.eu/about \n" + % (json_spectrum["title"], json_spectrum["id"]) + ) if not spectra: _logger.info( "The EELS database does not contain any spectra matching your query" ". 
If you have some, why not submitting them " - "https://eelsdb.eu/submit-data/ ?\n") + "https://eelsdb.eu/submit-data/ ?\n" + ) else: # Add some info from json to metadata # Values with units are not yet supported by HyperSpy (v0.8) so @@ -283,11 +304,11 @@ def eelsdb(spectrum_type=None, title=None, author=None, element=None, formula=No json_md = s.original_metadata.json s.metadata.General.title = json_md.title if s.metadata.Signal.signal_type == "EELS": - if json_md.get_item('elements'): + if json_md.get_item("elements"): try: # When 'No' is in the list of elements # https://api.eelsdb.eu/spectra/zero-loss-c-feg-hitachi-disp-0-214-ev/ - if json_md.elements[0].lower() != 'no': + if json_md.elements[0].lower() != "no": s.add_elements(json_md.elements) except ValueError: _logger.exception( @@ -295,29 +316,35 @@ def eelsdb(spectrum_type=None, title=None, author=None, element=None, formula=No "element information:\n" "Title: %s id: %s. Elements: %s.\n" "Please report this error in " - "https://eelsdb.eu/about \n" % - (json_md.title, json_md.id, json_md.elements)) + "https://eelsdb.eu/about \n" + % (json_md.title, json_md.id, json_md.elements) + ) if "collection" in json_md and " mrad" in json_md.collection: beta = float(json_md.collection.replace(" mrad", "")) s.metadata.set_item( "Acquisition_instrument.TEM.Detector.EELS.collection_angle", - beta) + beta, + ) if "convergence" in json_md and " mrad" in json_md.convergence: alpha = float(json_md.convergence.replace(" mrad", "")) s.metadata.set_item( - "Acquisition_instrument.TEM.convergence_angle", alpha) + "Acquisition_instrument.TEM.convergence_angle", alpha + ) if "beamenergy" in json_md and " kV" in json_md.beamenergy: beam_energy = float(json_md.beamenergy.replace(" kV", "")) s.metadata.set_item( - "Acquisition_instrument.TEM.beam_energy", beam_energy) + "Acquisition_instrument.TEM.beam_energy", beam_energy + ) # We don't yet support units, so we cannot map the thickness # s.metadata.set_item("Sample.thickness", json_md.thickness) s.metadata.set_item("Sample.description", json_md.description) s.metadata.set_item("Sample.chemical_formula", json_md.formula) s.metadata.set_item("General.author", json_md.author.name) - s.metadata.set_item("Acquisition_instrument.TEM.microscope", - json_md.microscope) + s.metadata.set_item( + "Acquisition_instrument.TEM.microscope", json_md.microscope + ) return spectra + eelsdb.__doc__ %= SHOW_PROGRESSBAR_ARG diff --git a/exspy/misc/eels/effective_angle.py b/exspy/misc/eels/effective_angle.py index 0440e975b..94fcde51d 100644 --- a/exspy/misc/eels/effective_angle.py +++ b/exspy/misc/eels/effective_angle.py @@ -46,26 +46,34 @@ def effective_angle(E0, E, alpha, beta): E0 *= 1e3 # keV to eV if alpha == 0: return beta - E0 *= 10. ** -3 # In KeV + E0 *= 10.0**-3 # In KeV E = float(E) alpha = float(alpha) beta = float(beta) - TGT = E0 * (1. + E0 / 1022.) / (1. + E0 / 511.) + TGT = E0 * (1.0 + E0 / 1022.0) / (1.0 + E0 / 511.0) thetaE = E / TGT A2 = alpha * alpha * 1e-6 B2 = beta * beta * 1e-6 T2 = thetaE * thetaE * 1e-6 - eta1 = math.sqrt((A2 + B2 + T2) ** 2 - 4. * A2 * B2) - A2 - B2 - T2 - eta2 = 2. * B2 * \ - math.log( - 0.5 / T2 * (math.sqrt((A2 + T2 - B2) ** 2 + 4. * B2 * T2) + A2 + T2 - B2)) - eta3 = 2. * A2 * \ - math.log( - 0.5 / T2 * (math.sqrt((B2 + T2 - A2) ** 2 + 4. * A2 * T2) + B2 + T2 - A2)) -# ETA=(eta1+eta2+eta3)/A2/math.log(4./T2) - F1 = (eta1 + eta2 + eta3) / 2 / A2 / math.log(1. 
+ B2 / T2) + eta1 = math.sqrt((A2 + B2 + T2) ** 2 - 4.0 * A2 * B2) - A2 - B2 - T2 + eta2 = ( + 2.0 + * B2 + * math.log( + 0.5 / T2 * (math.sqrt((A2 + T2 - B2) ** 2 + 4.0 * B2 * T2) + A2 + T2 - B2) + ) + ) + eta3 = ( + 2.0 + * A2 + * math.log( + 0.5 / T2 * (math.sqrt((B2 + T2 - A2) ** 2 + 4.0 * A2 * T2) + B2 + T2 - A2) + ) + ) + # ETA=(eta1+eta2+eta3)/A2/math.log(4./T2) + F1 = (eta1 + eta2 + eta3) / 2 / A2 / math.log(1.0 + B2 / T2) F2 = F1 if (alpha / beta) > 1: F2 = F1 * A2 / B2 - BSTAR = thetaE * math.sqrt(math.exp(F2 * math.log(1. + B2 / T2)) - 1.) + BSTAR = thetaE * math.sqrt(math.exp(F2 * math.log(1.0 + B2 / T2)) - 1.0) return BSTAR # In mrad diff --git a/exspy/misc/eels/electron_inelastic_mean_free_path.py b/exspy/misc/eels/electron_inelastic_mean_free_path.py index 3732836e6..003e65a1c 100644 --- a/exspy/misc/eels/electron_inelastic_mean_free_path.py +++ b/exspy/misc/eels/electron_inelastic_mean_free_path.py @@ -20,11 +20,14 @@ import numpy as np import math + def _F(electron_energy): return (1 + electron_energy / 1022) / (1 + electron_energy / 511) ** 2 + def _theta_E(density, electron_energy): - return 5.5 * density ** 0.3 / (_F(electron_energy) * electron_energy) + return 5.5 * density**0.3 / (_F(electron_energy) * electron_energy) + def iMFP_Iakoubovskii(density, electron_energy): """Estimate electron inelastic mean free path from density @@ -50,8 +53,13 @@ def iMFP_Iakoubovskii(density, electron_energy): float Inelastic mean free path in nanometers """ - theta_C = 20 # mrad - inv_lambda = 11 * density ** 0.3 / (200 * _F(electron_energy) * electron_energy) * np.log(theta_C ** 2 / _theta_E(density, electron_energy) ** 2) + theta_C = 20 # mrad + inv_lambda = ( + 11 + * density**0.3 + / (200 * _F(electron_energy) * electron_energy) + * np.log(theta_C**2 / _theta_E(density, electron_energy) ** 2) + ) return 1 / inv_lambda @@ -89,16 +97,21 @@ def iMFP_TPP2M(electron_energy, density, M, N_v, E_g): """ E = electron_energy * 1e3 rho = density - alpha = (1 + E / 1021999.8) / (1 + E / 510998.9)**2 + alpha = (1 + E / 1021999.8) / (1 + E / 510998.9) ** 2 E_p = 28.816 * math.sqrt(N_v * rho / M) gamma = 0.191 / math.sqrt(rho) U = (E_p / 28.816) ** 2 C = 19.7 - 9.1 * U D = 534 - 208 * U - beta = -1 + 9.44 / math.sqrt(E_p **2 + E_g**2) + 0.69 * rho ** 0.1 - iMFP = alpha * E / (E_p ** 2 * (beta * math.log(gamma * alpha * E) - C / E + D / E**2)) + beta = -1 + 9.44 / math.sqrt(E_p**2 + E_g**2) + 0.69 * rho**0.1 + iMFP = ( + alpha + * E + / (E_p**2 * (beta * math.log(gamma * alpha * E) - C / E + D / E**2)) + ) return iMFP + def iMFP_angular_correction(density, beam_energy, alpha, beta): """Estimate the effect of limited collection angle on EELS mean free path @@ -120,8 +133,14 @@ def iMFP_angular_correction(density, beam_energy, alpha, beta): Microscopy Research and Technique 71, no. 8 (2008): 626–31. 
https://onlinelibrary.wiley.com/doi/10.1002/jemt.20597 """ - theta_C = 20 # mrad - A = alpha ** 2 + beta ** 2 + 2 * _theta_E(density, beam_energy) ** 2 + abs(alpha ** 2 - beta ** 2) - B = alpha ** 2 + beta ** 2 + 2 * theta_C ** 2 + abs(alpha ** 2 - beta ** 2) - return np.log(theta_C ** 2 / _theta_E(density, beam_energy) ** 2) / np.log(A * theta_C ** 2 / B / _theta_E(density, beam_energy) ** 2) - + theta_C = 20 # mrad + A = ( + alpha**2 + + beta**2 + + 2 * _theta_E(density, beam_energy) ** 2 + + abs(alpha**2 - beta**2) + ) + B = alpha**2 + beta**2 + 2 * theta_C**2 + abs(alpha**2 - beta**2) + return np.log(theta_C**2 / _theta_E(density, beam_energy) ** 2) / np.log( + A * theta_C**2 / B / _theta_E(density, beam_energy) ** 2 + ) diff --git a/exspy/misc/eels/gosh_gos.py b/exspy/misc/eels/gosh_gos.py index 6ab95ad39..dd0a12380 100644 --- a/exspy/misc/eels/gosh_gos.py +++ b/exspy/misc/eels/gosh_gos.py @@ -69,14 +69,14 @@ class GoshGOS(TabulatedGOS): """ - _name = 'gosh' + _name = "gosh" _whitelist = { - 'gos_array': None, - 'rel_energy_axis': None, - 'qaxis': None, - 'element': None, - 'subshell': None, - } + "gos_array": None, + "rel_energy_axis": None, + "qaxis": None, + "element": None, + "subshell": None, + } def __init__(self, element_subshell, gos_file_path=None): """ @@ -113,20 +113,20 @@ def read_gos_data(self): f"of {element}. Please select a different database." ) - with h5py.File(self.gos_file_path, 'r') as h: - conventions = h['metadata/edges_info'] + with h5py.File(self.gos_file_path, "r") as h: + conventions = h["metadata/edges_info"] if subshell not in conventions: raise ValueError(error_message) - table = conventions[subshell].attrs['table'] - self.subshell_factor = conventions[subshell].attrs['occupancy_ratio'] - stem = f'/{element}/{table}' + table = conventions[subshell].attrs["table"] + self.subshell_factor = conventions[subshell].attrs["occupancy_ratio"] + stem = f"/{element}/{table}" if stem not in h: raise ValueError(error_message) gos_group = h[stem] - gos = gos_group['data'][:] - q = gos_group['q'][:] - free_energies = gos_group['free_energies'][:] - doi = h['/metadata/data_ref'].attrs['data_doi'] + gos = gos_group["data"][:] + q = gos_group["q"][:] + free_energies = gos_group["free_energies"][:] + doi = h["/metadata/data_ref"].attrs["data_doi"] gos = np.squeeze(gos.T) self.doi = doi diff --git a/exspy/misc/eels/hartree_slater_gos.py b/exspy/misc/eels/hartree_slater_gos.py index 738c98402..bfed4be03 100644 --- a/exspy/misc/eels/hartree_slater_gos.py +++ b/exspy/misc/eels/hartree_slater_gos.py @@ -34,19 +34,37 @@ # This dictionary accounts for conventions chosen in naming the data files, as well as normalisation. # These cross sections contain only odd-number edges such as N3, or M5, and are normalised accordingly. # Other edges can be obtained as scaled copies of the provided ones. 
-conventions = { 'K' : {'table': 'K1', 'factor': 1}, - 'L1': {'table': 'L1', 'factor': 1}, - 'L2,3': {'table':'L3', 'factor': 3/2}, 'L2': {'table':'L3', 'factor': 1/2}, 'L3': {'table':'L3', 'factor': 1}, - 'M1': {'table': 'M1', 'factor': 1}, - 'M2,3': {'table':'M3', 'factor': 3/2}, 'M2': {'table':'M3', 'factor': 1/2}, 'M3': {'table':'M3', 'factor': 1}, - 'M4,5': {'table':'M5', 'factor': 5/3}, 'M4': {'table':'M5', 'factor': 2/3}, 'M5': {'table':'M5', 'factor': 1}, - 'N1': {'table': 'N1', 'factor': 1}, - 'N2,3': {'table':'N3', 'factor': 3/2}, 'N2': {'table':'N3', 'factor': 1/2}, 'N3': {'table':'N3', 'factor': 1}, - 'N4,5': {'table':'N5', 'factor': 5/3}, 'N4': {'table':'N5', 'factor': 2/3}, 'N5': {'table':'N5', 'factor': 1}, - 'N6,7': {'table': 'N7', 'factor': 7/4}, 'N6': {'table':'N7', 'factor': 4/7}, 'N7': {'table':'N7', 'factor': 1}, - 'O1': {'table': 'O1', 'factor': 1}, - 'O2,3': {'table':'O3', 'factor': 3/2}, 'O2': {'table':'O3', 'factor': 1/2}, 'O3': {'table':'O3', 'factor': 1}, - 'O4,5': {'table':'O5', 'factor': 5/3}, 'O4': {'table':'O5', 'factor': 2/3}, 'O5': {'table':'O5', 'factor': 1}} +conventions = { + "K": {"table": "K1", "factor": 1}, + "L1": {"table": "L1", "factor": 1}, + "L2,3": {"table": "L3", "factor": 3 / 2}, + "L2": {"table": "L3", "factor": 1 / 2}, + "L3": {"table": "L3", "factor": 1}, + "M1": {"table": "M1", "factor": 1}, + "M2,3": {"table": "M3", "factor": 3 / 2}, + "M2": {"table": "M3", "factor": 1 / 2}, + "M3": {"table": "M3", "factor": 1}, + "M4,5": {"table": "M5", "factor": 5 / 3}, + "M4": {"table": "M5", "factor": 2 / 3}, + "M5": {"table": "M5", "factor": 1}, + "N1": {"table": "N1", "factor": 1}, + "N2,3": {"table": "N3", "factor": 3 / 2}, + "N2": {"table": "N3", "factor": 1 / 2}, + "N3": {"table": "N3", "factor": 1}, + "N4,5": {"table": "N5", "factor": 5 / 3}, + "N4": {"table": "N5", "factor": 2 / 3}, + "N5": {"table": "N5", "factor": 1}, + "N6,7": {"table": "N7", "factor": 7 / 4}, + "N6": {"table": "N7", "factor": 4 / 7}, + "N7": {"table": "N7", "factor": 1}, + "O1": {"table": "O1", "factor": 1}, + "O2,3": {"table": "O3", "factor": 3 / 2}, + "O2": {"table": "O3", "factor": 1 / 2}, + "O3": {"table": "O3", "factor": 1}, + "O4,5": {"table": "O5", "factor": 5 / 3}, + "O4": {"table": "O5", "factor": 2 / 3}, + "O5": {"table": "O5", "factor": 1}, +} class HartreeSlaterGOS(TabulatedGOS): @@ -83,18 +101,18 @@ class HartreeSlaterGOS(TabulatedGOS): """ - _name = 'Hartree-Slater' + _name = "Hartree-Slater" _whitelist = { - 'gos_array': None, - 'rel_energy_axis': None, - 'qaxis': None, - 'element': None, - 'subshell': None, - } + "gos_array": None, + "rel_energy_axis": None, + "qaxis": None, + "element": None, + "subshell": None, + } def read_elements(self): super().read_elements() - self.subshell_factor = conventions[self.subshell]['factor'] + self.subshell_factor = conventions[self.subshell]["factor"] def read_gos_data(self): # pragma: no cover _logger.info( @@ -105,7 +123,7 @@ def read_gos_data(self): # pragma: no cover ) element = self.element subshell = self.subshell - table = conventions[subshell]['table'] + table = conventions[subshell]["table"] # Check if the Peter Rez's Hartree Slater GOS distributed by # Gatan are available. 
Otherwise exit @@ -121,7 +139,7 @@ def read_gos_data(self): # pragma: no cover ) with open(gos_file) as f: - GOS_list = f.read().replace('\r', '').split() + GOS_list = f.read().replace("\r", "").split() # Map the parameters info1_1 = float(GOS_list[2]) @@ -137,8 +155,6 @@ def read_gos_data(self): # pragma: no cover del GOS_list # Calculate the scale of the matrix - self.rel_energy_axis = self.get_parametrized_energy_axis( - info2_1, info2_2, nrow) - self.qaxis = self.get_parametrized_qaxis( - info1_1, info1_2, ncol) + self.rel_energy_axis = self.get_parametrized_energy_axis(info2_1, info2_2, nrow) + self.qaxis = self.get_parametrized_qaxis(info1_1, info1_2, ncol) self.energy_axis = self.rel_energy_axis + self.onset_energy diff --git a/exspy/misc/eels/hydrogenic_gos.py b/exspy/misc/eels/hydrogenic_gos.py index b8abde27b..3ea56ca7d 100644 --- a/exspy/misc/eels/hydrogenic_gos.py +++ b/exspy/misc/eels/hydrogenic_gos.py @@ -27,9 +27,33 @@ _logger = logging.getLogger(__name__) XU = [ - .82, .52, .52, .42, .30, .29, .22, .30, .22, .16, .12, .13, .13, .14, .16, - .18, .19, .22, .14, .11, .12, .12, .12, .10, .10, .10 - ] + 0.82, + 0.52, + 0.52, + 0.42, + 0.30, + 0.29, + 0.22, + 0.30, + 0.22, + 0.16, + 0.12, + 0.13, + 0.13, + 0.14, + 0.16, + 0.18, + 0.19, + 0.22, + 0.14, + 0.11, + 0.12, + 0.12, + 0.12, + 0.10, + 0.10, + 0.10, +] # IE3=[73,99,135,164,200,245,294,347,402,455,513,575,641,710, # 779,855,931,1021,1115,1217,1323,1436,1550,1675] @@ -75,7 +99,8 @@ class HydrogenicGOS(BaseGOS): I. Iyengar. See http://www.tem-eels.ca/ for the original code. """ - _name = 'hydrogenic' + + _name = "hydrogenic" def __init__(self, element_subshell): """ @@ -89,64 +114,77 @@ def __init__(self, element_subshell): # Check if the Peter Rez's Hartree Slater GOS distributed by # Gatan are available. Otherwise exit - self.element, self.subshell = element_subshell.split('_') + self.element, self.subshell = element_subshell.split("_") self.read_elements() self.energy_shift = 0 - if self.subshell[:1] == 'K': + if self.subshell[:1] == "K": self.gosfunc = self.gosfuncK - self.rel_energy_axis = self.get_parametrized_energy_axis( - 50, 3, 50) - elif self.subshell[:1] == 'L': + self.rel_energy_axis = self.get_parametrized_energy_axis(50, 3, 50) + elif self.subshell[:1] == "L": self.gosfunc = self.gosfuncL - self.onset_energy_L3 = self.element_dict['Atomic_properties'][ - 'Binding_energies']['L3']['onset_energy (eV)'] - self.onset_energy_L1 = self.element_dict['Atomic_properties'][ - 'Binding_energies']['L1']['onset_energy (eV)'] + self.onset_energy_L3 = self.element_dict["Atomic_properties"][ + "Binding_energies" + ]["L3"]["onset_energy (eV)"] + self.onset_energy_L1 = self.element_dict["Atomic_properties"][ + "Binding_energies" + ]["L1"]["onset_energy (eV)"] self.onset_energy = self.onset_energy_L3 - relative_axis = self.get_parametrized_energy_axis( - 50, 3, 50) + relative_axis = self.get_parametrized_energy_axis(50, 3, 50) dL3L2 = self.onset_energy_L1 - self.onset_energy_L3 - self.rel_energy_axis = np.hstack(( - relative_axis[:relative_axis.searchsorted(dL3L2)], - relative_axis + dL3L2)) + self.rel_energy_axis = np.hstack( + ( + relative_axis[: relative_axis.searchsorted(dL3L2)], + relative_axis + dL3L2, + ) + ) else: raise ValueError( - 'The Hydrogenic GOS currently can only' - 'compute K or L shells. Try using other GOS.') + "The Hydrogenic GOS currently can only" + "compute K or L shells. Try using other GOS." 
+ ) self.energy_axis = self.rel_energy_axis + self.onset_energy info_str = ( - "\nHydrogenic GOS\n" + - ("\tElement: %s " % self.element) + - ("\tSubshell: %s " % self.subshell) + - ("\tOnset Energy = %s " % self.onset_energy)) + "\nHydrogenic GOS\n" + + ("\tElement: %s " % self.element) + + ("\tSubshell: %s " % self.subshell) + + ("\tOnset Energy = %s " % self.onset_energy) + ) _logger.info(info_str) def integrateq(self, onset_energy, angle, E0): energy_shift = onset_energy - self.onset_energy self.energy_shift = energy_shift gamma = 1 + E0 / 511.06 - T = 511060 * (1 - 1 / gamma ** 2) / 2 + T = 511060 * (1 - 1 / gamma**2) / 2 qint = np.zeros((self.energy_axis.shape[0])) for i, E in enumerate(self.energy_axis + energy_shift): - qa0sqmin = (E ** 2) / (4 * R * T) + (E ** 3) / ( - 8 * gamma ** 3 * R * T ** 2) + qa0sqmin = (E**2) / (4 * R * T) + (E**3) / (8 * gamma**3 * R * T**2) p02 = T / (R * (1 - 2 * T / 511060)) pp2 = p02 - E / R * (gamma - E / 1022120) - qa0sqmax = qa0sqmin + 4 * np.sqrt(p02 * pp2) * \ - (math.sin(angle / 2)) ** 2 + qa0sqmax = qa0sqmin + 4 * np.sqrt(p02 * pp2) * (math.sin(angle / 2)) ** 2 # dsbyde IS THE ENERGY-DIFFERENTIAL X-SECN (barn/eV/atom) - qint[i] = 3.5166e8 * (R / T) * (R / E) * ( - integrate.quad( - lambda x: self.gosfunc(E, np.exp(x)), - math.log(qa0sqmin), math.log(qa0sqmax))[0]) + qint[i] = ( + 3.5166e8 + * (R / T) + * (R / E) + * ( + integrate.quad( + lambda x: self.gosfunc(E, np.exp(x)), + math.log(qa0sqmin), + math.log(qa0sqmax), + )[0] + ) + ) self.qint = qint return interpolate.make_interp_spline( - self.energy_axis + energy_shift, qint, k=1, - ) + self.energy_axis + energy_shift, + qint, + k=1, + ) def gosfuncK(self, E, qa02): # gosfunc calculates (=DF/DE) which IS PER EV AND PER ATOM @@ -158,8 +196,8 @@ def gosfuncK(self, E, qa02): zs = z - 0.5 rnk = 2 - q = qa02 / zs ** 2 - kh2 = E / (r * zs ** 2) - 1 + q = qa02 / zs**2 + kh2 = E / (r * zs**2) - 1 akh = np.sqrt(abs(kh2)) if akh < 0.01: akh = 0.01 @@ -171,12 +209,10 @@ def gosfuncK(self, E, qa02): c = np.e ** ((-2 / akh) * bp) else: d = 1 - y = -1 / akh * np.log((q + 1 - kh2 + 2 * akh) / ( - q + 1 - kh2 - 2 * akh)) - c = np.e ** y + y = -1 / akh * np.log((q + 1 - kh2 + 2 * akh) / (q + 1 - kh2 - 2 * akh)) + c = np.e**y a = ((q - kh2 + 1) ** 2 + 4 * kh2) ** 3 - return 128 * rnk * E / ( - r * zs ** 4) * c / d * (q + kh2 / 3 + 1 / 3) / (a * r) + return 128 * rnk * E / (r * zs**4) * c / d * (q + kh2 / 3 + 1 / 3) / (a * r) def gosfuncL(self, E, qa02): # gosfunc calculates (=DF/DE) which IS PER EV AND PER ATOM @@ -191,15 +227,15 @@ def gosfuncL(self, E, qa02): # Egerton does not tabulate the correction for Z>36. # This produces XSs that are within 10% of Hartree-Slater XSs # for these elements. 
- u = .1 + u = 0.1 else: # Egerton's correction to the Hydrogenic XS u = XU[int(iz)] el3 = self.onset_energy_L3 + self.energy_shift el1 = self.onset_energy_L1 + self.energy_shift - q = qa02 / zs ** 2 - kh2 = E / (r * zs ** 2) - 0.25 + q = qa02 / zs**2 + kh2 = E / (r * zs**2) - 0.25 akh = np.sqrt(abs(kh2)) if kh2 >= 0.0: d = 1 - np.exp(-2 * np.pi / akh) @@ -209,22 +245,33 @@ def gosfuncL(self, E, qa02): c = np.exp((-2 / akh) * bp) else: d = 1 - y = -1 / akh * \ - np.log((q + 0.25 - kh2 + akh) / (q + 0.25 - kh2 - akh)) + y = -1 / akh * np.log((q + 0.25 - kh2 + akh) / (q + 0.25 - kh2 - akh)) c = np.exp(y) if E - el1 <= 0: - g = 2.25 * q ** 4 - (0.75 + 3 * kh2) * q ** 3 + ( - 0.59375 - 0.75 * kh2 - 0.5 * kh2 ** 2) * q * q + ( - 0.11146 + 0.85417 * kh2 + 1.8833 * kh2 * kh2 + kh2 ** 3) * \ - q + 0.0035807 + kh2 / 21.333 + kh2 * kh2 / 4.5714 + kh2 ** 3 \ - / 2.4 + kh2 ** 4 / 4 + g = ( + 2.25 * q**4 + - (0.75 + 3 * kh2) * q**3 + + (0.59375 - 0.75 * kh2 - 0.5 * kh2**2) * q * q + + (0.11146 + 0.85417 * kh2 + 1.8833 * kh2 * kh2 + kh2**3) * q + + 0.0035807 + + kh2 / 21.333 + + kh2 * kh2 / 4.5714 + + kh2**3 / 2.4 + + kh2**4 / 4 + ) a = ((q - kh2 + 0.25) ** 2 + kh2) ** 5 else: - g = q ** 3 - (5 / 3 * kh2 + 11 / 12) * q ** 2 + ( - kh2 * kh2 / 3 + 1.5 * kh2 + 65 / 48) * q + kh2 ** 3 / 3 + \ - 0.75 * kh2 * kh2 + 23 / 48 * kh2 + 5 / 64 + g = ( + q**3 + - (5 / 3 * kh2 + 11 / 12) * q**2 + + (kh2 * kh2 / 3 + 1.5 * kh2 + 65 / 48) * q + + kh2**3 / 3 + + 0.75 * kh2 * kh2 + + 23 / 48 * kh2 + + 5 / 64 + ) a = ((q - kh2 + 0.25) ** 2 + kh2) ** 4 rf = ((E + 0.1 - el3) / 1.8 / z / z) ** u # The following commented lines are to give a more accurate GOS @@ -232,4 +279,4 @@ def gosfuncL(self, E, qa02): # for quantification by curve fitting. # if abs(iz - 11) <= 5 and E - el3 <= 20: # rf = 1 - return rf * 32 * g * c / a / d * E / r / r / zs ** 4 + return rf * 32 * g * c / a / d * E / r / r / zs**4 diff --git a/exspy/misc/eels/tools.py b/exspy/misc/eels/tools.py index 2bbb8bc80..e7ec11b88 100644 --- a/exspy/misc/eels/tools.py +++ b/exspy/misc/eels/tools.py @@ -31,14 +31,11 @@ _logger = logging.getLogger(__name__) -def _estimate_gain(ns, cs, - weighted=False, - higher_than=None, - plot_results=False, - binning=0, - pol_order=1): +def _estimate_gain( + ns, cs, weighted=False, higher_than=None, plot_results=False, binning=0, pol_order=1 +): if binning > 0: - factor = 2 ** binning + factor = 2**binning remainder = np.mod(ns.shape[1], factor) if remainder != 0: ns = ns[:, remainder:] @@ -68,6 +65,7 @@ def _estimate_gain(ns, cs, from hyperspy._signals.signal1D import Signal1D from hyperspy.models.model1d import Model1D from hyperspy.components1d import Line + s = Signal1D(variance2fit) s.axes_manager.signal_axes[0].axis = average2fit m = Model1D(s) @@ -82,11 +80,10 @@ def _estimate_gain(ns, cs, if plot_results is True: plt.figure() plt.scatter(average.squeeze(), variance.squeeze()) - plt.xlabel('Counts') - plt.ylabel('Variance') - plt.plot(average2fit, np.polyval(fit, average2fit), color='red') - results = {'fit': fit, 'variance': variance.squeeze(), - 'counts': average.squeeze()} + plt.xlabel("Counts") + plt.ylabel("Variance") + plt.plot(average2fit, np.polyval(fit, average2fit), color="red") + results = {"fit": fit, "variance": variance.squeeze(), "counts": average.squeeze()} return results @@ -99,15 +96,16 @@ def _estimate_correlation_factor(g0, gk, k): def estimate_variance_parameters( - noisy_signal, - clean_signal, - mask=None, - pol_order=1, - higher_than=None, - return_results=False, - plot_results=True, - weighted=False, 
- store_results="ask"): + noisy_signal, + clean_signal, + mask=None, + pol_order=1, + higher_than=None, + return_results=False, + plot_results=True, + weighted=False, + store_results="ask", +): """Find the scale and offset of the Poissonian noise By comparing an SI with its denoised version (i.e. by PCA), @@ -140,40 +138,56 @@ def estimate_variance_parameters( # The rest of the code assumes that the first data axis # is the navigation axis. We transpose the data if that is not the # case. - ns = (noisy_signal.data.copy() - if noisy_signal.axes_manager[0].index_in_array == 0 - else noisy_signal.data.T.copy()) - cs = (clean_signal.data.copy() - if clean_signal.axes_manager[0].index_in_array == 0 - else clean_signal.data.T.copy()) + ns = ( + noisy_signal.data.copy() + if noisy_signal.axes_manager[0].index_in_array == 0 + else noisy_signal.data.T.copy() + ) + cs = ( + clean_signal.data.copy() + if clean_signal.axes_manager[0].index_in_array == 0 + else clean_signal.data.T.copy() + ) if mask is not None: - _slice = [slice(None), ] * len(ns.shape) - _slice[noisy_signal.axes_manager.signal_axes[0].index_in_array]\ - = ~mask + _slice = [ + slice(None), + ] * len(ns.shape) + _slice[noisy_signal.axes_manager.signal_axes[0].index_in_array] = ~mask ns = ns[_slice] cs = cs[_slice] results0 = _estimate_gain( - ns, cs, weighted=weighted, higher_than=higher_than, - plot_results=plot_results, binning=0, pol_order=pol_order) + ns, + cs, + weighted=weighted, + higher_than=higher_than, + plot_results=plot_results, + binning=0, + pol_order=pol_order, + ) results2 = _estimate_gain( - ns, cs, weighted=weighted, higher_than=higher_than, - plot_results=False, binning=2, pol_order=pol_order) - - c = _estimate_correlation_factor(results0['fit'][0], - results2['fit'][0], 4) - - message = ("Gain factor: %.2f\n" % results0['fit'][0] + - "Gain offset: %.2f\n" % results0['fit'][1] + - "Correlation factor: %.2f\n" % c) + ns, + cs, + weighted=weighted, + higher_than=higher_than, + plot_results=False, + binning=2, + pol_order=pol_order, + ) + + c = _estimate_correlation_factor(results0["fit"][0], results2["fit"][0], 4) + + message = ( + "Gain factor: %.2f\n" % results0["fit"][0] + + "Gain offset: %.2f\n" % results0["fit"][1] + + "Correlation factor: %.2f\n" % c + ) if store_results == "ask": is_ok = "" while is_ok not in ("Yes", "No"): - is_ok = input( - message + - "Would you like to store the results (Yes/No)?") + is_ok = input(message + "Would you like to store the results (Yes/No)?") is_ok = is_ok == "Yes" else: is_ok = store_results @@ -181,18 +195,20 @@ def estimate_variance_parameters( if is_ok: noisy_signal.metadata.set_item( "Signal.Noise_properties.Variance_linear_model.gain_factor", - results0['fit'][0]) + results0["fit"][0], + ) noisy_signal.metadata.set_item( "Signal.Noise_properties.Variance_linear_model.gain_offset", - results0['fit'][1]) + results0["fit"][1], + ) noisy_signal.metadata.set_item( - "Signal.Noise_properties.Variance_linear_model." - "correlation_factor", - c) + "Signal.Noise_properties.Variance_linear_model." "correlation_factor", c + ) noisy_signal.metadata.set_item( - "Signal.Noise_properties.Variance_linear_model." + - "parameters_estimation_method", - 'exSpy') + "Signal.Noise_properties.Variance_linear_model." 
+ + "parameters_estimation_method", + "exSpy", + ) if return_results is True: return results0 @@ -201,15 +217,20 @@ def estimate_variance_parameters( def power_law_perc_area(E1, E2, r): a = E1 b = E2 - return 100 * ((a ** r * r - a ** r) * (a / (a ** r * r - a ** r) - - (b + a) / ((b + a) ** r * r - (b + a) ** r))) / a + return ( + 100 + * ( + (a**r * r - a**r) + * (a / (a**r * r - a**r) - (b + a) / ((b + a) ** r * r - (b + a) ** r)) + ) + / a + ) def rel_std_of_fraction(a, std_a, b, std_b, corr_factor=1): rel_a = std_a / a rel_b = std_b / b - return np.sqrt(rel_a ** 2 + rel_b ** 2 - - 2 * rel_a * rel_b * corr_factor) + return np.sqrt(rel_a**2 + rel_b**2 - 2 * rel_a * rel_b * corr_factor) def ratio(edge_A, edge_B): @@ -219,11 +240,9 @@ def ratio(edge_A, edge_B): std_b = edge_B.intensity.std ratio = a / b ratio_std = ratio * rel_std_of_fraction(a, std_a, b, std_b) - _logger.info("Ratio %s/%s %1.3f +- %1.3f ", - edge_A.name, - edge_B.name, - a / b, - 1.96 * ratio_std) + _logger.info( + "Ratio %s/%s %1.3f +- %1.3f ", edge_A.name, edge_B.name, a / b, 1.96 * ratio_std + ) return ratio, ratio_std @@ -263,23 +282,25 @@ def eels_constant(s, zlp, t): """ # Constants and units - me = constants.value( - 'electron mass energy equivalent in MeV') * 1e3 # keV + me = constants.value("electron mass energy equivalent in MeV") * 1e3 # keV # Mapped parameters try: e0 = s.metadata.Acquisition_instrument.TEM.beam_energy except BaseException: - raise AttributeError("Please define the beam energy." - "You can do this e.g. by using the " - "set_microscope_parameters method") + raise AttributeError( + "Please define the beam energy." + "You can do this e.g. by using the " + "set_microscope_parameters method" + ) try: - beta = s.metadata.Acquisition_instrument.\ - TEM.Detector.EELS.collection_angle + beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle except BaseException: - raise AttributeError("Please define the collection semi-angle." - "You can do this e.g. by using the " - "set_microscope_parameters method") + raise AttributeError( + "Please define the collection semi-angle." + "You can do this e.g. by using the " + "set_microscope_parameters method" + ) axis = s.axes_manager.signal_axes[0] eaxis = axis.axis.copy() @@ -288,47 +309,52 @@ def eels_constant(s, zlp, t): eaxis[0] = 1e-10 if isinstance(zlp, hyperspy.signal.BaseSignal): - if (zlp.axes_manager.navigation_dimension == - s.axes_manager.navigation_dimension): + if zlp.axes_manager.navigation_dimension == s.axes_manager.navigation_dimension: if zlp.axes_manager.signal_dimension == 0: i0 = zlp.data else: i0 = zlp.integrate1D(axis.index_in_axes_manager).data else: - raise ValueError('The ZLP signal dimensions are not ' - 'compatible with the dimensions of the ' - 'low-loss signal') + raise ValueError( + "The ZLP signal dimensions are not " + "compatible with the dimensions of the " + "low-loss signal" + ) # The following prevents errors if the signal is a single spectrum if len(i0) != 1: - i0 = i0.reshape( - np.insert(i0.shape, axis.index_in_array, 1)) + i0 = i0.reshape(np.insert(i0.shape, axis.index_in_array, 1)) elif isinstance(zlp, numbers.Number): i0 = zlp else: - raise ValueError('The zero-loss peak input is not valid, it must be\ - in the BaseSignal class or a Number.') + raise ValueError( + "The zero-loss peak input is not valid, it must be\ + in the BaseSignal class or a Number." 
+ ) if isinstance(t, hyperspy.signal.BaseSignal): - if (t.axes_manager.navigation_dimension == - s.axes_manager.navigation_dimension) and ( - t.axes_manager.signal_dimension == 0): + if ( + t.axes_manager.navigation_dimension == s.axes_manager.navigation_dimension + ) and (t.axes_manager.signal_dimension == 0): t = t.data - t = t.reshape( - np.insert(t.shape, axis.index_in_array, 1)) + t = t.reshape(np.insert(t.shape, axis.index_in_array, 1)) else: - raise ValueError('The thickness signal dimensions are not ' - 'compatible with the dimensions of the ' - 'low-loss signal') + raise ValueError( + "The thickness signal dimensions are not " + "compatible with the dimensions of the " + "low-loss signal" + ) # Kinetic definitions - ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2 + ke = e0 * (1 + e0 / 2.0 / me) / (1 + e0 / me) ** 2 tgt = e0 * (2 * me + e0) / (me + e0) k = s.__class__( - data=(t * i0 / (332.5 * ke)) * np.log(1 + (beta * tgt / eaxis) ** 2)) + data=(t * i0 / (332.5 * ke)) * np.log(1 + (beta * tgt / eaxis) ** 2) + ) k.metadata.General.title = "EELS proportionality constant K" return k -def get_edges_near_energy(energy, width=10, only_major=False, order='closest'): + +def get_edges_near_energy(energy, width=10, only_major=False, order="closest"): """Find edges near a given energy that are within the given energy window. @@ -355,42 +381,43 @@ def get_edges_near_energy(energy, width=10, only_major=False, order='closest'): if width < 0: raise ValueError("Provided width needs to be >= 0.") - if order not in ('closest', 'ascending', 'descending'): - raise ValueError("order needs to be 'closest', 'ascending' or " - "'descending'") + if order not in ("closest", "ascending", "descending"): + raise ValueError("order needs to be 'closest', 'ascending' or " "'descending'") - Emin, Emax = energy - width/2, energy + width/2 + Emin, Emax = energy - width / 2, energy + width / 2 # find all subshells that have its energy within range valid_edges = [] for element, element_info in elements_db.items(): try: - for shell, shell_info in element_info[ - 'Atomic_properties']['Binding_energies'].items(): + for shell, shell_info in element_info["Atomic_properties"][ + "Binding_energies" + ].items(): if only_major: - if shell_info['relevance'] != 'Major': + if shell_info["relevance"] != "Major": continue - if shell[-1] != 'a' and \ - Emin <= shell_info['onset_energy (eV)'] <= Emax: - subshell = '{}_{}'.format(element, shell) - Ediff = abs(shell_info['onset_energy (eV)'] - energy) - valid_edges.append((subshell, - shell_info['onset_energy (eV)'], - Ediff)) + if shell[-1] != "a" and Emin <= shell_info["onset_energy (eV)"] <= Emax: + subshell = "{}_{}".format(element, shell) + Ediff = abs(shell_info["onset_energy (eV)"] - energy) + valid_edges.append( + (subshell, shell_info["onset_energy (eV)"], Ediff) + ) except KeyError: continue # Sort according to 'order' and return only the edges - if order == 'closest': + if order == "closest": edges = [edge for edge, _, _ in sorted(valid_edges, key=lambda x: x[2])] - elif order == 'ascending': + elif order == "ascending": edges = [edge for edge, _, _ in sorted(valid_edges, key=lambda x: x[1])] - elif order == 'descending': - edges = [edge for edge, _, _ in sorted(valid_edges, key=lambda x: x[1], - reverse=True)] + elif order == "descending": + edges = [ + edge for edge, _, _ in sorted(valid_edges, key=lambda x: x[1], reverse=True) + ] return edges + def get_info_from_edges(edges): """Return the information of a sequence of edges as a list of dictionaries @@ -409,8 +436,8 
@@ def get_info_from_edges(edges): edges = np.atleast_1d(edges) info = [] for edge in edges: - element, subshell = edge.split('_') - d = elements_db[element]['Atomic_properties']['Binding_energies'][subshell] + element, subshell = edge.split("_") + d = elements_db[element]["Atomic_properties"]["Binding_energies"][subshell] info.append(d) return info diff --git a/exspy/misc/material.py b/exspy/misc/material.py index 776545bdc..de068afee 100644 --- a/exspy/misc/material.py +++ b/exspy/misc/material.py @@ -33,7 +33,7 @@ "density_of_mixture", "mass_absorption_coefficient", "mass_absorption_mixture", - ] +] def __dir__(): @@ -62,21 +62,24 @@ def _weight_to_atomic(weight_percent, elements): """ if len(elements) != len(weight_percent): raise ValueError( - 'The number of elements must match the size of the first axis' - 'of weight_percent.') + "The number of elements must match the size of the first axis" + "of weight_percent." + ) atomic_weights = np.array( - [elements_db[element]['General_properties']['atomic_weight'] - for element in elements]) - atomic_percent = np.array( - list(map(np.divide, weight_percent, atomic_weights))) - sum_weight = atomic_percent.sum(axis=0) / 100. + [ + elements_db[element]["General_properties"]["atomic_weight"] + for element in elements + ] + ) + atomic_percent = np.array(list(map(np.divide, weight_percent, atomic_weights))) + sum_weight = atomic_percent.sum(axis=0) / 100.0 for i, el in enumerate(elements): atomic_percent[i] /= sum_weight atomic_percent[i] = np.where(sum_weight == 0.0, 0.0, atomic_percent[i]) return atomic_percent -def weight_to_atomic(weight_percent, elements='auto'): +def weight_to_atomic(weight_percent, elements="auto"): """Convert weight percent (wt%) to atomic percent (at.%). Parameters @@ -101,16 +104,16 @@ def weight_to_atomic(weight_percent, elements='auto'): """ from hyperspy.signals import BaseSignal + elements = _elements_auto(weight_percent, elements) if isinstance(weight_percent[0], BaseSignal): atomic_percent = stack(weight_percent) - atomic_percent.data = _weight_to_atomic( - atomic_percent.data, elements) + atomic_percent.data = _weight_to_atomic(atomic_percent.data, elements) atomic_percent.data = np.nan_to_num(atomic_percent.data) atomic_percent = atomic_percent.split() for i, el in enumerate(elements): - atomic_percent[i].metadata.General.title = 'atomic percent of ' + el + atomic_percent[i].metadata.General.title = "atomic percent of " + el return atomic_percent else: return _weight_to_atomic(weight_percent, elements) @@ -140,21 +143,24 @@ def _atomic_to_weight(atomic_percent, elements): """ if len(elements) != len(atomic_percent): raise ValueError( - 'The number of elements must match the size of the first axis' - 'of atomic_percent.') + "The number of elements must match the size of the first axis" + "of atomic_percent." + ) atomic_weights = np.array( - [elements_db[element]['General_properties']['atomic_weight'] - for element in elements]) - weight_percent = np.array( - list(map(np.multiply, atomic_percent, atomic_weights))) - sum_atomic = weight_percent.sum(axis=0) / 100. 
+ [ + elements_db[element]["General_properties"]["atomic_weight"] + for element in elements + ] + ) + weight_percent = np.array(list(map(np.multiply, atomic_percent, atomic_weights))) + sum_atomic = weight_percent.sum(axis=0) / 100.0 for i, el in enumerate(elements): weight_percent[i] /= sum_atomic weight_percent[i] = np.where(sum_atomic == 0.0, 0.0, weight_percent[i]) return weight_percent -def atomic_to_weight(atomic_percent, elements='auto'): +def atomic_to_weight(atomic_percent, elements="auto"): """Convert atomic percent to weight percent. Parameters @@ -179,22 +185,20 @@ def atomic_to_weight(atomic_percent, elements='auto'): """ from hyperspy.signals import BaseSignal + elements = _elements_auto(atomic_percent, elements) if isinstance(atomic_percent[0], BaseSignal): weight_percent = stack(atomic_percent, show_progressbar=False) - weight_percent.data = _atomic_to_weight( - weight_percent.data, elements) + weight_percent.data = _atomic_to_weight(weight_percent.data, elements) weight_percent = weight_percent.split() for i, el in enumerate(elements): - atomic_percent[i].metadata.General.title = 'weight percent of ' + el + atomic_percent[i].metadata.General.title = "weight percent of " + el return weight_percent else: return _atomic_to_weight(atomic_percent, elements) -def _density_of_mixture(weight_percent, - elements, - mean='harmonic'): +def _density_of_mixture(weight_percent, elements, mean="harmonic"): """Calculate the density a mixture of elements. The density of the elements is retrieved from an internal database. The @@ -225,34 +229,37 @@ def _density_of_mixture(weight_percent, """ if len(elements) != len(weight_percent): raise ValueError( - 'The number of elements must match the size of the first axis' - 'of weight_percent.') + "The number of elements must match the size of the first axis" + "of weight_percent." + ) densities = np.array( - [elements_db[element]['Physical_properties']['density (g/cm^3)'] - for element in elements]) - sum_densities = np.zeros_like(weight_percent, dtype='float') - try : - if mean == 'harmonic': + [ + elements_db[element]["Physical_properties"]["density (g/cm^3)"] + for element in elements + ] + ) + sum_densities = np.zeros_like(weight_percent, dtype="float") + try: + if mean == "harmonic": for i, weight in enumerate(weight_percent): sum_densities[i] = weight / densities[i] sum_densities = sum_densities.sum(axis=0) density = np.sum(weight_percent, axis=0) / sum_densities return np.where(sum_densities == 0.0, 0.0, density) - elif mean == 'weighted': + elif mean == "weighted": for i, weight in enumerate(weight_percent): sum_densities[i] = weight * densities[i] sum_densities = sum_densities.sum(axis=0) sum_weight = np.sum(weight_percent, axis=0) density = sum_densities / sum_weight return np.where(sum_weight == 0.0, 0.0, density) - except TypeError : + except TypeError: raise ValueError( - 'The density of one of the elements is unknown (Probably At or Fr).') + "The density of one of the elements is unknown (Probably At or Fr)." + ) -def density_of_mixture(weight_percent, - elements='auto', - mean='harmonic'): +def density_of_mixture(weight_percent, elements="auto", mean="harmonic"): """Calculate the density of a mixture of elements. The density of the elements is retrieved from an internal database. 
The @@ -284,11 +291,12 @@ def density_of_mixture(weight_percent, """ from hyperspy.signals import BaseSignal + elements = _elements_auto(weight_percent, elements) if isinstance(weight_percent[0], BaseSignal): density = weight_percent[0]._deepcopy_with_new_data( - _density_of_mixture(stack(weight_percent).data, - elements, mean=mean)) + _density_of_mixture(stack(weight_percent).data, elements, mean=mean) + ) return density else: return _density_of_mixture(weight_percent, elements, mean=mean) @@ -340,16 +348,18 @@ def mass_absorption_coefficient(element, energies): if isinstance(energy, str): energies[i] = utils_eds._get_energy_xray_line(energy) index = np.searchsorted(energies_db, energies) - mac_res = np.exp(np.log(macs[index - 1]) + - np.log(macs[index] / macs[index - 1]) * - (np.log(energies / energies_db[index - 1]) / - np.log(energies_db[index] / energies_db[index - 1]))) + mac_res = np.exp( + np.log(macs[index - 1]) + + np.log(macs[index] / macs[index - 1]) + * ( + np.log(energies / energies_db[index - 1]) + / np.log(energies_db[index] / energies_db[index - 1]) + ) + ) return np.nan_to_num(mac_res) -def _mass_absorption_mixture(weight_percent, - elements, - energies): +def _mass_absorption_mixture(weight_percent, elements, energies): """Calculate the mass absorption coefficient for X-ray absorbed in a mixture of elements. @@ -390,8 +400,7 @@ def _mass_absorption_mixture(weight_percent, Scattering Tables (version 2.1). """ if len(elements) != len(weight_percent): - raise ValueError( - "Elements and weight_fraction should have the same length") + raise ValueError("Elements and weight_fraction should have the same length") if isinstance(weight_percent[0], Iterable): weight_fraction = np.array(weight_percent) weight_fraction /= np.sum(weight_fraction, 0) @@ -401,15 +410,14 @@ def _mass_absorption_mixture(weight_percent, mac_res += np.array([weight * ma for ma in mac_re]) return mac_res else: - mac_res = np.array([mass_absorption_coefficient( - el, energies) for el in elements]) + mac_res = np.array( + [mass_absorption_coefficient(el, energies) for el in elements] + ) mac_res = np.dot(weight_percent, mac_res) / np.sum(weight_percent, 0) return mac_res -def mass_absorption_mixture(weight_percent, - elements='auto', - energies='auto'): +def mass_absorption_mixture(weight_percent, elements="auto", energies="auto"): """Calculate the mass absorption coefficient for X-ray absorbed in a mixture of elements. 
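As an aside to the reformatting above, a minimal usage sketch of the helpers in exspy/misc/material.py touched by these hunks; the Al/Zn composition and the Al_Ka line are placeholder assumptions, not values taken from this changeset:

from exspy.misc import material

# Assumed 50/50 atomic composition converted to weight percent.
wt = material.atomic_to_weight([50.0, 50.0], elements=("Al", "Zn"))

# Harmonic-mean density of the mixture in g/cm^3.
rho = material.density_of_mixture(wt, elements=("Al", "Zn"), mean="harmonic")

# Mass absorption coefficient of the mixture for the assumed Al_Ka line.
mac = material.mass_absorption_mixture(wt, elements=("Al", "Zn"), energies=["Al_Ka"])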
@@ -454,20 +462,23 @@ def mass_absorption_mixture(weight_percent, """ from hyperspy.signals import BaseSignal + elements = _elements_auto(weight_percent, elements) energies = _lines_auto(weight_percent, energies) if isinstance(weight_percent[0], BaseSignal): weight_per = np.array([wt.data for wt in weight_percent]) - mac_res = stack([weight_percent[0].deepcopy()] * len(energies), - show_progressbar=False) - mac_res.data = \ - _mass_absorption_mixture(weight_per, elements, energies) + mac_res = stack( + [weight_percent[0].deepcopy()] * len(energies), show_progressbar=False + ) + mac_res.data = _mass_absorption_mixture(weight_per, elements, energies) mac_res = mac_res.split() for i, energy in enumerate(energies): mac_res[i].metadata.set_item("Sample.xray_lines", ([energy])) mac_res[i].metadata.General.set_item( - "title", "Absoprtion coeff of" - " %s in %s" % (energy, mac_res[i].metadata.General.title)) + "title", + "Absoprtion coeff of" + " %s in %s" % (energy, mac_res[i].metadata.General.title), + ) if mac_res[i].metadata.has_item("Sample.elements"): del mac_res[i].metadata.Sample.elements return mac_res @@ -478,18 +489,19 @@ def mass_absorption_mixture(weight_percent, def _lines_auto(composition, xray_lines): if isinstance(composition[0], numbers.Number): if isinstance(xray_lines, str): - if xray_lines == 'auto': + if xray_lines == "auto": raise ValueError("The X-ray lines needs to be provided.") else: if isinstance(xray_lines, str): - if xray_lines == 'auto': + if xray_lines == "auto": xray_lines = [] for compo in composition: if len(compo.metadata.Sample.xray_lines) > 1: raise ValueError( "The signal %s contains more than one X-ray lines " "but this function requires only one X-ray lines " - "per signal." % compo.metadata.General.title) + "per signal." % compo.metadata.General.title + ) else: xray_lines.append(compo.metadata.Sample.xray_lines[0]) return xray_lines @@ -498,18 +510,19 @@ def _lines_auto(composition, xray_lines): def _elements_auto(composition, elements): if isinstance(composition[0], numbers.Number): if isinstance(elements, str): - if elements == 'auto': + if elements == "auto": raise ValueError("The elements needs to be provided.") else: if isinstance(elements, str): - if elements == 'auto': + if elements == "auto": elements = [] for compo in composition: if len(compo.metadata.Sample.elements) > 1: raise ValueError( "The signal %s contains more than one element " "but this function requires only one element " - "per signal." % compo.metadata.General.title) + "per signal." % compo.metadata.General.title + ) else: elements.append(compo.metadata.Sample.elements[0]) return elements diff --git a/exspy/models/__init__.py b/exspy/models/__init__.py index 04b4bab4f..b95558346 100644 --- a/exspy/models/__init__.py +++ b/exspy/models/__init__.py @@ -10,7 +10,7 @@ "EDSSEMModel", "EDSTEMModel", "EELSModel", - ] +] def __dir__(): diff --git a/exspy/models/edsmodel.py b/exspy/models/edsmodel.py index 2a8c4567e..b808037cc 100644 --- a/exspy/models/edsmodel.py +++ b/exspy/models/edsmodel.py @@ -34,14 +34,15 @@ _logger = logging.getLogger(__name__) -eV2keV = 1000. 
+eV2keV = 1000.0 sigma2fwhm = 2 * math.sqrt(2 * math.log(2)) def _get_weight(element, line, weight_line=None): if weight_line is None: - weight_line = elements_db[ - element]['Atomic_properties']['Xray_lines'][line]['weight'] + weight_line = elements_db[element]["Atomic_properties"]["Xray_lines"][line][ + "weight" + ] return "x * {}".format(weight_line) @@ -78,12 +79,13 @@ def _get_sigma(E, E_ref, units_factor, return_f=False): """ energy2sigma_factor = 2.5 / (eV2keV * (sigma2fwhm**2)) if return_f: - return lambda sig_ref: math.sqrt(abs( - energy2sigma_factor * (E - E_ref) * units_factor + - np.power(sig_ref, 2))) + return lambda sig_ref: math.sqrt( + abs(energy2sigma_factor * (E - E_ref) * units_factor + np.power(sig_ref, 2)) + ) else: return "sqrt(abs({} * ({} - {}) * {} + sig_ref ** 2))".format( - energy2sigma_factor, E, E_ref, units_factor) + energy2sigma_factor, E, E_ref, units_factor + ) def _get_offset(diff): @@ -122,10 +124,9 @@ class EDSModel(Model1D): >>> m.calibrate_xray_lines('sub_weight',['Mn_La'], bound=10) """ - def __init__(self, spectrum, - auto_background=True, - auto_add_lines=True, - *args, **kwargs): + def __init__( + self, spectrum, auto_background=True, auto_add_lines=True, *args, **kwargs + ): Model1D.__init__(self, spectrum, *args, **kwargs) self.xray_lines = list() self.family_lines = list() @@ -133,40 +134,38 @@ def __init__(self, spectrum, self.end_energy = min(end_energy, self.signal._get_beam_energy()) self.start_energy = self.axes_manager.signal_axes[0].low_value self.background_components = list() - if 'dictionary' in kwargs or len(args) > 1: + if "dictionary" in kwargs or len(args) > 1: auto_add_lines = False auto_background = False - d = args[1] if len(args) > 1 else kwargs['dictionary'] - if len(d['xray_lines']) > 0: - self.xray_lines.extend( - [self[name] for name in d['xray_lines']]) - if len(d['background_components']) > 0: + d = args[1] if len(args) > 1 else kwargs["dictionary"] + if len(d["xray_lines"]) > 0: + self.xray_lines.extend([self[name] for name in d["xray_lines"]]) + if len(d["background_components"]) > 0: self.background_components.extend( - [self[name] for name in d['background_components']]) + [self[name] for name in d["background_components"]] + ) if auto_background is True: self.add_polynomial_background() if auto_add_lines is True: # Will raise an error if no elements are specified, so check: - if 'Sample.elements' in self.signal.metadata: + if "Sample.elements" in self.signal.metadata: self.add_family_lines() def as_dictionary(self, fullcopy=True): dic = super(EDSModel, self).as_dictionary(fullcopy) - dic['xray_lines'] = [c.name for c in self.xray_lines] - dic['background_components'] = [c.name for c in - self.background_components] + dic["xray_lines"] = [c.name for c in self.xray_lines] + dic["background_components"] = [c.name for c in self.background_components] return dic @property def units_factor(self): units_name = self.axes_manager.signal_axes[0].units - if units_name == 'eV': - return 1000. - elif units_name == 'keV': - return 1. 
+ if units_name == "eV": + return 1000.0 + elif units_name == "keV": + return 1.0 else: - raise ValueError("Energy units, %s, not supported" % - str(units_name)) + raise ValueError("Energy units, %s, not supported" % str(units_name)) @property def spectrum(self): @@ -179,15 +178,14 @@ def spectrum(self, value): else: raise ValueError( "This attribute can only contain an EDSSpectrum " - "but an object of type %s was provided" % - str(type(value))) + "but an object of type %s was provided" % str(type(value)) + ) @property def _active_xray_lines(self): - return [xray_line for xray_line - in self.xray_lines if xray_line.active] + return [xray_line for xray_line in self.xray_lines if xray_line.active] - def add_family_lines(self, xray_lines='from_elements'): + def add_family_lines(self, xray_lines="from_elements"): """Create the Xray-lines instances and configure them appropiately If a X-ray line is given, all the the lines of the familiy is added. @@ -209,37 +207,41 @@ def add_family_lines(self, xray_lines='from_elements'): """ # Test that signal axis is uniform if not self.axes_manager[-1].is_uniform: - raise NotImplementedError("This function is not yet implemented " - "for non-uniform axes.") + raise NotImplementedError( + "This function is not yet implemented " "for non-uniform axes." + ) only_one = False only_lines = ("Ka", "La", "Ma") - if xray_lines is None or xray_lines == 'from_elements': - if 'Sample.xray_lines' in self.signal.metadata \ - and xray_lines != 'from_elements': + if xray_lines is None or xray_lines == "from_elements": + if ( + "Sample.xray_lines" in self.signal.metadata + and xray_lines != "from_elements" + ): xray_lines = self.signal.metadata.Sample.xray_lines - elif 'Sample.elements' in self.signal.metadata: + elif "Sample.elements" in self.signal.metadata: xray_lines = self.signal._get_lines_from_elements( self.signal.metadata.Sample.elements, only_one=only_one, - only_lines=only_lines) + only_lines=only_lines, + ) else: - raise ValueError( - "No elements defined, set them with `add_elements`") + raise ValueError("No elements defined, set them with `add_elements`") components_names = [xr.name for xr in self.xray_lines] xray_lines = filter(lambda x: x not in components_names, xray_lines) - xray_lines, xray_not_here = self.signal.\ - _get_xray_lines_in_spectral_range(xray_lines) + xray_lines, xray_not_here = self.signal._get_xray_lines_in_spectral_range( + xray_lines + ) for xray in xray_not_here: warnings.warn("%s is not in the data energy range." 
% (xray)) for xray_line in xray_lines: element, line = utils_eds._get_element_and_line(xray_line) line_energy, line_FWHM = self.signal._get_line_energy( - xray_line, - FWHM_MnKa='auto') + xray_line, FWHM_MnKa="auto" + ) component = create_component.Gaussian() component.centre.value = line_energy component.fwhm = line_FWHM @@ -251,30 +253,32 @@ def add_family_lines(self, xray_lines='from_elements'): if self.signal._lazy: # For lazy signal, use a default value to avoid having # to do out-of-core computation - component.A.map['values'] = 10 + component.A.map["values"] = 10 else: - component.A.map[ - 'values'] = self.signal.isig[line_energy].data * \ - line_FWHM / self.signal.axes_manager[-1].scale - component.A.map['is_set'] = True + component.A.map["values"] = ( + self.signal.isig[line_energy].data + * line_FWHM + / self.signal.axes_manager[-1].scale + ) + component.A.map["is_set"] = True component.A.ext_force_positive = True - for li in elements_db[element]['Atomic_properties']['Xray_lines']: + for li in elements_db[element]["Atomic_properties"]["Xray_lines"]: if line[0] in li and line != li: - xray_sub = element + '_' + li - if self.signal.\ - _get_xray_lines_in_spectral_range( - [xray_sub])[0] != []: - line_energy, line_FWHM = self.signal.\ - _get_line_energy( - xray_sub, FWHM_MnKa='auto') + xray_sub = element + "_" + li + if ( + self.signal._get_xray_lines_in_spectral_range([xray_sub])[0] + != [] + ): + line_energy, line_FWHM = self.signal._get_line_energy( + xray_sub, FWHM_MnKa="auto" + ) component_sub = create_component.Gaussian() component_sub.centre.value = line_energy component_sub.fwhm = line_FWHM component_sub.centre.free = False component_sub.sigma.free = False component_sub.name = xray_sub - component_sub.A.twin_function_expr = _get_weight( - element, li) + component_sub.A.twin_function_expr = _get_weight(element, li) component_sub.A.twin = component.A self.append(component_sub) self.family_lines.append(component_sub) @@ -282,8 +286,7 @@ def add_family_lines(self, xray_lines='from_elements'): @property def _active_background_components(self): - return [bc for bc in self.background_components - if bc.free_parameters] + return [bc for bc in self.background_components if bc.free_parameters] def add_polynomial_background(self, order=6): """ @@ -297,7 +300,7 @@ def add_polynomial_background(self, order=6): The order of the polynomial """ background = create_component.Polynomial(order=order) - background.name = 'background_order_' + str(order) + background.name = "background_order_" + str(order) background.isbackground = True self.append(background) self.background_components.append(background) @@ -317,34 +320,32 @@ def fix_background(self): component.set_parameters_not_free() def enable_xray_lines(self): - """Enable the X-ray lines components. - - """ + """Enable the X-ray lines components.""" for component in self.xray_lines: component.active = True def disable_xray_lines(self): - """Disable the X-ray lines components. 
- - """ + """Disable the X-ray lines components.""" for component in self._active_xray_lines: component.active = False def _make_position_adjuster(self, component, fix_it, show_label): # Override to ensure formatting of labels of xray lines - super(EDSModel, self)._make_position_adjuster( - component, fix_it, show_label) + super(EDSModel, self)._make_position_adjuster(component, fix_it, show_label) if show_label and component in (self.xray_lines + self.family_lines): label = self._position_widgets[component._position][1] - label.string = (r"$\mathrm{%s}_{\mathrm{%s}}$" % - _get_element_and_line(component.name)) - - def fit_background(self, - start_energy=None, - end_energy=None, - windows_sigma=(4., 3.), - kind='single', - **kwargs): + label.string = r"$\mathrm{%s}_{\mathrm{%s}}$" % _get_element_and_line( + component.name + ) + + def fit_background( + self, + start_energy=None, + end_energy=None, + windows_sigma=(4.0, 3.0), + kind="single", + **kwargs + ): """ Fit the background in the energy range containing no X-ray line. @@ -385,13 +386,14 @@ def fit_background(self, for component in self: if component.isbackground is False: self.remove_signal_range( - component.centre.value - - windows_sigma[0] * component.sigma.value, - component.centre.value + - windows_sigma[1] * component.sigma.value) - if kind == 'single': + component.centre.value + - windows_sigma[0] * component.sigma.value, + component.centre.value + + windows_sigma[1] * component.sigma.value, + ) + if kind == "single": self.fit(**kwargs) - if kind == 'multi': + if kind == "multi": self.multifit(**kwargs) self.reset_signal_range() self.fix_background() @@ -407,7 +409,7 @@ def _twin_xray_lines_width(self, xray_lines): xray_lines: list of str or 'all_alpha' The Xray lines. If 'all_alpha', fit all using all alpha lines """ - if xray_lines == 'all_alpha': + if xray_lines == "all_alpha": xray_lines = [compo.name for compo in self.xray_lines] for i, xray_line in enumerate(xray_lines): @@ -420,9 +422,11 @@ def _twin_xray_lines_width(self, xray_lines): component.sigma.free = True E = component.centre.value component.sigma.twin_inverse_function_expr = _get_sigma( - E_ref, E, self.units_factor) + E_ref, E, self.units_factor + ) component.sigma.twin_function_expr = _get_sigma( - E, E_ref, self.units_factor) + E, E_ref, self.units_factor + ) def _set_energy_resolution(self, xray_lines, *args, **kwargs): """ @@ -434,29 +438,40 @@ def _set_energy_resolution(self, xray_lines, *args, **kwargs): xray_lines: list of str or 'all_alpha' The Xray lines. 
If 'all_alpha', fit all using all alpha lines """ - if xray_lines == 'all_alpha': + if xray_lines == "all_alpha": xray_lines = [compo.name for compo in self.xray_lines] - energy_Mn_Ka, FWHM_MnKa_old = self.signal._get_line_energy('Mn_Ka', - 'auto') + energy_Mn_Ka, FWHM_MnKa_old = self.signal._get_line_energy("Mn_Ka", "auto") FWHM_MnKa_old *= eV2keV / self.units_factor get_sigma_Mn_Ka = _get_sigma( - energy_Mn_Ka, self[xray_lines[0]].centre.value, self.units_factor, - return_f=True) - FWHM_MnKa = get_sigma_Mn_Ka(self[xray_lines[0]].sigma.value - ) * eV2keV / self.units_factor * sigma2fwhm + energy_Mn_Ka, + self[xray_lines[0]].centre.value, + self.units_factor, + return_f=True, + ) + FWHM_MnKa = ( + get_sigma_Mn_Ka(self[xray_lines[0]].sigma.value) + * eV2keV + / self.units_factor + * sigma2fwhm + ) if FWHM_MnKa < 110: - raise ValueError("FWHM_MnKa of " + str(FWHM_MnKa) + - " smaller than" + "physically possible") + raise ValueError( + "FWHM_MnKa of " + + str(FWHM_MnKa) + + " smaller than" + + "physically possible" + ) else: - self.signal.set_microscope_parameters( - energy_resolution_MnKa=FWHM_MnKa) - _logger.info("Energy resolution (FWHM at Mn Ka) changed from " + - "{:.2f} to {:.2f} eV".format( - FWHM_MnKa_old, FWHM_MnKa)) + self.signal.set_microscope_parameters(energy_resolution_MnKa=FWHM_MnKa) + _logger.info( + "Energy resolution (FWHM at Mn Ka) changed from " + + "{:.2f} to {:.2f} eV".format(FWHM_MnKa_old, FWHM_MnKa) + ) for component in self: if component.isbackground is False: line_FWHM = self.signal._get_line_energy( - component.name, FWHM_MnKa='auto')[1] + component.name, FWHM_MnKa="auto" + )[1] component.fwhm = line_FWHM def _twin_xray_lines_scale(self, xray_lines): @@ -468,7 +483,7 @@ def _twin_xray_lines_scale(self, xray_lines): xray_lines: list of str or 'all_alpha' The Xray lines. If 'all_alpha', fit all using all alpha lines """ - if xray_lines == 'all_alpha': + if xray_lines == "all_alpha": xray_lines = [compo.name for compo in self.xray_lines] ax = self.signal.axes_manager[-1] ref = [] @@ -483,8 +498,7 @@ def _twin_xray_lines_scale(self, xray_lines): component.centre.free = True E = component.centre.value fact = float(ax.value2index(E)) / ax.value2index(E_ref) - component.centre.twin_function_expr = _get_scale( - E, E_ref, fact) + component.centre.twin_function_expr = _get_scale(E, E_ref, fact) component.centre.twin = component_ref.centre ref.append(E) return ref @@ -508,15 +522,17 @@ def _set_energy_scale(self, xray_lines, ref): """ # Test that signal axis is uniform if not self.axes_manager[-1].is_uniform: - raise NotImplementedError("This function is not yet implemented " - "for non-uniform axes.") + raise NotImplementedError( + "This function is not yet implemented " "for non-uniform axes." + ) - if xray_lines == 'all_alpha': + if xray_lines == "all_alpha": xray_lines = [compo.name for compo in self.xray_lines] ax = self.signal.axes_manager[-1] scale_old = self.signal.axes_manager[-1].scale - ind = np.argsort(np.array( - [compo.centre.value for compo in self.xray_lines]))[-1] + ind = np.argsort(np.array([compo.centre.value for compo in self.xray_lines]))[ + -1 + ] E = self[xray_lines[ind]].centre.value scale = (ref[ind] - ax.offset) / ax.value2index(E) ax.scale = scale @@ -534,7 +550,7 @@ def _twin_xray_lines_offset(self, xray_lines): xray_lines: list of str or 'all_alpha' The Xray lines. 
If 'all_alpha', fit all using all alpha lines """ - if xray_lines == 'all_alpha': + if xray_lines == "all_alpha": xray_lines = [compo.name for compo in self.xray_lines] ref = [] for i, xray_line in enumerate(xray_lines): @@ -572,10 +588,11 @@ def _set_energy_offset(self, xray_lines, ref): """ # Test that signal axis is uniform if not self.axes_manager[-1].is_uniform: - raise NotImplementedError("This function is not yet implemented " - "for non-uniform axes.") + raise NotImplementedError( + "This function is not yet implemented " "for non-uniform axes." + ) - if xray_lines == 'all_alpha': + if xray_lines == "all_alpha": xray_lines = [compo.name for compo in self.xray_lines] diff = self[xray_lines[0]].centre.value - ref[0] offset_old = self.signal.axes_manager[-1].offset @@ -586,10 +603,9 @@ def _set_energy_offset(self, xray_lines, ref): component = self[xray_line] component.centre.value = ref[i] - def calibrate_energy_axis(self, - calibrate='resolution', - xray_lines='all_alpha', - **kwargs): + def calibrate_energy_axis( + self, calibrate="resolution", xray_lines="all_alpha", **kwargs + ): """ Calibrate the resolution, the scale or the offset of the energy axis by fitting. @@ -611,15 +627,15 @@ def calibrate_energy_axis(self, """ - if calibrate == 'resolution': + if calibrate == "resolution": free = self._twin_xray_lines_width fix = self.fix_xray_lines_width scale = self._set_energy_resolution - elif calibrate == 'scale': + elif calibrate == "scale": free = self._twin_xray_lines_scale fix = self.fix_xray_lines_energy scale = self._set_energy_scale - elif calibrate == 'offset': + elif calibrate == "offset": free = self._twin_xray_lines_offset fix = self.fix_xray_lines_energy scale = self._set_energy_offset @@ -629,7 +645,7 @@ def calibrate_energy_axis(self, scale(xray_lines=xray_lines, ref=ref) self.update_plot() - def free_sub_xray_lines_weight(self, xray_lines='all', bound=0.01): + def free_sub_xray_lines_weight(self, xray_lines="all", bound=0.01): """ Free the weight of a sub X-ray lines @@ -650,22 +666,19 @@ def free_twin(component): if component.A.value - bound * component.A.value <= 0: component.A.bmin = 1e-10 else: - component.A.bmin = component.A.value - \ - bound * component.A.value - component.A.bmax = component.A.value + \ - bound * component.A.value + component.A.bmin = component.A.value - bound * component.A.value + component.A.bmax = component.A.value + bound * component.A.value component.A.ext_force_positive = True - xray_families = [ - utils_eds._get_xray_lines_family(line) for line in xray_lines] + + xray_families = [utils_eds._get_xray_lines_family(line) for line in xray_lines] for component in self: if component.isbackground is False: - if xray_lines == 'all': + if xray_lines == "all": free_twin(component) - elif utils_eds._get_xray_lines_family( - component.name) in xray_families: + elif utils_eds._get_xray_lines_family(component.name) in xray_families: free_twin(component) - def fix_sub_xray_lines_weight(self, xray_lines='all'): + def fix_sub_xray_lines_weight(self, xray_lines="all"): """ Fix the weight of a sub X-ray lines to the main X-ray lines @@ -676,26 +689,30 @@ def fix_twin(component): component.A.bmin = 0.0 component.A.bmax = None element, line = utils_eds._get_element_and_line(component.name) - for li in elements_db[element]['Atomic_properties']['Xray_lines']: + for li in elements_db[element]["Atomic_properties"]["Xray_lines"]: if line[0] in li and line != li: - xray_sub = element + '_' + li + xray_sub = element + "_" + li if xray_sub in self: component_sub = 
self[xray_sub] component_sub.A.bmin = 1e-10 component_sub.A.bmax = None weight_line = component_sub.A.value / component.A.value component_sub.A.twin_function_expr = _get_weight( - element, li, weight_line) + element, li, weight_line + ) component_sub.A.twin = component.A else: - warnings.warn("The X-ray line expected to be in the " - "model was not found") + warnings.warn( + "The X-ray line expected to be in the " + "model was not found" + ) + for component in self.xray_lines: - if xray_lines == 'all' or component.name in xray_lines: + if xray_lines == "all" or component.name in xray_lines: fix_twin(component) self.fetch_stored_values() - def free_xray_lines_energy(self, xray_lines='all', bound=0.001): + def free_xray_lines_energy(self, xray_lines="all", bound=0.001): """ Free the X-ray line energy (shift or centre of the Gaussian) @@ -709,12 +726,12 @@ def free_xray_lines_energy(self, xray_lines='all', bound=0.001): for component in self: if component.isbackground is False: - if xray_lines == 'all' or component.name in xray_lines: + if xray_lines == "all" or component.name in xray_lines: component.centre.free = True component.centre.bmin = component.centre.value - bound component.centre.bmax = component.centre.value + bound - def fix_xray_lines_energy(self, xray_lines='all'): + def fix_xray_lines_energy(self, xray_lines="all"): """ Fix the X-ray line energy (shift or centre of the Gaussian) @@ -726,17 +743,17 @@ def fix_xray_lines_energy(self, xray_lines='all'): bound: float the bound around the actual energy, in keV or eV """ - if xray_lines == 'all_alpha': + if xray_lines == "all_alpha": xray_lines = [compo.name for compo in self.xray_lines] for component in self: if component.isbackground is False: - if xray_lines == 'all' or component.name in xray_lines: + if xray_lines == "all" or component.name in xray_lines: component.centre.twin = None component.centre.free = False component.centre.bmin = None component.centre.bmax = None - def free_xray_lines_width(self, xray_lines='all', bound=0.01): + def free_xray_lines_width(self, xray_lines="all", bound=0.01): """ Free the X-ray line width (sigma of the Gaussian) @@ -750,12 +767,12 @@ def free_xray_lines_width(self, xray_lines='all', bound=0.01): for component in self: if component.isbackground is False: - if xray_lines == 'all' or component.name in xray_lines: + if xray_lines == "all" or component.name in xray_lines: component.sigma.free = True component.sigma.bmin = component.sigma.value - bound component.sigma.bmax = component.sigma.value + bound - def fix_xray_lines_width(self, xray_lines='all'): + def fix_xray_lines_width(self, xray_lines="all"): """ Fix the X-ray line width (sigma of the Gaussian) @@ -767,22 +784,19 @@ def fix_xray_lines_width(self, xray_lines='all'): bound: float the bound around the actual energy, in keV or eV """ - if xray_lines == 'all_alpha': + if xray_lines == "all_alpha": xray_lines = [compo.name for compo in self.xray_lines] for component in self: if component.isbackground is False: - if xray_lines == 'all' or component.name in xray_lines: + if xray_lines == "all" or component.name in xray_lines: component.sigma.twin = None component.sigma.free = False component.sigma.bmin = None component.sigma.bmax = None - def calibrate_xray_lines(self, - calibrate='energy', - xray_lines='all', - bound=1, - kind='single', - **kwargs): + def calibrate_xray_lines( + self, calibrate="energy", xray_lines="all", bound=1, kind="single", **kwargs + ): """ Calibrate individually the X-ray line parameters. 
@@ -810,14 +824,14 @@ def calibrate_xray_lines(self, multifit, depending on the value of kind. """ - if calibrate == 'energy': + if calibrate == "energy": bound = (bound / eV2keV) * self.units_factor free = self.free_xray_lines_energy fix = self.fix_xray_lines_energy - elif calibrate == 'sub_weight': + elif calibrate == "sub_weight": free = self.free_sub_xray_lines_weight fix = self.fix_sub_xray_lines_weight - elif calibrate == 'width': + elif calibrate == "width": bound = (bound / eV2keV) * self.units_factor free = self.free_xray_lines_width fix = self.fix_xray_lines_width @@ -829,12 +843,14 @@ def calibrate_xray_lines(self, self.multifit(bounded=True, **kwargs) fix(xray_lines=xray_lines) - def get_lines_intensity(self, - xray_lines=None, - plot_result=False, - only_one=True, - only_lines=("a",), - **kwargs): + def get_lines_intensity( + self, + xray_lines=None, + plot_result=False, + only_one=True, + only_lines=("a",), + **kwargs + ): """ Return the fitted intensity of the X-ray lines. @@ -882,34 +898,36 @@ def get_lines_intensity(self, if xray_lines is None: xray_lines = [component.name for component in self.xray_lines] else: - xray_lines = self.signal._parse_xray_lines( - xray_lines, only_one, only_lines) - xray_lines = list(filter(lambda x: x in [a.name for a in - self], xray_lines)) + xray_lines = self.signal._parse_xray_lines(xray_lines, only_one, only_lines) + xray_lines = list(filter(lambda x: x in [a.name for a in self], xray_lines)) if len(xray_lines) == 0: raise ValueError("These X-ray lines are not part of the model.") for xray_line in xray_lines: element, line = utils_eds._get_element_and_line(xray_line) line_energy = self.signal._get_line_energy(xray_line) - data_res = self[xray_line].A.map['values'] + data_res = self[xray_line].A.map["values"] if self.axes_manager.navigation_dimension == 0: data_res = data_res[0] img = self.signal.isig[0:1].integrate1D(-1) img.data = data_res - img.metadata.General.title = ( - 'Intensity of %s at %.2f %s from %s' % - (xray_line, - line_energy, - self.signal.axes_manager.signal_axes[0].units, - self.signal.metadata.General.title)) + img.metadata.General.title = "Intensity of %s at %.2f %s from %s" % ( + xray_line, + line_energy, + self.signal.axes_manager.signal_axes[0].units, + self.signal.metadata.General.title, + ) img = img.transpose(signal_axes=[]) if plot_result and img.axes_manager.signal_dimension == 0: - print("%s at %s %s : Intensity = %.2f" - % (xray_line, - line_energy, - self.signal.axes_manager.signal_axes[0].units, - img.data)) + print( + "%s at %s %s : Intensity = %.2f" + % ( + xray_line, + line_energy, + self.signal.axes_manager.signal_axes[0].units, + img.data, + ) + ) img.metadata.set_item("Sample.elements", ([element])) img.metadata.set_item("Sample.xray_lines", ([xray_line])) intensities.append(img) @@ -920,7 +938,9 @@ def get_lines_intensity(self, def remove(self, thing): thing = self._get_component(thing) if not np.iterable(thing): - thing = [thing, ] + thing = [ + thing, + ] for comp in thing: if comp in self.xray_lines: self.xray_lines.remove(comp) diff --git a/exspy/models/edssemmodel.py b/exspy/models/edssemmodel.py index ff139e2c7..3d720bb50 100644 --- a/exspy/models/edssemmodel.py +++ b/exspy/models/edssemmodel.py @@ -38,9 +38,9 @@ class EDSSEMModel(EDSModel): Any extra arguments are passed to the Model constructor. 
""" - def __init__(self, spectrum, - auto_background=True, - auto_add_lines=True, - *args, **kwargs): - EDSModel.__init__(self, spectrum, auto_background, auto_add_lines, - *args, **kwargs) + def __init__( + self, spectrum, auto_background=True, auto_add_lines=True, *args, **kwargs + ): + EDSModel.__init__( + self, spectrum, auto_background, auto_add_lines, *args, **kwargs + ) diff --git a/exspy/models/edstemmodel.py b/exspy/models/edstemmodel.py index c8965ffae..1e07c2a79 100644 --- a/exspy/models/edstemmodel.py +++ b/exspy/models/edstemmodel.py @@ -38,9 +38,9 @@ class EDSTEMModel(EDSModel): Any extra arguments are passed to the Model constructor. """ - def __init__(self, spectrum, - auto_background=True, - auto_add_lines=True, - *args, **kwargs): - EDSModel.__init__(self, spectrum, auto_background, auto_add_lines, - *args, **kwargs) + def __init__( + self, spectrum, auto_background=True, auto_add_lines=True, *args, **kwargs + ): + EDSModel.__init__( + self, spectrum, auto_background, auto_add_lines, *args, **kwargs + ) diff --git a/exspy/models/eelsmodel.py b/exspy/models/eelsmodel.py index 49cb9098f..2b413ace6 100644 --- a/exspy/models/eelsmodel.py +++ b/exspy/models/eelsmodel.py @@ -35,7 +35,6 @@ _logger = logging.getLogger(__name__) - def generate_uniform_axis(offset, scale, size, offset_index=0): """Creates a uniform axis vector given the offset, scale and number of channels. @@ -56,17 +55,21 @@ def generate_uniform_axis(offset, scale, size, offset_index=0): """ - return np.linspace(offset - offset_index * scale, - offset + scale * (size - 1 - offset_index), - size) - + return np.linspace( + offset - offset_index * scale, offset + scale * (size - 1 - offset_index), size + ) class EELSModel(Model1D): - - def __init__(self, signal1D, auto_background=True, - auto_add_edges=True, low_loss=None, - GOS="gosh", dictionary=None): + def __init__( + self, + signal1D, + auto_background=True, + auto_add_edges=True, + low_loss=None, + GOS="gosh", + dictionary=None, + ): """ Build an EELS model. @@ -95,11 +98,11 @@ def __init__(self, signal1D, auto_background=True, self._background_components = [] self._whitelist.update( { - '_convolved': None, - 'low_loss': ('sig', None), - } - ) - self._slicing_whitelist['low_loss'] = 'inav' + "_convolved": None, + "low_loss": ("sig", None), + } + ) + self._slicing_whitelist["low_loss"] = "inav" if dictionary is not None: auto_background = False auto_add_edges = False @@ -146,8 +149,8 @@ def _get_model_data(self, *args, **kwargs): if self.convolved is False: return super()._get_model_data(*args, **kwargs) else: # convolved - component_list = kwargs.get('component_list') - ignore_channel_switches = kwargs.get('ignore_channel_switches', False) + component_list = kwargs.get("component_list") + ignore_channel_switches = kwargs.get("ignore_channel_switches", False) slice_ = slice(None) if ignore_channel_switches else self._channel_switches if self.convolution_axis is None: raise RuntimeError("`low_loss` is not set.") @@ -160,7 +163,9 @@ def _get_model_data(self, *args, **kwargs): sum_ += component.function(self.axis.axis) to_return = sum_ + np.convolve( self.low_loss._get_current_data(self.axes_manager), - sum_convolved, mode="valid") + sum_convolved, + mode="valid", + ) to_return = to_return[slice_] return to_return @@ -169,32 +174,37 @@ def _jacobian(self, param, y, weights=None): return super()._jacobian(param, y, weights) if weights is None: - weights = 1. 
+ weights = 1.0 counter = 0 grad = np.zeros(len(self.axis.axis)) for component in self: # Cut the parameters list if component.active: component.fetch_values_from_array( - param[ - counter:counter + - component._nfree_param], - onlyfree=True) + param[counter : counter + component._nfree_param], onlyfree=True + ) if component.convolved: for parameter in component.free_parameters: par_grad = np.convolve( parameter.grad(self.convolution_axis), self.low_loss._get_current_data(self.axes_manager), - mode="valid") + mode="valid", + ) if parameter._twins: for par in parameter._twins: - np.add(par_grad, np.convolve( - par.grad( - self.convolution_axis), - self.low_loss._get_current_data(self.axes_manager), - mode="valid"), par_grad) + np.add( + par_grad, + np.convolve( + par.grad(self.convolution_axis), + self.low_loss._get_current_data( + self.axes_manager + ), + mode="valid", + ), + par_grad, + ) grad = np.vstack((grad, par_grad)) @@ -212,7 +222,6 @@ def _jacobian(self, param, y, weights=None): to_return = grad[1:, self._channel_switches] * weights - if self.axis.is_binned: if self.axis.is_uniform: to_return *= self.axis.scale @@ -221,7 +230,6 @@ def _jacobian(self, param, y, weights=None): return to_return - @property def signal(self): return self._signal @@ -233,8 +241,8 @@ def signal(self, value): else: raise ValueError( "This attribute can only contain an EELSSpectrum " - "but an object of type %s was provided" % - str(type(value))) + "but an object of type %s was provided" % str(type(value)) + ) @property def convolved(self): @@ -248,7 +256,8 @@ def convolved(self, value): raise RuntimeError( "Cannot set `convolved` to True as the " "`low_loss` attribute" - "is not set.") + "is not set." + ) else: self._convolved = value self.update_plot() @@ -262,16 +271,18 @@ def low_loss(self): @low_loss.setter def low_loss(self, value): if value is not None: - if (value.axes_manager.navigation_shape != - self.signal.axes_manager.navigation_shape): + if ( + value.axes_manager.navigation_shape + != self.signal.axes_manager.navigation_shape + ): raise ValueError( "The signal does not have the same navigation dimension " "as the signal it will be convolved with." - ) + ) if not value.axes_manager.signal_axes[0].is_uniform: raise ValueError( "Convolution is not supported with non-uniform signal axes." - ) + ) self._low_loss = value self.set_convolution_axis() self.convolved = True @@ -291,8 +302,9 @@ def set_convolution_axis(self): dimension = self.axis.size + ll_axis.size - 1 step = self.axis.scale knot_position = ll_axis.size - ll_axis.value2index(0) - 1 - self.convolution_axis = generate_uniform_axis(self.axis.offset, step, - dimension, knot_position) + self.convolution_axis = generate_uniform_axis( + self.axis.offset, step, dimension, knot_position + ) def append(self, component): """Append component to EELS model. 
@@ -311,14 +323,17 @@ def append(self, component): if isinstance(component, EELSCLEdge): # Test that signal axis is uniform if not self.axes_manager[-1].is_uniform: - raise NotImplementedError("This operation is not yet implemented " - "for non-uniform energy axes") + raise NotImplementedError( + "This operation is not yet implemented " + "for non-uniform energy axes" + ) tem = self.signal.metadata.Acquisition_instrument.TEM component.set_microscope_parameters( E0=tem.beam_energy, alpha=tem.convergence_angle, beta=tem.Detector.EELS.collection_angle, - energy_scale=self.axis.scale) + energy_scale=self.axis.scale, + ) component.energy_scale = self.axis.scale component._set_fine_structure_coeff() self._classify_components() @@ -347,16 +362,17 @@ def _classify_components(self): self._background_components = [] for component in self: if isinstance(component, EELSCLEdge): - if component.onset_energy.value < \ - self.axis.axis[self._channel_switches][0]: + if ( + component.onset_energy.value + < self.axis.axis[self._channel_switches][0] + ): component.isbackground = True if component.isbackground is not True: self.edges.append(component) else: component.fine_structure_active = False component.fine_structure_coeff.free = False - elif (isinstance(component, PowerLaw) or - component.isbackground is True): + elif isinstance(component, PowerLaw) or component.isbackground is True: self._background_components.append(component) if self.edges: @@ -365,11 +381,9 @@ def _classify_components(self): if len(self._background_components) > 1: self._backgroundtype = "mix" elif len(self._background_components) == 1: - self._backgroundtype = \ - self._background_components[0].__repr__() + self._backgroundtype = self._background_components[0].__repr__() bg = self._background_components[0] - if isinstance(bg, PowerLaw) and self.edges and not \ - bg.A.map["is_set"].any(): + if isinstance(bg, PowerLaw) and self.edges and not bg.A.map["is_set"].any(): self.two_area_background_estimation() @property @@ -403,11 +417,11 @@ def _add_edges_from_subshells_names(self, e_shells=None): self.append(master_edge) element = master_edge.element while len(e_shells) > 0: - next_element = e_shells[-1].split('_')[0] + next_element = e_shells[-1].split("_")[0] if next_element != element: # New master edge self._add_edges_from_subshells_names(e_shells=e_shells) - elif self.GOS == 'hydrogenic': + elif self.GOS == "hydrogenic": # The hydrogenic GOS includes all the L subshells in one # so we get rid of the others e_shells.pop() @@ -420,14 +434,12 @@ def _add_edges_from_subshells_names(self, e_shells=None): edge.intensity.twin = master_edge.intensity edge.onset_energy.twin = master_edge.onset_energy edge.onset_energy.twin_function_expr = "x + {}".format( - (edge.GOS.onset_energy - master_edge.GOS.onset_energy)) + (edge.GOS.onset_energy - master_edge.GOS.onset_energy) + ) edge.free_onset_energy = False self.append(edge) - def resolve_fine_structure( - self, - preedge_safe_window_width=2, - i1=0): + def resolve_fine_structure(self, preedge_safe_window_width=2, i1=0): """Adjust the fine structure of all edges to avoid overlapping This function is called automatically every time the position of an edge @@ -447,46 +459,62 @@ def resolve_fine_structure( if not self._active_edges: return - while (self._active_edges[i1].fine_structure_active is False and - i1 < len(self._active_edges) - 1): + while ( + self._active_edges[i1].fine_structure_active is False + and i1 < len(self._active_edges) - 1 + ): i1 += 1 if i1 < len(self._active_edges) - 1: i2 = i1 
+ 1 - while (self._active_edges[i2].fine_structure_active is False and - i2 < len(self._active_edges) - 1): + while ( + self._active_edges[i2].fine_structure_active is False + and i2 < len(self._active_edges) - 1 + ): i2 += 1 if self._active_edges[i2].fine_structure_active is True: distance_between_edges = ( - self._active_edges[i2].onset_energy.value - - self._active_edges[i1].onset_energy.value) - if (self._active_edges[i1].fine_structure_width > - distance_between_edges - - self._preedge_safe_window_width): + self._active_edges[i2].onset_energy.value + - self._active_edges[i1].onset_energy.value + ) + if ( + self._active_edges[i1].fine_structure_width + > distance_between_edges - self._preedge_safe_window_width + ): min_d = self._min_distance_between_edges_for_fine_structure - if (distance_between_edges - - self._preedge_safe_window_width) <= min_d: - _logger.info(( - "Automatically deactivating the fine structure " - "of edge number %d to avoid conflicts with edge " - "number %d") % (i2 + 1, i1 + 1)) + if ( + distance_between_edges - self._preedge_safe_window_width + ) <= min_d: + _logger.info( + ( + "Automatically deactivating the fine structure " + "of edge number %d to avoid conflicts with edge " + "number %d" + ) + % (i2 + 1, i1 + 1) + ) self._active_edges[i2].fine_structure_active = False - self._active_edges[ - i2].fine_structure_coeff.free = False + self._active_edges[i2].fine_structure_coeff.free = False self.resolve_fine_structure(i1=i2) else: new_fine_structure_width = ( - distance_between_edges - - self._preedge_safe_window_width) - _logger.info(( - "Automatically changing the fine structure " - "width of edge %d from %s eV to %s eV to avoid " - "conflicts with edge number %d") % ( - i1 + 1, - self._active_edges[i1].fine_structure_width, - new_fine_structure_width, - i2 + 1)) - self._active_edges[i1].fine_structure_width = \ - new_fine_structure_width + distance_between_edges - self._preedge_safe_window_width + ) + _logger.info( + ( + "Automatically changing the fine structure " + "width of edge %d from %s eV to %s eV to avoid " + "conflicts with edge number %d" + ) + % ( + i1 + 1, + self._active_edges[i1].fine_structure_width, + new_fine_structure_width, + i2 + 1, + ) + ) + self._active_edges[ + i1 + ].fine_structure_width = new_fine_structure_width self.resolve_fine_structure(i1=i2) else: self.resolve_fine_structure(i1=i2) @@ -518,9 +546,7 @@ def fit(self, kind="std", **kwargs): """ if kind not in ["smart", "std"]: - raise ValueError( - f"kind must be either 'std' or 'smart', not '{kind}'" - ) + raise ValueError(f"kind must be either 'std' or 'smart', not '{kind}'") elif kind == "smart": return self.smart_fit(**kwargs) elif kind == "std": @@ -579,8 +605,11 @@ def _get_first_ionization_edge_energy(self, start_energy=None): if not self._active_edges: return None start_energy = self._get_start_energy(start_energy) - iee_list = [edge.onset_energy.value for edge in self._active_edges - if edge.onset_energy.value > start_energy] + iee_list = [ + edge.onset_energy.value + for edge in self._active_edges + if edge.onset_energy.value > start_energy + ] iee = min(iee_list) if iee_list else None return iee @@ -613,8 +642,9 @@ def fit_background(self, start_energy=None, only_current=True, **kwargs): return iee = self._get_first_ionization_edge_energy(start_energy=start_energy) if iee is not None: - to_disable = [edge for edge in self._active_edges - if edge.onset_energy.value >= iee] + to_disable = [ + edge for edge in self._active_edges if edge.onset_energy.value >= iee + ] E2 = iee - 
self._preedge_safe_window_width self.disable_edges(to_disable) else: @@ -648,10 +678,11 @@ def two_area_background_estimation(self, E1=None, E2=None, powerlaw=None): powerlaw = component else: _logger.warning( - 'There are more than two power law ' - 'background components defined in this model, ' - 'please use the powerlaw keyword to specify one' - ' of them') + "There are more than two power law " + "background components defined in this model, " + "please use the powerlaw keyword to specify one" + " of them" + ) return else: # No power law component return @@ -663,15 +694,14 @@ def two_area_background_estimation(self, E1=None, E2=None, powerlaw=None): if E2 is None: E2 = ea[-1] else: - E2 = E2 - \ - self._preedge_safe_window_width + E2 = E2 - self._preedge_safe_window_width - if not powerlaw.estimate_parameters( - self.signal, E1, E2, only_current=False): + if not powerlaw.estimate_parameters(self.signal, E1, E2, only_current=False): _logger.warning( "The power law background parameters could not " "be estimated.\n" - "Try choosing a different energy range for the estimation") + "Try choosing a different energy range for the estimation" + ) return def _fit_edge(self, edgenumber, start_energy=None, **kwargs): @@ -682,19 +712,22 @@ def _fit_edge(self, edgenumber, start_energy=None, **kwargs): # Declare variables active_edges = self._active_edges edge = active_edges[edgenumber] - if (edge.intensity.twin is not None or - edge.active is False or - edge.onset_energy.value < start_energy or - edge.onset_energy.value > ea[-1]): + if ( + edge.intensity.twin is not None + or edge.active is False + or edge.onset_energy.value < start_energy + or edge.onset_energy.value > ea[-1] + ): return 1 # Fitting edge 'edge.name' - last_index = len(self._active_edges) - 1 # Last edge index + last_index = len(self._active_edges) - 1 # Last edge index i = 1 twins = [] # find twins while edgenumber + i <= last_index and ( - active_edges[edgenumber + i].intensity.twin is not None or - active_edges[edgenumber + i].active is False): + active_edges[edgenumber + i].intensity.twin is not None + or active_edges[edgenumber + i].active is False + ): if active_edges[edgenumber + i].intensity.twin is not None: twins.append(self._active_edges[edgenumber + i]) i += 1 @@ -702,15 +735,20 @@ def _fit_edge(self, edgenumber, start_energy=None, **kwargs): nextedgeenergy = ea[-1] else: nextedgeenergy = ( - active_edges[edgenumber + i].onset_energy.value - - self._preedge_safe_window_width) + active_edges[edgenumber + i].onset_energy.value + - self._preedge_safe_window_width + ) # Backup the fsstate to_activate_fs = [] - for edge_ in [edge, ] + twins: - if (edge_.fine_structure_active is True and - edge_.fine_structure_coeff.free is True or - edge_.fine_structure_components): + for edge_ in [ + edge, + ] + twins: + if ( + edge_.fine_structure_active is True + and edge_.fine_structure_coeff.free is True + or edge_.fine_structure_components + ): to_activate_fs.append(edge_) self.disable_fine_structure(to_activate_fs) @@ -718,9 +756,8 @@ def _fit_edge(self, edgenumber, start_energy=None, **kwargs): # Without fine structure to determine onset_energy edges_to_activate = [] - for edge_ in self._active_edges[edgenumber + 1:]: - if (edge_.active is True and - edge_.onset_energy.value >= nextedgeenergy): + for edge_ in self._active_edges[edgenumber + 1 :]: + if edge_.active is True and edge_.onset_energy.value >= nextedgeenergy: edge_.active = False edges_to_activate.append(edge_) @@ -765,12 +802,12 @@ def quantify(self): for element in 
elements: if len(elements[element]) == 1: for subshell in elements[element]: - print("%s\t%f" % ( - element, elements[element][subshell])) + print("%s\t%f" % (element, elements[element][subshell])) else: for subshell in elements[element]: - print("%s_%s\t%f" % (element, subshell, - elements[element][subshell])) + print( + "%s_%s\t%f" % (element, subshell, elements[element][subshell]) + ) def remove_fine_structure_data(self, edges_list=None): """Remove the fine structure data from the fitting routine as @@ -798,8 +835,7 @@ def remove_fine_structure_data(self, edges_list=None): else: edges_list = [self._get_component(x) for x in edges_list] for edge in edges_list: - if (edge.isbackground is False and - edge.fine_structure_active is True): + if edge.isbackground is False and edge.fine_structure_active is True: start = edge.onset_energy.value stop = start + edge.fine_structure_width self.remove_signal_range(start, stop) @@ -866,16 +902,12 @@ def disable_edges(self, edges_list=None): self.resolve_fine_structure() def enable_background(self): - """Enable the background components. - - """ + """Enable the background components.""" for component in self._background_components: component.active = True def disable_background(self): - """Disable the background components. - - """ + """Disable the background components.""" for component in self._active_background_components: component.active = False diff --git a/exspy/signal_tools.py b/exspy/signal_tools.py index 8cabb5827..44bbca1ad 100644 --- a/exspy/signal_tools.py +++ b/exspy/signal_tools.py @@ -12,13 +12,12 @@ class EdgesRange(SpanSelectorInSignal1D): units = t.Unicode() edges_list = t.Tuple() only_major = t.Bool() - order = t.Unicode('closest') + order = t.Unicode("closest") complementary = t.Bool(True) def __init__(self, signal, interactive=True): if signal.axes_manager.signal_dimension != 1: - raise SignalDimensionError( - signal.axes_manager.signal_dimension, 1) + raise SignalDimensionError(signal.axes_manager.signal_dimension, 1) if interactive: super().__init__(signal) @@ -38,27 +37,32 @@ def __init__(self, signal, interactive=True): self._get_edges_info_within_energy_axis() self.signal.axes_manager.events.indices_changed.connect( - self._on_navigation_indices_changed, []) + self._on_navigation_indices_changed, [] + ) self.signal._plot.signal_plot.events.closed.connect( lambda: self.signal.axes_manager.events.indices_changed.disconnect( - self._on_navigation_indices_changed), []) + self._on_navigation_indices_changed + ), + [], + ) def _get_edges_info_within_energy_axis(self): mid_energy = (self.axis.low_value + self.axis.high_value) / 2 rng = self.axis.high_value - self.axis.low_value - self.edge_all = np.asarray(get_edges_near_energy(mid_energy, rng, - order=self.order)) + self.edge_all = np.asarray( + get_edges_near_energy(mid_energy, rng, order=self.order) + ) info = get_info_from_edges(self.edge_all) energy_all = [] relevance_all = [] description_all = [] for d in info: - onset = d['onset_energy (eV)'] - relevance = d['relevance'] - threshold = d['threshold'] - edge_ = d['edge'] - description = threshold + '. '*(threshold !='' and edge_ !='') + edge_ + onset = d["onset_energy (eV)"] + relevance = d["relevance"] + threshold = d["threshold"] + edge_ = d["edge"] + description = threshold + ". 
" * (threshold != "" and edge_ != "") + edge_ energy_all.append(onset) relevance_all.append(relevance) @@ -73,10 +77,11 @@ def _on_navigation_indices_changed(self): def update_table(self): if self.span_selector is not None: - energy_mask = (self.ss_left_value <= self.energy_all) & \ - (self.energy_all <= self.ss_right_value) + energy_mask = (self.ss_left_value <= self.energy_all) & ( + self.energy_all <= self.ss_right_value + ) if self.only_major: - relevance_mask = self.relevance_all == 'Major' + relevance_mask = self.relevance_all == "Major" else: relevance_mask = np.ones(len(self.edge_all), bool) @@ -96,21 +101,21 @@ def update_table(self): def _keep_valid_edges(self): edge_all = list(self.signal._edge_markers["names"]) for edge in edge_all: - if (edge not in self.edges_list): + if edge not in self.edges_list: if edge in self.active_edges: self.active_edges.remove(edge) elif edge in self.active_complementary_edges: self.active_complementary_edges.remove(edge) self.signal._remove_edge_labels([edge], render_figure=False) - elif (edge not in self.active_edges): + elif edge not in self.active_edges: self.active_edges.append(edge) self._on_complementary() self._update_labels() def update_active_edge(self, change): - state = change['new'] - edge = change['owner'].description + state = change["new"] + edge = change["owner"].description if state: self.active_edges.append(edge) @@ -125,9 +130,9 @@ def update_active_edge(self, change): def _on_complementary(self): if self.complementary: - self.active_complementary_edges = \ - self.signal._get_complementary_edges(self.active_edges, - self.only_major) + self.active_complementary_edges = self.signal._get_complementary_edges( + self.active_edges, self.only_major + ) else: self.active_complementary_edges = [] @@ -143,8 +148,9 @@ def check_btn_state(self): btn.value = True if btn.value is True and self.complementary: - comp = self.signal._get_complementary_edges(self.active_edges, - self.only_major) + comp = self.signal._get_complementary_edges( + self.active_edges, self.only_major + ) for cedge in comp: if cedge in edges: pos = edges.index(cedge) @@ -171,7 +177,7 @@ def _update_labels(self, active=None, complementary=None): self.signal._add_edge_labels(edge_add, render_figure=False) if edge_remove or edge_add: # Render figure only once - self.signal._render_figure(plot=['signal_plot']) + self.signal._render_figure(plot=["signal_plot"]) def _clear_markers(self): # Used in hyperspy_gui_ipywidgets diff --git a/exspy/signals/__init__.py b/exspy/signals/__init__.py index b9446ea1d..cf60e926f 100644 --- a/exspy/signals/__init__.py +++ b/exspy/signals/__init__.py @@ -1,4 +1,3 @@ - """ Modules containing the exSpy signals and their lazy counterparts. @@ -35,7 +34,7 @@ "LazyEELSSpectrum", "EDSSEMSpectrum", "LazyEDSSEMSpectrum", - ] +] def __dir__(): diff --git a/exspy/signals/dielectric_function.py b/exspy/signals/dielectric_function.py index 2bbf40c01..b6bdf2699 100644 --- a/exspy/signals/dielectric_function.py +++ b/exspy/signals/dielectric_function.py @@ -23,7 +23,7 @@ from hyperspy._signals.complex_signal1d import ( ComplexSignal1D, LazyComplexSignal1D, - ) +) from hyperspy.docstrings.signal import LAZYSIGNAL_DOC from exspy.misc.eels.tools import eels_constant @@ -71,49 +71,61 @@ def get_number_of_effective_electrons(self, nat, cumulative=False): Notes ----- - .. [*] Ray Egerton, "Electron Energy-Loss Spectroscopy + .. [*] Ray Egerton, "Electron Energy-Loss Spectroscopy in the Electron Microscope", Springer-Verlag, 2011. 
""" m0 = constants.value("electron mass") - epsilon0 = constants.epsilon_0 # Vacuum permittivity [F/m] - hbar = constants.hbar # Reduced Plank constant [J·s] - k = 2 * epsilon0 * m0 / (np.pi * nat * hbar ** 2) + epsilon0 = constants.epsilon_0 # Vacuum permittivity [F/m] + hbar = constants.hbar # Reduced Plank constant [J·s] + k = 2 * epsilon0 * m0 / (np.pi * nat * hbar**2) axis = self.axes_manager.signal_axes[0] if cumulative is False: - dneff1 = k * simps((-1. / self.data).imag * axis.axis, - x=axis.axis, - axis=axis.index_in_array) - dneff2 = k * simps(self.data.imag * axis.axis, - x=axis.axis, - axis=axis.index_in_array) + dneff1 = k * simps( + (-1.0 / self.data).imag * axis.axis, + x=axis.axis, + axis=axis.index_in_array, + ) + dneff2 = k * simps( + self.data.imag * axis.axis, x=axis.axis, axis=axis.index_in_array + ) neff1 = self._get_navigation_signal(data=dneff1) neff2 = self._get_navigation_signal(data=dneff2) else: neff1 = self._deepcopy_with_new_data( - k * cumtrapz((-1. / self.data).imag * axis.axis, - x=axis.axis, - axis=axis.index_in_array, - initial=0)) + k + * cumtrapz( + (-1.0 / self.data).imag * axis.axis, + x=axis.axis, + axis=axis.index_in_array, + initial=0, + ) + ) neff2 = self._deepcopy_with_new_data( - k * cumtrapz(self.data.imag * axis.axis, - x=axis.axis, - axis=axis.index_in_array, - initial=0)) + k + * cumtrapz( + self.data.imag * axis.axis, + x=axis.axis, + axis=axis.index_in_array, + initial=0, + ) + ) # Prepare return neff1.metadata.General.title = ( r"$n_{\mathrm{eff}}\left(-\Im\left(\epsilon^{-1}\right)\right)$ " - "calculated from " + - self.metadata.General.title + - " using the Bethe f-sum rule.") + "calculated from " + + self.metadata.General.title + + " using the Bethe f-sum rule." + ) neff2.metadata.General.title = ( r"$n_{\mathrm{eff}}\left(\epsilon_{2}\right)$ " - "calculated from " + - self.metadata.General.title + - " using the Bethe f-sum rule.") + "calculated from " + + self.metadata.General.title + + " using the Bethe f-sum rule." + ) return neff1, neff2 @@ -145,14 +157,17 @@ def get_electron_energy_loss_spectrum(self, zlp, t): for axis in self.axes_manager.signal_axes: if not axis.is_uniform: raise NotImplementedError( - "The function is not implemented for non-uniform axes.") - data = ((-1 / self.data).imag * eels_constant(self, zlp, t).data * - self.axes_manager.signal_axes[0].scale) + "The function is not implemented for non-uniform axes." 
+ ) + data = ( + (-1 / self.data).imag + * eels_constant(self, zlp, t).data + * self.axes_manager.signal_axes[0].scale + ) s = self._deepcopy_with_new_data(data) s.data = s.data.real s.set_signal_type("EELS") - s.metadata.General.title = ("EELS calculated from " + - self.metadata.General.title) + s.metadata.General.title = "EELS calculated from " + self.metadata.General.title return s diff --git a/exspy/signals/eds.py b/exspy/signals/eds.py index b60bbd943..da088ef6f 100644 --- a/exspy/signals/eds.py +++ b/exspy/signals/eds.py @@ -31,10 +31,7 @@ from exspy.misc.eds import utils as utils_eds from hyperspy.misc.utils import isiterable from hyperspy.utils.markers import Texts, VerticalLines, Lines -from hyperspy.docstrings.plot import ( - BASE_PLOT_DOCSTRING_PARAMETERS, - PLOT1D_DOCSTRING -) +from hyperspy.docstrings.plot import BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING from hyperspy.docstrings.signal import LAZYSIGNAL_DOC _logger = logging.getLogger(__name__) @@ -47,10 +44,12 @@ class EDSSpectrum(Signal1D): def __init__(self, *args, **kwards): super().__init__(*args, **kwards) - if self.metadata.Signal.signal_type == 'EDS': - warnings.warn('The microscope type is not set. Use ' - 'set_signal_type(\'EDS_TEM\') ' - 'or set_signal_type(\'EDS_SEM\')') + if self.metadata.Signal.signal_type == "EDS": + warnings.warn( + "The microscope type is not set. Use " + "set_signal_type('EDS_TEM') " + "or set_signal_type('EDS_SEM')" + ) self.axes_manager.signal_axes[0].is_binned = True self._xray_markers = {} @@ -78,35 +77,39 @@ def _get_line_energy(self, Xray_line, FWHM_MnKa=None): units_name = self.axes_manager.signal_axes[0].units - if FWHM_MnKa == 'auto': + if FWHM_MnKa == "auto": if self.metadata.Signal.signal_type == "EDS_SEM": - FWHM_MnKa = self.metadata.Acquisition_instrument.SEM. \ - Detector.EDS.energy_resolution_MnKa + FWHM_MnKa = ( + self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa + ) elif self.metadata.Signal.signal_type == "EDS_TEM": - FWHM_MnKa = self.metadata.Acquisition_instrument.TEM. \ - Detector.EDS.energy_resolution_MnKa + FWHM_MnKa = ( + self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa + ) else: raise NotImplementedError( "This method only works for EDS_TEM or EDS_SEM signals. " "You can use `set_signal_type('EDS_TEM')` or" "`set_signal_type('EDS_SEM')` to convert to one of these" - "signal types.") + "signal types." + ) line_energy = utils_eds._get_energy_xray_line(Xray_line) - if units_name == 'eV': + if units_name == "eV": line_energy *= 1000 if FWHM_MnKa is not None: - line_FWHM = utils_eds.get_FWHM_at_Energy( - FWHM_MnKa, line_energy / 1000) * 1000 - elif units_name == 'keV': + line_FWHM = ( + utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy / 1000) * 1000 + ) + elif units_name == "keV": if FWHM_MnKa is not None: - line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa, - line_energy) + line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy) else: raise ValueError( f"{units_name} is not a valid units for the energy axis. " "Only `eV` and `keV` are supported. " "If `s` is the variable containing this EDS spectrum:\n " - ">>> s.axes_manager.signal_axes[0].units = 'keV' \n") + ">>> s.axes_manager.signal_axes[0].units = 'keV' \n" + ) if FWHM_MnKa is None: return line_energy else: @@ -126,11 +129,12 @@ def _get_beam_energy(self): else: raise AttributeError( "The beam energy is not defined in `metadata`. " - "Use `set_microscope_parameters` to set it.") + "Use `set_microscope_parameters` to set it." 
+ ) units_name = self.axes_manager.signal_axes[0].units - if units_name == 'eV': + if units_name == "eV": beam_energy *= 1000 return beam_energy @@ -173,11 +177,10 @@ def sum(self, axis=None, out=None, rechunk=False): s = out or s # Update live time by the change in navigation axes dimensions - time_factor = ( - np.prod([ax.size for ax in self.axes_manager.navigation_axes]) - / np.prod([ax.size for ax in s.axes_manager.navigation_axes]) - ) - aimd = s.metadata.get_item('Acquisition_instrument', None) + time_factor = np.prod( + [ax.size for ax in self.axes_manager.navigation_axes] + ) / np.prod([ax.size for ax in s.axes_manager.navigation_axes]) + aimd = s.metadata.get_item("Acquisition_instrument", None) if aimd is not None: aimd = s.metadata.Acquisition_instrument if "SEM.Detector.EDS.live_time" in aimd: @@ -185,24 +188,28 @@ def sum(self, axis=None, out=None, rechunk=False): elif "TEM.Detector.EDS.live_time" in aimd: aimd.TEM.Detector.EDS.live_time *= time_factor else: - _logger.info("Live_time could not be found in the metadata and " - "has not been updated.") + _logger.info( + "Live_time could not be found in the metadata and " + "has not been updated." + ) if out is None: return s sum.__doc__ = Signal1D.sum.__doc__ - def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, - out=None): + def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, out=None): factors = self._validate_rebin_args_and_get_factors( new_shape=new_shape, - scale=scale, ) - m = super().rebin(new_shape=new_shape, scale=scale, crop=crop, - dtype=dtype, out=out) + scale=scale, + ) + m = super().rebin( + new_shape=new_shape, scale=scale, crop=crop, dtype=dtype, out=out + ) m = out or m - time_factor = np.prod([factors[axis.index_in_array] - for axis in m.axes_manager.navigation_axes]) + time_factor = np.prod( + [factors[axis.index_in_array] for axis in m.axes_manager.navigation_axes] + ) aimd = m.metadata.Acquisition_instrument if "Acquisition_instrument.SEM.Detector.EDS.real_time" in m.metadata: aimd.SEM.Detector.EDS.real_time *= time_factor @@ -210,14 +217,16 @@ def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, aimd.TEM.Detector.EDS.real_time *= time_factor else: _logger.info( - "real_time could not be found in the metadata and has not been updated.") + "real_time could not be found in the metadata and has not been updated." + ) if "Acquisition_instrument.SEM.Detector.EDS.live_time" in m.metadata: aimd.SEM.Detector.EDS.live_time *= time_factor elif "Acquisition_instrument.TEM.Detector.EDS.live_time" in m.metadata: aimd.TEM.Detector.EDS.live_time *= time_factor else: _logger.info( - "Live_time could not be found in the metadata and has not been updated.") + "Live_time could not be found in the metadata and has not been updated." + ) if out is None: return m @@ -283,7 +292,8 @@ def add_elements(self, elements): "Input must be in the form of a list. For example, " "if `s` is the variable containing this EDS spectrum:\n " ">>> s.add_elements(('C',))\n" - "See the docstring for more information.") + "See the docstring for more information." 
+ ) if "Sample.elements" in self.metadata: elements_ = set(self.metadata.Sample.elements) else: @@ -292,29 +302,24 @@ def add_elements(self, elements): if element in elements_db: elements_.add(element) else: - raise ValueError( - f"{element} is not a valid chemical element symbol.") - self.metadata.set_item('Sample.elements', sorted(list(elements_))) + raise ValueError(f"{element} is not a valid chemical element symbol.") + self.metadata.set_item("Sample.elements", sorted(list(elements_))) - def _get_xray_lines(self, xray_lines=None, only_one=None, - only_lines=('a',)): + def _get_xray_lines(self, xray_lines=None, only_one=None, only_lines=("a",)): if xray_lines is None: - if 'Sample.xray_lines' in self.metadata: + if "Sample.xray_lines" in self.metadata: xray_lines = self.metadata.Sample.xray_lines - elif 'Sample.elements' in self.metadata: + elif "Sample.elements" in self.metadata: xray_lines = self._get_lines_from_elements( self.metadata.Sample.elements, only_one=only_one, - only_lines=only_lines) + only_lines=only_lines, + ) else: - raise ValueError( - "Not X-ray line, set them with `add_elements`.") + raise ValueError("Not X-ray line, set them with `add_elements`.") return xray_lines - def set_lines(self, - lines, - only_one=True, - only_lines=('a',)): + def set_lines(self, lines, only_one=True, only_lines=("a",)): """Erase all Xrays lines and set them. See add_lines for details. @@ -353,14 +358,9 @@ def set_lines(self, only_lines = utils_eds._parse_only_lines(only_lines) if "Sample.xray_lines" in self.metadata: del self.metadata.Sample.xray_lines - self.add_lines(lines=lines, - only_one=only_one, - only_lines=only_lines) - - def add_lines(self, - lines=(), - only_one=True, - only_lines=("a",)): + self.add_lines(lines=lines, only_one=only_one, only_lines=only_lines) + + def add_lines(self, lines=(), only_one=True, only_lines=("a",)): """Add X-rays lines to the internal list. Although most functions do not require an internal list of @@ -430,11 +430,11 @@ def add_lines(self, except ValueError: raise ValueError( "Invalid line symbol. " - "Please provide a valid line symbol e.g. Fe_Ka") + "Please provide a valid line symbol e.g. 
Fe_Ka" + ) if element in elements_db: elements.add(element) - if subshell in elements_db[element]['Atomic_properties' - ]['Xray_lines']: + if subshell in elements_db[element]["Atomic_properties"]["Xray_lines"]: lines_len = len(xray_lines) xray_lines.add(line) if lines_len != len(xray_lines): @@ -442,37 +442,28 @@ def add_lines(self, else: _logger.info(f"{line} line already in.") else: - raise ValueError( - f"{line} is not a valid line of {element}.") + raise ValueError(f"{line} is not a valid line of {element}.") else: - raise ValueError( - f"{element} is not a valid symbol of an element.") + raise ValueError(f"{element} is not a valid symbol of an element.") xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)[1] for xray in xray_not_here: - warnings.warn(f"{xray} is not in the data energy range.", - UserWarning) + warnings.warn(f"{xray} is not in the data energy range.", UserWarning) if "Sample.elements" in self.metadata: - extra_elements = (set(self.metadata.Sample.elements) - - elements) + extra_elements = set(self.metadata.Sample.elements) - elements if extra_elements: new_lines = self._get_lines_from_elements( - extra_elements, - only_one=only_one, - only_lines=only_lines) + extra_elements, only_one=only_one, only_lines=only_lines + ) if new_lines: self.add_lines(list(new_lines) + list(lines)) self.add_elements(elements) - if not hasattr(self.metadata, 'Sample'): - self.metadata.add_node('Sample') + if not hasattr(self.metadata, "Sample"): + self.metadata.add_node("Sample") if "Sample.xray_lines" in self.metadata: - xray_lines = xray_lines.union( - self.metadata.Sample.xray_lines) + xray_lines = xray_lines.union(self.metadata.Sample.xray_lines) self.metadata.Sample.xray_lines = sorted(list(xray_lines)) - def _get_lines_from_elements(self, - elements, - only_one=False, - only_lines=("a",)): + def _get_lines_from_elements(self, elements, only_one=False, only_lines=("a",)): """Returns the X-ray lines of the given elements in spectral range of the data. 
@@ -500,31 +491,34 @@ def _get_lines_from_elements(self, # Fall back to the high_value of the energy axis beam_energy = self.axes_manager.signal_axes[0].high_value lines = [] - elements = [el if isinstance(el, str) else el.decode() - for el in elements] + elements = [el if isinstance(el, str) else el.decode() for el in elements] for element in elements: # Possible line (existing and excited by electron) element_lines = [] - for subshell in list(elements_db[element]['Atomic_properties' - ]['Xray_lines'].keys()): + for subshell in list( + elements_db[element]["Atomic_properties"]["Xray_lines"].keys() + ): if only_lines and subshell not in only_lines: continue element_lines.append(element + "_" + subshell) - element_lines = self._get_xray_lines_in_spectral_range( - element_lines)[0] + element_lines = self._get_xray_lines_in_spectral_range(element_lines)[0] if only_one and element_lines: # Choose the best line select_this = -1 element_lines.sort() for i, line in enumerate(element_lines): - if (self._get_line_energy(line) < beam_energy / 2): + if self._get_line_energy(line) < beam_energy / 2: select_this = i break - element_lines = [element_lines[select_this], ] + element_lines = [ + element_lines[select_this], + ] if not element_lines: - _logger.info(f"There is no X-ray line for element {element} " - "in the data spectral range") + _logger.info( + f"There is no X-ray line for element {element} " + "in the data spectral range" + ) else: lines.extend(element_lines) lines.sort() @@ -532,24 +526,28 @@ def _get_lines_from_elements(self, def _parse_xray_lines(self, xray_lines, only_one, only_lines): only_lines = utils_eds._parse_only_lines(only_lines) - xray_lines = self._get_xray_lines(xray_lines, only_one=only_one, - only_lines=only_lines) - xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range( - xray_lines) + xray_lines = self._get_xray_lines( + xray_lines, only_one=only_one, only_lines=only_lines + ) + xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines) for xray in xray_not_here: - warnings.warn(f"{xray} is not in the data energy range. " - "You can remove it with: " - f"`s.metadata.Sample.xray_lines.remove('{xray}')`") + warnings.warn( + f"{xray} is not in the data energy range. " + "You can remove it with: " + f"`s.metadata.Sample.xray_lines.remove('{xray}')`" + ) return xray_lines - def get_lines_intensity(self, - xray_lines=None, - integration_windows=2., - background_windows=None, - plot_result=False, - only_one=True, - only_lines=("a",), - **kwargs): + def get_lines_intensity( + self, + xray_lines=None, + integration_windows=2.0, + background_windows=None, + plot_result=False, + only_one=True, + only_lines=("a",), + **kwargs, + ): """Return the intensity map of selected Xray lines. The intensities, the number of X-ray counts, are computed by @@ -630,27 +628,30 @@ def get_lines_intensity(self, plot """ - if xray_lines is not None and \ - (not isinstance(xray_lines, Iterable) or \ - isinstance(xray_lines, (str, dict))): + if xray_lines is not None and ( + not isinstance(xray_lines, Iterable) or isinstance(xray_lines, (str, dict)) + ): raise TypeError( "xray_lines must be a compatible iterable, but was " - f"mistakenly provided as a {type(xray_lines)}.") + f"mistakenly provided as a {type(xray_lines)}." 
+ ) xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines) - if hasattr(integration_windows, '__iter__') is False: + if hasattr(integration_windows, "__iter__") is False: integration_windows = self.estimate_integration_windows( - windows_width=integration_windows, xray_lines=xray_lines) + windows_width=integration_windows, xray_lines=xray_lines + ) intensities = [] ax = self.axes_manager.signal_axes[0] # test Signal1D (0D problem) # signal_to_index = self.axes_manager.navigation_dimension - 2 - for i, (Xray_line, window) in enumerate( - zip(xray_lines, integration_windows)): + for i, (Xray_line, window) in enumerate(zip(xray_lines, integration_windows)): element, line = utils_eds._get_element_and_line(Xray_line) line_energy = self._get_line_energy(Xray_line) # Replace with `map` function for lazy large datasets - img = self.isig[window[0]:window[1]].integrate1D(-1) # integrate over window. + img = self.isig[window[0] : window[1]].integrate1D( + -1 + ) # integrate over window. if np.issubdtype(img.data.dtype, np.integer): # The operations below require a float dtype with the default # numpy casting rule ('same_kind') @@ -658,29 +659,32 @@ def get_lines_intensity(self, if background_windows is not None: bw = background_windows[i] # TODO: test to prevent slicing bug. To be reomved when fixed - indexes = [float(ax.value2index(de)) - for de in list(bw) + window] + indexes = [float(ax.value2index(de)) for de in list(bw) + window] if indexes[0] == indexes[1]: bck1 = self.isig[bw[0]] else: - bck1 = self.isig[bw[0]:bw[1]].integrate1D(-1) + bck1 = self.isig[bw[0] : bw[1]].integrate1D(-1) if indexes[2] == indexes[3]: bck2 = self.isig[bw[2]] else: - bck2 = self.isig[bw[2]:bw[3]].integrate1D(-1) + bck2 = self.isig[bw[2] : bw[3]].integrate1D(-1) corr_factor = (indexes[5] - indexes[4]) / ( - (indexes[1] - indexes[0]) + (indexes[3] - indexes[2])) + (indexes[1] - indexes[0]) + (indexes[3] - indexes[2]) + ) img = img - (bck1 + bck2) * corr_factor img.metadata.General.title = ( - f'X-ray line intensity of {self.metadata.General.title}: ' - f'{Xray_line} at {line_energy:.2f} ' - f'{self.axes_manager.signal_axes[0].units}') + f"X-ray line intensity of {self.metadata.General.title}: " + f"{Xray_line} at {line_energy:.2f} " + f"{self.axes_manager.signal_axes[0].units}" + ) img = img.transpose(signal_axes=[]) if plot_result and img.axes_manager.navigation_size == 1: if img._lazy: img.compute() - print(f"{Xray_line} at {line_energy} {ax.units} : " - f"Intensity = {img.data[0]:.2f}") + print( + f"{Xray_line} at {line_energy} {ax.units} : " + f"Intensity = {img.data[0]:.2f}" + ) img.metadata.set_item("Sample.elements", ([element])) img.metadata.set_item("Sample.xray_lines", ([Xray_line])) intensities.append(img) @@ -720,21 +724,16 @@ def get_take_off_angle(self): elif self.metadata.Signal.signal_type == "EDS_TEM": mp = self.metadata.Acquisition_instrument.TEM - tilt_stage = mp.get_item('Stage.tilt_alpha', None) - azimuth_angle = mp.get_item('Detector.EDS.azimuth_angle', None) - elevation_angle = mp.get_item('Detector.EDS.elevation_angle', None) - beta_tilt = mp.get_item('Stage.tilt_beta', 0.0) + tilt_stage = mp.get_item("Stage.tilt_alpha", None) + azimuth_angle = mp.get_item("Detector.EDS.azimuth_angle", None) + elevation_angle = mp.get_item("Detector.EDS.elevation_angle", None) + beta_tilt = mp.get_item("Stage.tilt_beta", 0.0) return utils_eds.take_off_angle( - tilt_stage, - azimuth_angle, - elevation_angle, - beta_tilt + tilt_stage, azimuth_angle, elevation_angle, beta_tilt ) - def 
estimate_integration_windows(self, - windows_width=2., - xray_lines=None): + def estimate_integration_windows(self, windows_width=2.0, xray_lines=None): """ Estimate a window of integration for each X-ray line. @@ -772,17 +771,15 @@ def estimate_integration_windows(self, xray_lines = self._get_xray_lines(xray_lines) integration_windows = [] for Xray_line in xray_lines: - line_energy, line_FWHM = self._get_line_energy(Xray_line, - FWHM_MnKa='auto') + line_energy, line_FWHM = self._get_line_energy(Xray_line, FWHM_MnKa="auto") element, line = utils_eds._get_element_and_line(Xray_line) - det = windows_width * line_FWHM / 2. + det = windows_width * line_FWHM / 2.0 integration_windows.append([line_energy - det, line_energy + det]) return integration_windows - def estimate_background_windows(self, - line_width=[2, 2], - windows_width=1, - xray_lines=None): + def estimate_background_windows( + self, line_width=[2, 2], windows_width=1, xray_lines=None + ): """ Estimate two windows around each X-ray line containing only the background. @@ -826,15 +823,12 @@ def estimate_background_windows(self, xray_lines = self._get_xray_lines(xray_lines) windows_position = [] for xray_line in xray_lines: - line_energy, line_FWHM = self._get_line_energy(xray_line, - FWHM_MnKa='auto') + line_energy, line_FWHM = self._get_line_energy(xray_line, FWHM_MnKa="auto") tmp = [ - line_energy - line_FWHM * line_width[0] - - line_FWHM * windows_width, + line_energy - line_FWHM * line_width[0] - line_FWHM * windows_width, line_energy - line_FWHM * line_width[0], line_energy + line_FWHM * line_width[1], - line_energy + line_FWHM * line_width[1] + - line_FWHM * windows_width + line_energy + line_FWHM * line_width[1] + line_FWHM * windows_width, ] windows_position.append(tmp) windows_position = np.array(windows_position) @@ -843,25 +837,26 @@ def estimate_background_windows(self, for i in range(len(index) - 1): ia, ib = index[i], index[i + 1] if windows_position[ia, 2] > windows_position[ib, 0]: - interv = np.append(windows_position[ia, :2], - windows_position[ib, 2:]) + interv = np.append(windows_position[ia, :2], windows_position[ib, 2:]) windows_position[ia] = interv windows_position[ib] = interv return windows_position - def plot(self, - xray_lines=False, - only_lines=("a", "b"), - only_one=False, - background_windows=None, - integration_windows=None, - navigator="auto", - plot_markers=True, - autoscale='v', - norm="auto", - axes_manager=None, - navigator_kwds={}, - **kwargs): + def plot( + self, + xray_lines=False, + only_lines=("a", "b"), + only_one=False, + background_windows=None, + integration_windows=None, + navigator="auto", + plot_markers=True, + autoscale="v", + norm="auto", + axes_manager=None, + navigator_kwds={}, + **kwargs, + ): """Plot the EDS spectrum. The following markers can be added - The position of the X-ray lines and their names. 
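The hunks above reformat the public windowing API of EDSSpectrum (estimate_integration_windows, estimate_background_windows, get_lines_intensity and the plot markers). A minimal usage sketch, assuming exspy is installed so that set_signal_type("EDS_TEM") resolves to the classes defined in this file; the data, calibration and element choice are purely illustrative:

import numpy as np
import hyperspy.api as hs

# Purely synthetic EDS-TEM spectrum image: 4 x 4 navigation positions,
# 1024 channels at a hypothetical 10 eV/channel calibration.
s = hs.signals.Signal1D(np.random.poisson(50, size=(4, 4, 1024)).astype(float))
s.set_signal_type("EDS_TEM")
s.axes_manager.signal_axes[0].units = "keV"
s.axes_manager.signal_axes[0].scale = 0.01
s.set_microscope_parameters(beam_energy=200, live_time=2.5)
s.add_elements(["Fe", "Pt"])

# Windows around each X-ray line, then background-subtracted intensities.
bw = s.estimate_background_windows(line_width=[5.0, 2.0])
iw = s.estimate_integration_windows(windows_width=3.0)
intensities = s.get_lines_intensity(background_windows=bw, integration_windows=iw)

# Overlay the line positions and both sets of windows on the spectrum plot.
s.plot(xray_lines=True, background_windows=bw, integration_windows=iw)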
@@ -929,47 +924,64 @@ def plot(self, set_elements, add_elements, estimate_integration_windows, get_lines_intensity, estimate_background_windows """ - super().plot(navigator=navigator, - plot_markers=plot_markers, - autoscale=autoscale, - norm=norm, - axes_manager=axes_manager, - navigator_kwds=navigator_kwds, - **kwargs) - self._plot_xray_lines(xray_lines, only_lines, only_one, - background_windows, integration_windows, - render_figure=False) - self._render_figure(plot=['signal_plot']) - - plot.__doc__ %= (BASE_PLOT_DOCSTRING_PARAMETERS, - PLOT1D_DOCSTRING) - - def _plot_xray_lines(self, xray_lines=False, only_lines=("a", "b"), - only_one=False, background_windows=None, - integration_windows=None, render_figure=True): - if (xray_lines is not False or - background_windows is not None or - integration_windows is not None): + super().plot( + navigator=navigator, + plot_markers=plot_markers, + autoscale=autoscale, + norm=norm, + axes_manager=axes_manager, + navigator_kwds=navigator_kwds, + **kwargs, + ) + self._plot_xray_lines( + xray_lines, + only_lines, + only_one, + background_windows, + integration_windows, + render_figure=False, + ) + self._render_figure(plot=["signal_plot"]) + + plot.__doc__ %= (BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING) + + def _plot_xray_lines( + self, + xray_lines=False, + only_lines=("a", "b"), + only_one=False, + background_windows=None, + integration_windows=None, + render_figure=True, + ): + if ( + xray_lines is not False + or background_windows is not None + or integration_windows is not None + ): if xray_lines is False: xray_lines = True only_lines = utils_eds._parse_only_lines(only_lines) - if xray_lines is True or xray_lines == 'from_elements': - if ('Sample.xray_lines' in self.metadata and - xray_lines != 'from_elements'): + if xray_lines is True or xray_lines == "from_elements": + if ( + "Sample.xray_lines" in self.metadata + and xray_lines != "from_elements" + ): xray_lines = self.metadata.Sample.xray_lines - elif 'Sample.elements' in self.metadata: + elif "Sample.elements" in self.metadata: xray_lines = self._get_lines_from_elements( self.metadata.Sample.elements, only_one=only_one, - only_lines=only_lines) + only_lines=only_lines, + ) else: - _logger.warning( - "No elements defined, set them with `add_elements`") + _logger.warning("No elements defined, set them with `add_elements`") # No X-rays lines, nothing to do then return xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range( - xray_lines) + xray_lines + ) for xray in xray_not_here: _logger.warning(f"{xray} is not in the data energy range.") @@ -977,24 +989,24 @@ def _plot_xray_lines(self, xray_lines=False, only_lines=("a", "b"), self.add_xray_lines_markers(xray_lines, render_figure=False) if background_windows is not None: - self._add_background_windows_markers(background_windows, - render_figure=False) + self._add_background_windows_markers( + background_windows, render_figure=False + ) if integration_windows is not None: - if integration_windows == 'auto': + if integration_windows == "auto": integration_windows = 2.0 - if hasattr(integration_windows, '__iter__') is False: + if hasattr(integration_windows, "__iter__") is False: integration_windows = self.estimate_integration_windows( - windows_width=integration_windows, - xray_lines=xray_lines) - self._add_vertical_lines_groups(integration_windows, - linestyle='--', - render_figure=False) + windows_width=integration_windows, xray_lines=xray_lines + ) + self._add_vertical_lines_groups( + integration_windows, linestyle="--", 
render_figure=False + ) # Render figure only at the end if render_figure: - self._render_figure(plot=['signal_plot']) + self._render_figure(plot=["signal_plot"]) - def _add_vertical_lines_groups(self, position, render_figure=True, - **kwargs): + def _add_vertical_lines_groups(self, position, render_figure=True, **kwargs): """ Add vertical markers for each group that shares the color. @@ -1006,16 +1018,15 @@ def _add_vertical_lines_groups(self, position, render_figure=True, kwargs keywords argument for :py:class:`~.api.plot.markers.VerticalLine` """ - colors = itertools.cycle(np.sort( - plt.rcParams['axes.prop_cycle'].by_key()["color"])) + colors = itertools.cycle( + np.sort(plt.rcParams["axes.prop_cycle"].by_key()["color"]) + ) for x, color in zip(position, colors): - line = VerticalLines(offsets=x, - color=color, - **kwargs) + line = VerticalLines(offsets=x, color=color, **kwargs) self.add_marker(line, render_figure=False) if render_figure: - self._render_figure(plot=['signal_plot']) + self._render_figure(plot=["signal_plot"]) def add_xray_lines_markers(self, xray_lines, render_figure=True): """ @@ -1030,33 +1041,39 @@ def add_xray_lines_markers(self, xray_lines, render_figure=True): if self._plot is None or not self._plot.is_active: raise RuntimeError("The signal needs to be plotted.") norm = self._plot.signal_plot.ax_lines[0].norm - minimum_intensity = self.data[self.data > 0].min() if norm == 'log' else 0 + minimum_intensity = self.data[self.data > 0].min() if norm == "log" else 0 line_names = [] segments = np.empty((len(xray_lines), 2, 2)) offsets = np.empty((len(xray_lines), 2)) # might want to set the intensity based on the alpha line intensity for i, xray_line in enumerate(xray_lines): element, line = utils_eds._get_element_and_line(xray_line) - relative_factor = elements_db[element][ - 'Atomic_properties']['Xray_lines'][line]['weight'] - eng = self._get_line_energy(f'{element}_{line}') + relative_factor = elements_db[element]["Atomic_properties"]["Xray_lines"][ + line + ]["weight"] + eng = self._get_line_energy(f"{element}_{line}") segments[i] = [[eng, 0], [eng, 1]] offsets[i] = [eng, 1] - line_names.append(r'$\mathrm{%s}_{\mathrm{%s}}$' % utils_eds._get_element_and_line(xray_line)) - - line_markers = Lines(segments=segments, - transform="relative", - color='black', - ) - text_markers = Texts(offsets=offsets, - texts=line_names, - offset_transform="relative", - rotation=np.pi/2, - horizontalalignment="left", - verticalalignment="bottom", - facecolor='black', - shift=.005, - ) + line_names.append( + r"$\mathrm{%s}_{\mathrm{%s}}$" + % utils_eds._get_element_and_line(xray_line) + ) + + line_markers = Lines( + segments=segments, + transform="relative", + color="black", + ) + text_markers = Texts( + offsets=offsets, + texts=line_names, + offset_transform="relative", + rotation=np.pi / 2, + horizontalalignment="left", + verticalalignment="bottom", + facecolor="black", + shift=0.005, + ) self.add_marker(line_markers, render_figure=False) self.add_marker(text_markers, render_figure=False) @@ -1069,7 +1086,7 @@ def add_xray_lines_markers(self, xray_lines, render_figure=True): self._xray_markers["names"] = xray_lines if render_figure: - self._render_figure(plot=['signal_plot']) + self._render_figure(plot=["signal_plot"]) def _xray_marker_closed(self, obj): self._xray_markers = {} @@ -1091,11 +1108,9 @@ def remove_xray_lines_markers(self, xray_lines, render_figure=True): self._xray_markers["texts"].remove_items(ind) self._xray_markers["names"] = np.delete(self._xray_markers["names"], ind) if 
render_figure: - self._render_figure(plot=['signal_plot']) + self._render_figure(plot=["signal_plot"]) - def _add_background_windows_markers(self, - windows_position, - render_figure=True): + def _add_background_windows_markers(self, windows_position, render_figure=True): """ Plot the background windows associated with each X-ray lines. @@ -1122,21 +1137,19 @@ def _add_background_windows_markers(self, if ax.value2index(bw[0]) == ax.value2index(bw[1]): y1 = self.isig[bw[0]].data else: - y1 = self.isig[bw[0]:bw[1]].mean(-1).data + y1 = self.isig[bw[0] : bw[1]].mean(-1).data if ax.value2index(bw[2]) == ax.value2index(bw[3]): y2 = self.isig[bw[2]].data else: - y2 = self.isig[bw[2]:bw[3]].mean(-1).data - x1 = (bw[0] + bw[1]) / 2. - x2 = (bw[2] + bw[3]) / 2. - segments.append([[x1, y1[0]], - [x2, y2[0]]]) + y2 = self.isig[bw[2] : bw[3]].mean(-1).data + x1 = (bw[0] + bw[1]) / 2.0 + x2 = (bw[2] + bw[3]) / 2.0 + segments.append([[x1, y1[0]], [x2, y2[0]]]) segments = np.array(segments) - lines = Lines(segments=segments, - color='black') + lines = Lines(segments=segments, color="black") self.add_marker(lines, render_figure=False) if render_figure: - self._render_figure(plot=['signal_plot']) + self._render_figure(plot=["signal_plot"]) class LazyEDSSpectrum(EDSSpectrum, LazySignal1D): diff --git a/exspy/signals/eds_sem.py b/exspy/signals/eds_sem.py index a1804fd5e..5f0e271f1 100644 --- a/exspy/signals/eds_sem.py +++ b/exspy/signals/eds_sem.py @@ -33,30 +33,20 @@ @add_gui_method(toolkey="exspy.microscope_parameters_EDS_SEM") class EDSSEMParametersUI(BaseSetMetadataItems): - - beam_energy = t.Float(t.Undefined, - label='Beam energy (keV)') - live_time = t.Float(t.Undefined, - label='Live time (s)') - tilt_stage = t.Float(t.Undefined, - label='Stage tilt (degree)') - azimuth_angle = t.Float(t.Undefined, - label='Azimuth angle (degree)') - elevation_angle = t.Float(t.Undefined, - label='Elevation angle (degree)') - energy_resolution_MnKa = t.Float(t.Undefined, - label='Energy resolution MnKa (eV)') + beam_energy = t.Float(t.Undefined, label="Beam energy (keV)") + live_time = t.Float(t.Undefined, label="Live time (s)") + tilt_stage = t.Float(t.Undefined, label="Stage tilt (degree)") + azimuth_angle = t.Float(t.Undefined, label="Azimuth angle (degree)") + elevation_angle = t.Float(t.Undefined, label="Elevation angle (degree)") + energy_resolution_MnKa = t.Float(t.Undefined, label="Energy resolution MnKa (eV)") mapping = { - 'Acquisition_instrument.SEM.beam_energy': 'beam_energy', - 'Acquisition_instrument.TEM.Stage.tilt_alpha': 'tilt_stage', - 'Acquisition_instrument.SEM.Detector.EDS.live_time': - 'live_time', - 'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle': - 'azimuth_angle', - 'Acquisition_instrument.SEM.Detector.EDS.elevation_angle': - 'elevation_angle', - 'Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa': - 'energy_resolution_MnKa', } + "Acquisition_instrument.SEM.beam_energy": "beam_energy", + "Acquisition_instrument.TEM.Stage.tilt_alpha": "tilt_stage", + "Acquisition_instrument.SEM.Detector.EDS.live_time": "live_time", + "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle": "azimuth_angle", + "Acquisition_instrument.SEM.Detector.EDS.elevation_angle": "elevation_angle", + "Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa": "energy_resolution_MnKa", + } class EDSSEMSpectrum(EDSSpectrum): @@ -68,11 +58,12 @@ class EDSSEMSpectrum(EDSSpectrum): def __init__(self, *args, **kwards): super().__init__(*args, **kwards) # Attributes defaults - if 
'Acquisition_instrument.SEM.Detector.EDS' not in self.metadata: - if 'Acquisition_instrument.TEM' in self.metadata: + if "Acquisition_instrument.SEM.Detector.EDS" not in self.metadata: + if "Acquisition_instrument.TEM" in self.metadata: self.metadata.set_item( "Acquisition_instrument.SEM", - self.metadata.Acquisition_instrument.TEM) + self.metadata.Acquisition_instrument.TEM, + ) del self.metadata.Acquisition_instrument.TEM self._set_default_param() @@ -116,28 +107,31 @@ def get_calibration_from(self, ref, nb_pix=1): for _axis in [ax_m, ax_ref]: if not _axis.is_uniform: raise NotImplementedError( - "The function is not implemented for non-uniform axes.") + "The function is not implemented for non-uniform axes." + ) ax_m.scale = ax_ref.scale ax_m.units = ax_ref.units ax_m.offset = ax_ref.offset # Setup metadata - if 'Acquisition_instrument.SEM' in ref.metadata: + if "Acquisition_instrument.SEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.SEM - elif 'Acquisition_instrument.TEM' in ref.metadata: + elif "Acquisition_instrument.TEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.TEM else: raise ValueError( "The reference has no metadata.Acquisition_instrument.TEM" - "\n nor metadata.Acquisition_instrument.SEM ") + "\n nor metadata.Acquisition_instrument.SEM " + ) mp = self.metadata mp.Acquisition_instrument.SEM = mp_ref.deepcopy() - if hasattr(mp_ref.Detector.EDS, 'live_time'): - mp.Acquisition_instrument.SEM.Detector.EDS.live_time = \ + if hasattr(mp_ref.Detector.EDS, "live_time"): + mp.Acquisition_instrument.SEM.Detector.EDS.live_time = ( mp_ref.Detector.EDS.live_time / nb_pix + ) def _load_from_TEM_param(self): """Transfer metadata.Acquisition_instrument.TEM to @@ -146,51 +140,62 @@ def _load_from_TEM_param(self): """ mp = self.metadata - if mp.has_item('Acquisition_instrument.SEM') is False: - mp.add_node('Acquisition_instrument.SEM') - if mp.has_item('Acquisition_instrument.SEM.Detector.EDS') is False: - mp.Acquisition_instrument.SEM.add_node('EDS') + if mp.has_item("Acquisition_instrument.SEM") is False: + mp.add_node("Acquisition_instrument.SEM") + if mp.has_item("Acquisition_instrument.SEM.Detector.EDS") is False: + mp.Acquisition_instrument.SEM.add_node("EDS") mp.Signal.signal_type = "EDS_SEM" # Transfer - if 'Acquisition_instrument.TEM' in mp: + if "Acquisition_instrument.TEM" in mp: mp.Acquisition_instrument.SEM = mp.Acquisition_instrument.TEM del mp.Acquisition_instrument.TEM def _set_default_param(self): - """Set to value to default (defined in preferences) - - """ + """Set to value to default (defined in preferences)""" mp = self.metadata if "Acquisition_instrument.SEM.Stage.tilt_alpha" not in mp: mp.set_item( "Acquisition_instrument.SEM.Stage.tilt_alpha", - preferences.EDS.eds_tilt_stage) + preferences.EDS.eds_tilt_stage, + ) if "Acquisition_instrument.SEM.Detector.EDS.elevation_angle" not in mp: mp.set_item( "Acquisition_instrument.SEM.Detector.EDS.elevation_angle", - preferences.EDS.eds_detector_elevation) - if "Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa" \ - not in mp: + preferences.EDS.eds_detector_elevation, + ) + if "Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa" not in mp: mp.set_item( - "Acquisition_instrument.SEM.Detector.EDS." - "energy_resolution_MnKa", - preferences.EDS.eds_mn_ka) + "Acquisition_instrument.SEM.Detector.EDS." 
"energy_resolution_MnKa", + preferences.EDS.eds_mn_ka, + ) if "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle" not in mp: mp.set_item( "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle", - preferences.EDS.eds_detector_azimuth) - - def set_microscope_parameters(self, - beam_energy=None, - live_time=None, - tilt_stage=None, - azimuth_angle=None, - elevation_angle=None, - energy_resolution_MnKa=None, - display=True, toolkit=None): - if set([beam_energy, live_time, tilt_stage, azimuth_angle, - elevation_angle, energy_resolution_MnKa]) == {None}: + preferences.EDS.eds_detector_azimuth, + ) + + def set_microscope_parameters( + self, + beam_energy=None, + live_time=None, + tilt_stage=None, + azimuth_angle=None, + elevation_angle=None, + energy_resolution_MnKa=None, + display=True, + toolkit=None, + ): + if set( + [ + beam_energy, + live_time, + tilt_stage, + azimuth_angle, + elevation_angle, + energy_resolution_MnKa, + ] + ) == {None}: tem_par = EDSSEMParametersUI(self) return tem_par.gui(toolkit=toolkit, display=display) md = self.metadata @@ -198,28 +203,25 @@ def set_microscope_parameters(self, if beam_energy is not None: md.set_item("Acquisition_instrument.SEM.beam_energy", beam_energy) if live_time is not None: - md.set_item( - "Acquisition_instrument.SEM.Detector.EDS.live_time", - live_time) + md.set_item("Acquisition_instrument.SEM.Detector.EDS.live_time", live_time) if tilt_stage is not None: - md.set_item( - "Acquisition_instrument.SEM.Stage.tilt_alpha", - tilt_stage) + md.set_item("Acquisition_instrument.SEM.Stage.tilt_alpha", tilt_stage) if azimuth_angle is not None: md.set_item( - "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle", - azimuth_angle) + "Acquisition_instrument.SEM.Detector.EDS.azimuth_angle", azimuth_angle + ) if elevation_angle is not None: md.set_item( "Acquisition_instrument.SEM.Detector.EDS.elevation_angle", - elevation_angle) + elevation_angle, + ) if energy_resolution_MnKa is not None: md.set_item( - "Acquisition_instrument.SEM.Detector.EDS." - "energy_resolution_MnKa", - energy_resolution_MnKa) - set_microscope_parameters.__doc__ = \ - """ + "Acquisition_instrument.SEM.Detector.EDS." "energy_resolution_MnKa", + energy_resolution_MnKa, + ) + + set_microscope_parameters.__doc__ = """ Set the microscope parameters. If no arguments are given, raises an interactive mode to fill @@ -255,7 +257,9 @@ def set_microscope_parameters(self, Default value 130.0 eV Now set to 135.0 eV - """.format(DISPLAY_DT, TOOLKIT_DT) + """.format( + DISPLAY_DT, TOOLKIT_DT + ) def _are_microscope_parameters_missing(self): """Check if the EDS parameters necessary for quantification @@ -264,8 +268,9 @@ def _are_microscope_parameters_missing(self): """ must_exist = ( - 'Acquisition_instrument.SEM.beam_energy', - 'Acquisition_instrument.SEM.Detector.EDS.live_time', ) + "Acquisition_instrument.SEM.beam_energy", + "Acquisition_instrument.SEM.Detector.EDS.live_time", + ) missing_parameters = [] for item in must_exist: @@ -278,8 +283,7 @@ def _are_microscope_parameters_missing(self): else: return False - def create_model(self, auto_background=True, auto_add_lines=True, - *args, **kwargs): + def create_model(self, auto_background=True, auto_add_lines=True, *args, **kwargs): """Create a model for the current SEM EDS data. 
Parameters @@ -302,10 +306,14 @@ def create_model(self, auto_background=True, auto_add_lines=True, """ from exspy.models.edssemmodel import EDSSEMModel - model = EDSSEMModel(self, - auto_background=auto_background, - auto_add_lines=auto_add_lines, - *args, **kwargs) + + model = EDSSEMModel( + self, + auto_background=auto_background, + auto_add_lines=auto_add_lines, + *args, + **kwargs + ) return model diff --git a/exspy/signals/eds_tem.py b/exspy/signals/eds_tem.py index 575685841..e5639469b 100755 --- a/exspy/signals/eds_tem.py +++ b/exspy/signals/eds_tem.py @@ -46,40 +46,26 @@ @add_gui_method(toolkey="exspy.microscope_parameters_EDS_TEM") class EDSTEMParametersUI(BaseSetMetadataItems): - beam_energy = t.Float(t.Undefined, - label='Beam energy (keV)') - real_time = t.Float(t.Undefined, - label='Real time (s)') - tilt_stage = t.Float(t.Undefined, - label='Stage tilt (degree)') - live_time = t.Float(t.Undefined, - label='Live time (s)') - probe_area = t.Float(t.Undefined, - label='Beam/probe area (nm²)') - azimuth_angle = t.Float(t.Undefined, - label='Azimuth angle (degree)') - elevation_angle = t.Float(t.Undefined, - label='Elevation angle (degree)') - energy_resolution_MnKa = t.Float(t.Undefined, - label='Energy resolution MnKa (eV)') - beam_current = t.Float(t.Undefined, - label='Beam current (nA)') + beam_energy = t.Float(t.Undefined, label="Beam energy (keV)") + real_time = t.Float(t.Undefined, label="Real time (s)") + tilt_stage = t.Float(t.Undefined, label="Stage tilt (degree)") + live_time = t.Float(t.Undefined, label="Live time (s)") + probe_area = t.Float(t.Undefined, label="Beam/probe area (nm²)") + azimuth_angle = t.Float(t.Undefined, label="Azimuth angle (degree)") + elevation_angle = t.Float(t.Undefined, label="Elevation angle (degree)") + energy_resolution_MnKa = t.Float(t.Undefined, label="Energy resolution MnKa (eV)") + beam_current = t.Float(t.Undefined, label="Beam current (nA)") mapping = { - 'Acquisition_instrument.TEM.beam_energy': 'beam_energy', - 'Acquisition_instrument.TEM.Stage.tilt_alpha': 'tilt_stage', - 'Acquisition_instrument.TEM.Detector.EDS.live_time': 'live_time', - 'Acquisition_instrument.TEM.Detector.EDS.azimuth_angle': - 'azimuth_angle', - 'Acquisition_instrument.TEM.Detector.EDS.elevation_angle': - 'elevation_angle', - 'Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa': - 'energy_resolution_MnKa', - 'Acquisition_instrument.TEM.beam_current': - 'beam_current', - 'Acquisition_instrument.TEM.probe_area': - 'probe_area', - 'Acquisition_instrument.TEM.Detector.EDS.real_time': - 'real_time', } + "Acquisition_instrument.TEM.beam_energy": "beam_energy", + "Acquisition_instrument.TEM.Stage.tilt_alpha": "tilt_stage", + "Acquisition_instrument.TEM.Detector.EDS.live_time": "live_time", + "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle": "azimuth_angle", + "Acquisition_instrument.TEM.Detector.EDS.elevation_angle": "elevation_angle", + "Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa": "energy_resolution_MnKa", + "Acquisition_instrument.TEM.beam_current": "beam_current", + "Acquisition_instrument.TEM.probe_area": "probe_area", + "Acquisition_instrument.TEM.Detector.EDS.real_time": "real_time", + } class EDSTEMSpectrum(EDSSpectrum): @@ -91,17 +77,17 @@ class EDSTEMSpectrum(EDSSpectrum): def __init__(self, *args, **kwards): super().__init__(*args, **kwards) # Attributes defaults - if 'Acquisition_instrument.TEM.Detector.EDS' not in self.metadata: - if 'Acquisition_instrument.SEM.Detector.EDS' in self.metadata: + if 
"Acquisition_instrument.TEM.Detector.EDS" not in self.metadata: + if "Acquisition_instrument.SEM.Detector.EDS" in self.metadata: self.metadata.set_item( "Acquisition_instrument.TEM", - self.metadata.Acquisition_instrument.SEM) + self.metadata.Acquisition_instrument.SEM, + ) del self.metadata.Acquisition_instrument.SEM self._set_default_param() def _set_default_param(self): - """Set to value to default (defined in preferences) - """ + """Set to value to default (defined in preferences)""" mp = self.metadata mp.Signal.signal_type = "EDS_TEM" @@ -110,36 +96,51 @@ def _set_default_param(self): if "Acquisition_instrument.TEM.Stage.tilt_alpha" not in mp: mp.set_item( "Acquisition_instrument.TEM.Stage.tilt_alpha", - preferences.EDS.eds_tilt_stage) + preferences.EDS.eds_tilt_stage, + ) if "Acquisition_instrument.TEM.Detector.EDS.elevation_angle" not in mp: mp.set_item( "Acquisition_instrument.TEM.Detector.EDS.elevation_angle", - preferences.EDS.eds_detector_elevation) - if "Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa"\ - not in mp: - mp.set_item("Acquisition_instrument.TEM.Detector.EDS." + - "energy_resolution_MnKa", - preferences.EDS.eds_mn_ka) + preferences.EDS.eds_detector_elevation, + ) + if "Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa" not in mp: + mp.set_item( + "Acquisition_instrument.TEM.Detector.EDS." + "energy_resolution_MnKa", + preferences.EDS.eds_mn_ka, + ) if "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle" not in mp: mp.set_item( "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle", - preferences.EDS.eds_detector_azimuth) - - def set_microscope_parameters(self, - beam_energy=None, - live_time=None, - tilt_stage=None, - azimuth_angle=None, - elevation_angle=None, - energy_resolution_MnKa=None, - beam_current=None, - probe_area=None, - real_time=None, - display=True, - toolkit=None): - if set([beam_energy, live_time, tilt_stage, azimuth_angle, - elevation_angle, energy_resolution_MnKa, beam_current, - probe_area, real_time]) == {None}: + preferences.EDS.eds_detector_azimuth, + ) + + def set_microscope_parameters( + self, + beam_energy=None, + live_time=None, + tilt_stage=None, + azimuth_angle=None, + elevation_angle=None, + energy_resolution_MnKa=None, + beam_current=None, + probe_area=None, + real_time=None, + display=True, + toolkit=None, + ): + if set( + [ + beam_energy, + live_time, + tilt_stage, + azimuth_angle, + elevation_angle, + energy_resolution_MnKa, + beam_current, + probe_area, + real_time, + ] + ) == {None}: tem_par = EDSTEMParametersUI(self) return tem_par.gui(display=display, toolkit=toolkit) md = self.metadata @@ -147,41 +148,31 @@ def set_microscope_parameters(self, if beam_energy is not None: md.set_item("Acquisition_instrument.TEM.beam_energy ", beam_energy) if live_time is not None: - md.set_item( - "Acquisition_instrument.TEM.Detector.EDS.live_time", - live_time) + md.set_item("Acquisition_instrument.TEM.Detector.EDS.live_time", live_time) if tilt_stage is not None: - md.set_item( - "Acquisition_instrument.TEM.Stage.tilt_alpha", - tilt_stage) + md.set_item("Acquisition_instrument.TEM.Stage.tilt_alpha", tilt_stage) if azimuth_angle is not None: md.set_item( - "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle", - azimuth_angle) + "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle", azimuth_angle + ) if elevation_angle is not None: md.set_item( "Acquisition_instrument.TEM.Detector.EDS.elevation_angle", - elevation_angle) + elevation_angle, + ) if energy_resolution_MnKa is not None: md.set_item( - 
"Acquisition_instrument.TEM.Detector.EDS." + - "energy_resolution_MnKa", - energy_resolution_MnKa) + "Acquisition_instrument.TEM.Detector.EDS." + "energy_resolution_MnKa", + energy_resolution_MnKa, + ) if beam_current is not None: - md.set_item( - "Acquisition_instrument.TEM.beam_current", - beam_current) + md.set_item("Acquisition_instrument.TEM.beam_current", beam_current) if probe_area is not None: - md.set_item( - "Acquisition_instrument.TEM.probe_area", - probe_area) + md.set_item("Acquisition_instrument.TEM.probe_area", probe_area) if real_time is not None: - md.set_item( - "Acquisition_instrument.TEM.Detector.EDS.real_time", - real_time) + md.set_item("Acquisition_instrument.TEM.Detector.EDS.real_time", real_time) - set_microscope_parameters.__doc__ = \ - """ + set_microscope_parameters.__doc__ = """ Set the microscope parameters. If no arguments are given, raises an interactive mode to fill @@ -221,14 +212,17 @@ def set_microscope_parameters(self, 133.312296 135.0 - """.format(DISPLAY_DT, TOOLKIT_DT) + """.format( + DISPLAY_DT, TOOLKIT_DT + ) def _are_microscope_parameters_missing(self): """Check if the EDS parameters necessary for quantification are defined in metadata.""" must_exist = ( - 'Acquisition_instrument.TEM.beam_energy', - 'Acquisition_instrument.TEM.Detector.EDS.live_time',) + "Acquisition_instrument.TEM.beam_energy", + "Acquisition_instrument.TEM.Detector.EDS.live_time", + ) missing_parameters = [] for item in must_exist: @@ -281,43 +275,49 @@ def get_calibration_from(self, ref, nb_pix=1): for _axis in [ax_m, ax_ref]: if not _axis.is_uniform: raise NotImplementedError( - "The function is not implemented for non-uniform axes.") + "The function is not implemented for non-uniform axes." + ) ax_m.scale = ax_ref.scale ax_m.units = ax_ref.units ax_m.offset = ax_ref.offset # Setup metadata - if 'Acquisition_instrument.TEM' in ref.metadata: + if "Acquisition_instrument.TEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.TEM - elif 'Acquisition_instrument.SEM' in ref.metadata: + elif "Acquisition_instrument.SEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.SEM else: - raise ValueError("The reference has no metadata " - "'Acquisition_instrument.TEM '" - "or 'metadata.Acquisition_instrument.SEM'.") + raise ValueError( + "The reference has no metadata " + "'Acquisition_instrument.TEM '" + "or 'metadata.Acquisition_instrument.SEM'." + ) mp = self.metadata mp.Acquisition_instrument.TEM = mp_ref.deepcopy() if mp_ref.has_item("Detector.EDS.live_time"): - mp.Acquisition_instrument.TEM.Detector.EDS.live_time = \ + mp.Acquisition_instrument.TEM.Detector.EDS.live_time = ( mp_ref.Detector.EDS.live_time / nb_pix - - def quantification(self, - intensities, - method, - factors, - composition_units='atomic', - absorption_correction=False, - take_off_angle='auto', - thickness='auto', - convergence_criterion=0.5, - navigation_mask=1.0, - closing=True, - plot_result=False, - probe_area='auto', - max_iterations=30, - show_progressbar=None, - **kwargs): + ) + + def quantification( + self, + intensities, + method, + factors, + composition_units="atomic", + absorption_correction=False, + take_off_angle="auto", + thickness="auto", + convergence_criterion=0.5, + navigation_mask=1.0, + closing=True, + plot_result=False, + probe_area="auto", + max_iterations=30, + show_progressbar=None, + **kwargs, + ): """ Absorption corrected quantification using Cliff-Lorimer, the zeta-factor method, or ionization cross sections. 
The function iterates through @@ -405,120 +405,124 @@ def quantification(self, -------- vacuum_mask """ - if (not isinstance(intensities, (list, tuple)) or - not isinstance(intensities[0], BaseSignal)): - raise ValueError( - "The parameter `intensities` must be a list of signals." - ) + if not isinstance(intensities, (list, tuple)) or not isinstance( + intensities[0], BaseSignal + ): + raise ValueError("The parameter `intensities` must be a list of signals.") elif len(intensities) <= 1: raise ValueError("Several X-ray line intensities are required.") - + if isinstance(navigation_mask, float): if self.axes_manager.navigation_dimension > 0: navigation_mask = self.vacuum_mask(navigation_mask, closing) else: navigation_mask = None - xray_lines = [intensity.metadata.Sample.xray_lines[0] - for intensity in intensities] + xray_lines = [ + intensity.metadata.Sample.xray_lines[0] for intensity in intensities + ] it = 0 if absorption_correction: if show_progressbar is None: # pragma: no cover show_progressbar = hs.preferences.General.show_progressbar if show_progressbar: - pbar = progressbar(total=None, - desc='Absorption correction calculation') + pbar = progressbar(total=None, desc="Absorption correction calculation") - composition = utils.stack(intensities, lazy=False, - show_progressbar=False) + composition = utils.stack(intensities, lazy=False, show_progressbar=False) - if take_off_angle == 'auto': + if take_off_angle == "auto": toa = self.get_take_off_angle() else: toa = take_off_angle - #determining illumination area for cross sections quantification. - if method == 'cross_section': - if probe_area == 'auto': + # determining illumination area for cross sections quantification. + if method == "cross_section": + if probe_area == "auto": parameters = self.metadata.Acquisition_instrument.TEM if probe_area in parameters: probe_area = parameters.TEM.probe_area else: probe_area = self.get_probe_area( - navigation_axes=self.axes_manager.navigation_axes) + navigation_axes=self.axes_manager.navigation_axes + ) - int_stack = utils.stack(intensities, lazy=False, - show_progressbar=False) + int_stack = utils.stack(intensities, lazy=False, show_progressbar=False) comp_old = np.zeros_like(int_stack.data) - abs_corr_factor = None # initial + abs_corr_factor = None # initial - if method == 'CL': + if method == "CL": quantification_method = utils_eds.quantification_cliff_lorimer - kwargs = {"intensities" : int_stack.data, - "kfactors" : factors, - "absorption_correction" : abs_corr_factor, - "mask": navigation_mask} - - elif method == 'zeta': + kwargs = { + "intensities": int_stack.data, + "kfactors": factors, + "absorption_correction": abs_corr_factor, + "mask": navigation_mask, + } + + elif method == "zeta": quantification_method = utils_eds.quantification_zeta_factor - kwargs = {"intensities" : int_stack.data, - "zfactors" : factors, - "dose" : self._get_dose(method), - "absorption_correction" : abs_corr_factor} - - elif method =='cross_section': + kwargs = { + "intensities": int_stack.data, + "zfactors": factors, + "dose": self._get_dose(method), + "absorption_correction": abs_corr_factor, + } + + elif method == "cross_section": quantification_method = utils_eds.quantification_cross_section - kwargs = {"intensities" : int_stack.data, - "cross_sections" : factors, - "dose" : self._get_dose(method, **kwargs), - "absorption_correction" : abs_corr_factor} + kwargs = { + "intensities": int_stack.data, + "cross_sections": factors, + "dose": self._get_dose(method, **kwargs), + "absorption_correction": 
abs_corr_factor, + } else: - raise ValueError('Please specify method for quantification, ' - 'as "CL", "zeta" or "cross_section".') + raise ValueError( + "Please specify method for quantification, " + 'as "CL", "zeta" or "cross_section".' + ) while True: results = quantification_method(**kwargs) - if method == 'CL': - composition.data = results * 100. + if method == "CL": + composition.data = results * 100.0 if absorption_correction: if thickness is not None: mass_thickness = intensities[0].deepcopy() mass_thickness.data = self.CL_get_mass_thickness( - composition.split(), - thickness - ) - mass_thickness.metadata.General.title = 'Mass thickness' + composition.split(), thickness + ) + mass_thickness.metadata.General.title = "Mass thickness" else: raise ValueError( - 'Thickness is required for absorption correction ' - 'with k-factor method. Results will contain no ' - 'correction for absorption.' + "Thickness is required for absorption correction " + "with k-factor method. Results will contain no " + "correction for absorption." ) - elif method == 'zeta': + elif method == "zeta": composition.data = results[0] * 100 mass_thickness = intensities[0].deepcopy() mass_thickness.data = results[1] else: - composition.data = results[0] * 100. + composition.data = results[0] * 100.0 number_of_atoms = composition._deepcopy_with_new_data(results[1]) - if method == 'cross_section': + if method == "cross_section": if absorption_correction: - abs_corr_factor = utils_eds.get_abs_corr_cross_section(composition.split(), - number_of_atoms.split(), - toa, - probe_area) + abs_corr_factor = utils_eds.get_abs_corr_cross_section( + composition.split(), number_of_atoms.split(), toa, probe_area + ) kwargs["absorption_correction"] = abs_corr_factor else: if absorption_correction: - abs_corr_factor = utils_eds.get_abs_corr_zeta(composition.split(), - mass_thickness, - toa) + abs_corr_factor = utils_eds.get_abs_corr_zeta( + composition.split(), mass_thickness, toa + ) kwargs["absorption_correction"] = abs_corr_factor res_max = np.max(composition.data - comp_old) @@ -530,68 +534,68 @@ def quantification(self, if not absorption_correction or abs(res_max) < convergence_criterion: break elif it >= max_iterations: - raise Exception('Absorption correction failed as solution ' - f'did not converge after {max_iterations} ' - 'iterations') + raise Exception( + "Absorption correction failed as solution " + f"did not converge after {max_iterations} " + "iterations" + ) - if method == 'cross_section': + if method == "cross_section": number_of_atoms = composition._deepcopy_with_new_data(results[1]) number_of_atoms = number_of_atoms.split() composition = composition.split() else: composition = composition.split() - #convert ouput units to selection as required. - if composition_units == 'atomic': - if method != 'cross_section': + # convert ouput units to selection as required. + if composition_units == "atomic": + if method != "cross_section": composition = material.weight_to_atomic(composition) else: - if method == 'cross_section': + if method == "cross_section": composition = material.atomic_to_weight(composition) - #Label each of the elemental maps in the image stacks for composition. + # Label each of the elemental maps in the image stacks for composition. 
for i, xray_line in enumerate(xray_lines): element, line = utils_eds._get_element_and_line(xray_line) - composition[i].metadata.General.title = composition_units + \ - ' percent of ' + element + composition[i].metadata.General.title = ( + composition_units + " percent of " + element + ) composition[i].metadata.set_item("Sample.elements", ([element])) - composition[i].metadata.set_item( - "Sample.xray_lines", ([xray_line])) + composition[i].metadata.set_item("Sample.xray_lines", ([xray_line])) if plot_result and composition[i].axes_manager.navigation_size == 1: c = float(composition[i].data) print(f"{element} ({xray_line}): Composition = {c:.2f} percent") - #For the cross section method this is repeated for the number of atom maps - if method == 'cross_section': + # For the cross section method this is repeated for the number of atom maps + if method == "cross_section": for i, xray_line in enumerate(xray_lines): element, line = utils_eds._get_element_and_line(xray_line) - number_of_atoms[i].metadata.General.title = \ - 'atom counts of ' + element - number_of_atoms[i].metadata.set_item("Sample.elements", - ([element])) - number_of_atoms[i].metadata.set_item( - "Sample.xray_lines", ([xray_line])) + number_of_atoms[i].metadata.General.title = "atom counts of " + element + number_of_atoms[i].metadata.set_item("Sample.elements", ([element])) + number_of_atoms[i].metadata.set_item("Sample.xray_lines", ([xray_line])) if plot_result and composition[i].axes_manager.navigation_size != 1: utils.plot.plot_signals(composition, **kwargs) if absorption_correction: - _logger.info(f'Convergence reached after {it} interations.') + _logger.info(f"Convergence reached after {it} interations.") - if method == 'zeta': - mass_thickness.metadata.General.title = 'Mass thickness' + if method == "zeta": + mass_thickness.metadata.General.title = "Mass thickness" self.metadata.set_item("Sample.mass_thickness", mass_thickness) return composition, mass_thickness - elif method == 'cross_section': + elif method == "cross_section": return composition, number_of_atoms - elif method == 'CL': + elif method == "CL": if absorption_correction: - mass_thickness.metadata.General.title = 'Mass thickness' + mass_thickness.metadata.General.title = "Mass thickness" return composition, mass_thickness else: return composition else: - raise ValueError('Please specify method for quantification, as ' - '"CL", "zeta" or "cross_section"') - + raise ValueError( + "Please specify method for quantification, as " + '"CL", "zeta" or "cross_section"' + ) def vacuum_mask(self, threshold=1.0, closing=True, opening=False): """ @@ -624,10 +628,13 @@ def vacuum_mask(self, threshold=1.0, closing=True, opening=False): array([False, False, False, True], dtype=bool) """ if self.axes_manager.navigation_dimension == 0: - raise RuntimeError('Navigation dimenstion must be higher than 0 ' - 'to estimate a vacuum mask.') + raise RuntimeError( + "Navigation dimenstion must be higher than 0 " + "to estimate a vacuum mask." 
+ ) from scipy.ndimage import binary_dilation, binary_erosion - mask = (self.max(-1) <= threshold) + + mask = self.max(-1) <= threshold if closing: mask.data = binary_dilation(mask.data, border_value=0) mask.data = binary_erosion(mask.data, border_value=1) @@ -636,12 +643,14 @@ def vacuum_mask(self, threshold=1.0, closing=True, opening=False): mask.data = binary_dilation(mask.data, border_value=0) return mask - def decomposition(self, - normalize_poissonian_noise=True, - navigation_mask=1.0, - closing=True, - *args, - **kwargs): + def decomposition( + self, + normalize_poissonian_noise=True, + navigation_mask=1.0, + closing=True, + *args, + **kwargs, + ): """Apply a decomposition to a dataset with a choice of algorithms. The results are stored in ``self.learning_results``. @@ -751,12 +760,13 @@ def decomposition(self, navigation_mask = self.vacuum_mask(navigation_mask, closing) super().decomposition( normalize_poissonian_noise=normalize_poissonian_noise, - navigation_mask=navigation_mask, *args, **kwargs) - self.learning_results.loadings = np.nan_to_num( - self.learning_results.loadings) + navigation_mask=navigation_mask, + *args, + **kwargs, + ) + self.learning_results.loadings = np.nan_to_num(self.learning_results.loadings) - def create_model(self, auto_background=True, auto_add_lines=True, - *args, **kwargs): + def create_model(self, auto_background=True, auto_add_lines=True, *args, **kwargs): """Create a model for the current TEM EDS data. Parameters @@ -778,10 +788,14 @@ def create_model(self, auto_background=True, auto_add_lines=True, """ from exspy.models.edstemmodel import EDSTEMModel - model = EDSTEMModel(self, - auto_background=auto_background, - auto_add_lines=auto_add_lines, - *args, **kwargs) + + model = EDSTEMModel( + self, + auto_background=auto_background, + auto_add_lines=auto_add_lines, + *args, + **kwargs, + ) return model def get_probe_area(self, navigation_axes=None): @@ -817,22 +831,27 @@ def get_probe_area(self, navigation_axes=None): elif not isiterable(navigation_axes): navigation_axes = [navigation_axes] if len(navigation_axes) == 0: - raise ValueError("The navigation dimension is zero, the probe " - "area can not be calculated automatically.") + raise ValueError( + "The navigation dimension is zero, the probe " + "area can not be calculated automatically." + ) elif len(navigation_axes) > 2: - raise ValueError("The navigation axes corresponding to the probe " - "are ambiguous and the probe area can not be " - "calculated automatically.") + raise ValueError( + "The navigation axes corresponding to the probe " + "are ambiguous and the probe area can not be " + "calculated automatically." + ) scales = [] for axis in navigation_axes: try: if not isinstance(navigation_axes, DataAxis): axis = self.axes_manager[axis] - scales.append(axis.convert_to_units('nm', inplace=False)[0]) + scales.append(axis.convert_to_units("nm", inplace=False)[0]) except pint.DimensionalityError: - raise ValueError(f"The unit of the axis {axis} has not the " - "dimension of length.") + raise ValueError( + f"The unit of the axis {axis} has not the " "dimension of length." + ) if len(scales) == 1: probe_area = scales[0] ** 2 @@ -840,16 +859,18 @@ def get_probe_area(self, navigation_axes=None): probe_area = scales[0] * scales[1] if probe_area == 1: - warnings.warn("Please note that the probe area has been " - "calculated to be 1 nm², meaning that it is highly " - "likley that the scale of the navigation axes have not " - "been set correctly. 
Please read the user " - "guide for how to set this.") + warnings.warn( + "Please note that the probe area has been " + "calculated to be 1 nm², meaning that it is highly " + "likley that the scale of the navigation axes have not " + "been set correctly. Please read the user " + "guide for how to set this." + ) return probe_area - - def _get_dose(self, method, beam_current='auto', live_time='auto', - probe_area='auto'): + def _get_dose( + self, method, beam_current="auto", live_time="auto", probe_area="auto" + ): """ Calculates the total electron dose for the zeta-factor or cross section methods of quantification. @@ -887,34 +908,38 @@ def _get_dose(self, method, beam_current='auto', live_time='auto', parameters = self.metadata.Acquisition_instrument.TEM - if beam_current == 'auto': - beam_current = parameters.get_item('beam_current') + if beam_current == "auto": + beam_current = parameters.get_item("beam_current") if beam_current is None: - raise Exception('Electron dose could not be calculated as the ' - 'beam current is not set. It can set using ' - '`set_microscope_parameters()`.') + raise Exception( + "Electron dose could not be calculated as the " + "beam current is not set. It can set using " + "`set_microscope_parameters()`." + ) - if live_time == 'auto': - live_time = parameters.get_item('Detector.EDS.live_time') + if live_time == "auto": + live_time = parameters.get_item("Detector.EDS.live_time") if live_time is None: - raise Exception('Electron dose could not be calculated as ' - 'live time is not set. It can set using ' - '`set_microscope_parameters()`.') + raise Exception( + "Electron dose could not be calculated as " + "live time is not set. It can set using " + "`set_microscope_parameters()`." + ) - if method == 'cross_section': - if probe_area == 'auto': - probe_area = parameters.get_item('probe_area') + if method == "cross_section": + if probe_area == "auto": + probe_area = parameters.get_item("probe_area") if probe_area is None: probe_area = self.get_probe_area( - navigation_axes=self.axes_manager.navigation_axes) + navigation_axes=self.axes_manager.navigation_axes + ) return (live_time * beam_current * 1e-9) / (constants.e * probe_area) # 1e-9 is included here because the beam_current is in nA. 
- elif method == 'zeta': + elif method == "zeta": return live_time * beam_current * 1e-9 / constants.e else: raise Exception("Method need to be 'zeta' or 'cross_section'.") - @staticmethod def CL_get_mass_thickness(weight_percent, thickness): """ @@ -941,16 +966,21 @@ def CL_get_mass_thickness(weight_percent, thickness): else: thickness_map = thickness - elements = [intensity.metadata.Sample.elements[0] for intensity in weight_percent] + elements = [ + intensity.metadata.Sample.elements[0] for intensity in weight_percent + ] mass_thickness = np.zeros_like(weight_percent[0]) densities = np.array( - [elements_db[element]['Physical_properties']['density (g/cm^3)'] - for element in elements]) + [ + elements_db[element]["Physical_properties"]["density (g/cm^3)"] + for element in elements + ] + ) for density, element_composition in zip(densities, weight_percent): # convert composition from % to fraction: factor of 1E-2 # convert thickness from nm to m: factor of 1E-9 # convert density from g/cm3 to kg/m2: factor of 1E3 - elemental_mt = element_composition * thickness_map * density * 1E-8 + elemental_mt = element_composition * thickness_map * density * 1e-8 mass_thickness += elemental_mt return mass_thickness diff --git a/exspy/signals/eels.py b/exspy/signals/eels.py index d092f8e60..822c70924 100644 --- a/exspy/signals/eels.py +++ b/exspy/signals/eels.py @@ -28,7 +28,7 @@ import hyperspy.api as hs from hyperspy.signal import BaseSetMetadataItems, BaseSignal -from hyperspy._signals.signal1d import (Signal1D, LazySignal1D) +from hyperspy._signals.signal1d import Signal1D, LazySignal1D import hyperspy.axes from hyperspy.components1d import PowerLaw from hyperspy.misc.utils import display, isiterable, underline @@ -41,7 +41,7 @@ SPIKES_DIAGNOSIS_DOCSTRING, MASK_ZERO_LOSS_PEAK_WIDTH, SPIKES_REMOVAL_TOOL_DOCSTRING, - ) +) from hyperspy.docstrings.signal import ( SHOW_PROGRESSBAR_ARG, NUM_WORKERS_ARG, @@ -53,7 +53,10 @@ from exspy.docstrings.model import EELSMODEL_PARAMETERS from exspy.misc.elements import elements as elements_db from exspy.misc.eels.tools import get_edges_near_energy -from exspy.misc.eels.electron_inelastic_mean_free_path import iMFP_Iakoubovskii, iMFP_angular_correction +from exspy.misc.eels.electron_inelastic_mean_free_path import ( + iMFP_Iakoubovskii, + iMFP_angular_correction, +) from exspy.signal_tools import EdgesRange @@ -62,19 +65,13 @@ @add_gui_method(toolkey="exspy.microscope_parameters_EELS") class EELSTEMParametersUI(BaseSetMetadataItems): - convergence_angle = t.Float(t.Undefined, - label='Convergence semi-angle (mrad)') - beam_energy = t.Float(t.Undefined, - label='Beam energy (keV)') - collection_angle = t.Float(t.Undefined, - label='Collection semi-angle (mrad)') + convergence_angle = t.Float(t.Undefined, label="Convergence semi-angle (mrad)") + beam_energy = t.Float(t.Undefined, label="Beam energy (keV)") + collection_angle = t.Float(t.Undefined, label="Collection semi-angle (mrad)") mapping = { - 'Acquisition_instrument.TEM.convergence_angle': - 'convergence_angle', - 'Acquisition_instrument.TEM.beam_energy': - 'beam_energy', - 'Acquisition_instrument.TEM.Detector.EELS.collection_angle': - 'collection_angle', + "Acquisition_instrument.TEM.convergence_angle": "convergence_angle", + "Acquisition_instrument.TEM.beam_energy": "beam_energy", + "Acquisition_instrument.TEM.Detector.EELS.collection_angle": "collection_angle", } @@ -91,13 +88,12 @@ def __init__(self, *args, **kwargs): self.subshells = set() self.elements = set() self.edges = list() - if 
hasattr(self.metadata, 'Sample') and \ - hasattr(self.metadata.Sample, 'elements'): + if hasattr(self.metadata, "Sample") and hasattr( + self.metadata.Sample, "elements" + ): self.add_elements(self.metadata.Sample.elements) self.axes_manager.signal_axes[0].is_binned = True - self._edge_markers = {"names": [], - "lines": None, - "texts": None} + self._edge_markers = {"names": [], "lines": None, "texts": None} def add_elements(self, elements, include_pre_edges=False): """Declare the elemental composition of the sample. @@ -131,7 +127,8 @@ def add_elements(self, elements, include_pre_edges=False): "Input must be in the form of a tuple. For example, " "if `s` is the variable containing this EELS spectrum:\n " ">>> s.add_elements(('C',))\n" - "See the docstring for more information.") + "See the docstring for more information." + ) for element in elements: if isinstance(element, bytes): @@ -140,10 +137,10 @@ def add_elements(self, elements, include_pre_edges=False): self.elements.add(element) else: raise ValueError( - "%s is not a valid symbol of a chemical element" - % element) - if not hasattr(self.metadata, 'Sample'): - self.metadata.add_node('Sample') + "%s is not a valid symbol of a chemical element" % element + ) + if not hasattr(self.metadata, "Sample"): + self.metadata.add_node("Sample") self.metadata.Sample.elements = list(self.elements) if self.elements: self.generate_subshells(include_pre_edges) @@ -163,24 +160,30 @@ def generate_subshells(self, include_pre_edges=False): if not include_pre_edges: start_energy = Eaxis[0] else: - start_energy = 0. + start_energy = 0.0 end_energy = Eaxis[-1] for element in self.elements: e_shells = list() - for shell in elements_db[element][ - 'Atomic_properties']['Binding_energies']: - if shell[-1] != 'a': - energy = (elements_db[element]['Atomic_properties'] - ['Binding_energies'][shell]['onset_energy (eV)']) + for shell in elements_db[element]["Atomic_properties"]["Binding_energies"]: + if shell[-1] != "a": + energy = elements_db[element]["Atomic_properties"][ + "Binding_energies" + ][shell]["onset_energy (eV)"] if start_energy <= energy <= end_energy: - subshell = '%s_%s' % (element, shell) + subshell = "%s_%s" % (element, shell) if subshell not in self.subshells: - self.subshells.add( - '%s_%s' % (element, shell)) + self.subshells.add("%s_%s" % (element, shell)) e_shells.append(subshell) - def edges_at_energy(self, energy='interactive', width=10, only_major=False, - order='closest', display=True, toolkit=None): + def edges_at_energy( + self, + energy="interactive", + width=10, + only_major=False, + order="closest", + display=True, + toolkit=None, + ): """Show EELS edges according to an energy range selected from the spectrum or within a provided energy window @@ -208,15 +211,16 @@ def edges_at_energy(self, energy='interactive', width=10, only_major=False, table or ASCII table, depends on the environment. """ - if energy == 'interactive': + if energy == "interactive": er = EdgesRange(self, interactive=True) return er.gui(display=display, toolkit=toolkit) else: self.print_edges_near_energy(energy, width, only_major, order) @staticmethod - def print_edges_near_energy(energy=None, width=10, only_major=False, - order='closest', edges=None): + def print_edges_near_energy( + energy=None, width=10, only_major=False, order="closest", edges=None + ): """Find and print a table of edges near a given energy that are within the given energy window. 
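The EELSSpectrum hunks above cover add_elements, generate_subshells and the edge look-up helpers. A small sketch of the intended call pattern, with synthetic data and a hypothetical energy calibration:

import numpy as np
import hyperspy.api as hs

# Synthetic core-loss spectrum; exspy registers the "EELS" signal type.
s = hs.signals.Signal1D(np.random.poisson(100, size=2048).astype(float))
s.set_signal_type("EELS")
ax = s.axes_manager.signal_axes[0]
ax.units = "eV"
ax.scale = 0.5       # hypothetical dispersion
ax.offset = 400.0    # start of the energy-loss range

# Registers the elements and every subshell whose onset lies inside the
# roughly 400-1424 eV signal range (Ti-L, O-K, ...).
s.add_elements(("Ti", "O"))

# Tabulate edges within a 20 eV window around 532 eV, closest first.
s.print_edges_near_energy(532, width=20)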
@@ -245,28 +249,26 @@ def print_edges_near_energy(energy=None, width=10, only_major=False, """ if edges is None and energy is not None: - edges = get_edges_near_energy(energy=energy, width=width, - only_major=only_major, order=order) + edges = get_edges_near_energy( + energy=energy, width=width, only_major=only_major, order=order + ) elif edges is None and energy is None: - raise ValueError('Either energy or edges should be provided.') + raise ValueError("Either energy or edges should be provided.") table = PrettyTable() - table.field_names = [ - 'edge', - 'onset energy (eV)', - 'relevance', - 'description'] + table.field_names = ["edge", "onset energy (eV)", "relevance", "description"] for edge in edges: - element, shell = edge.split('_') - shell_dict = elements_db[element]['Atomic_properties'][ - 'Binding_energies'][shell] + element, shell = edge.split("_") + shell_dict = elements_db[element]["Atomic_properties"]["Binding_energies"][ + shell + ] - onset = shell_dict['onset_energy (eV)'] - relevance = shell_dict['relevance'] - threshold = shell_dict['threshold'] - edge_ = shell_dict['edge'] - description = threshold + '. '*(threshold !='' and edge_ !='') + edge_ + onset = shell_dict["onset_energy (eV)"] + relevance = shell_dict["relevance"] + threshold = shell_dict["threshold"] + edge_ = shell_dict["edge"] + description = threshold + ". " * (threshold != "" and edge_ != "") + edge_ table.add_row([edge, onset, relevance, description]) @@ -319,16 +321,17 @@ def estimate_zero_loss_peak_centre(self, mask=None): return zlpc def align_zero_loss_peak( - self, - calibrate=True, - also_align=[], - print_stats=True, - subpixel=True, - mask=None, - signal_range=None, - show_progressbar=None, - crop=True, - **kwargs): + self, + calibrate=True, + also_align=[], + print_stats=True, + subpixel=True, + mask=None, + signal_range=None, + show_progressbar=None, + crop=True, + **kwargs, + ): """Align the zero-loss peak. This function first aligns the spectra using the result of @@ -401,8 +404,10 @@ def align_zero_loss_peak( def substract_from_offset(value, signals): # Test that axes is uniform if not self.axes_manager[-1].is_uniform: - raise NotImplementedError("Support for EELS signals with " - "non-uniform signal axes is not yet implemented.") + raise NotImplementedError( + "Support for EELS signals with " + "non-uniform signal axes is not yet implemented." 
+ ) if isinstance(value, da.Array): value = value.compute() for signal in signals: @@ -411,14 +416,16 @@ def substract_from_offset(value, signals): def estimate_zero_loss_peak_centre(s, mask, signal_range): if signal_range: - zlpc = s.isig[signal_range[0]:signal_range[1]].\ - estimate_zero_loss_peak_centre(mask=mask) + zlpc = s.isig[ + signal_range[0] : signal_range[1] + ].estimate_zero_loss_peak_centre(mask=mask) else: zlpc = s.estimate_zero_loss_peak_centre(mask=mask) return zlpc zlpc = estimate_zero_loss_peak_centre( - self, mask=mask, signal_range=signal_range) + self, mask=mask, signal_range=signal_range + ) mean_ = np.nanmean(zlpc.data) @@ -433,26 +440,31 @@ def estimate_zero_loss_peak_centre(s, mask, signal_range): # axes_manager of the signal later in the workflow may result in # a wrong shift_array shift_array = shift_array.compute() - signal.shift1D( - shift_array, crop=crop, show_progressbar=show_progressbar) + signal.shift1D(shift_array, crop=crop, show_progressbar=show_progressbar) if calibrate is True: zlpc = estimate_zero_loss_peak_centre( - self, mask=mask, signal_range=signal_range) - substract_from_offset(np.nanmean(zlpc.data), - also_align + [self]) + self, mask=mask, signal_range=signal_range + ) + substract_from_offset(np.nanmean(zlpc.data), also_align + [self]) if subpixel is False: return - left, right = -3., 3. + left, right = -3.0, 3.0 if calibrate is False: left += mean_ right += mean_ - left = (left if left > self.axes_manager[-1].axis[0] - else self.axes_manager[-1].axis[0]) - right = (right if right < self.axes_manager[-1].axis[-1] - else self.axes_manager[-1].axis[-1]) + left = ( + left + if left > self.axes_manager[-1].axis[0] + else self.axes_manager[-1].axis[0] + ) + right = ( + right + if right < self.axes_manager[-1].axis[-1] + else self.axes_manager[-1].axis[-1] + ) if self.axes_manager.navigation_size > 1: self.align1D( @@ -462,16 +474,17 @@ def estimate_zero_loss_peak_centre(s, mask, signal_range): show_progressbar=show_progressbar, mask=mask, crop=crop, - **kwargs) + **kwargs, + ) if calibrate is True: zlpc = estimate_zero_loss_peak_centre( - self, mask=mask, signal_range=signal_range) - substract_from_offset(np.nanmean(zlpc.data), - also_align + [self]) + self, mask=mask, signal_range=signal_range + ) + substract_from_offset(np.nanmean(zlpc.data), also_align + [self]) + align_zero_loss_peak.__doc__ %= (SHOW_PROGRESSBAR_ARG, CROP_PARAMETER_DOC) - def get_zero_loss_peak_mask(self, zero_loss_peak_mask_width=5.0, - signal_mask=None): + def get_zero_loss_peak_mask(self, zero_loss_peak_mask_width=5.0, signal_mask=None): """Return boolean array with True value at the position of the zero loss peak. This mask can be used to restrict operation to the signal locations not marked as True (masked). 
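The hunks above reformat align_zero_loss_peak and get_zero_loss_peak_mask. A minimal sketch of the alignment workflow, using artificially shifted Gaussians as stand-ins for real zero-loss peaks:

import numpy as np
import hyperspy.api as hs

# Three low-loss spectra with a Gaussian standing in for the zero-loss
# peak, deliberately shifted at each navigation position.
energy = np.arange(-20, 80, 0.1)
shifts = np.array([0.0, 1.0, -1.5])
data = np.stack([np.exp(-((energy - c) ** 2)) * 1e4 for c in shifts])
s = hs.signals.Signal1D(data)
s.set_signal_type("EELS")
ax = s.axes_manager.signal_axes[0]
ax.scale, ax.offset, ax.units = 0.1, -20.0, "eV"

# Shift every spectrum so the ZLP maxima coincide and recalibrate the
# offset so the peak sits at 0 eV.
s.align_zero_loss_peak(calibrate=True, subpixel=True, print_stats=True)

# Boolean signal mask covering +/-2.5 eV around the aligned ZLP.
zlp_mask = s.get_zero_loss_peak_mask(zero_loss_peak_mask_width=5.0)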
@@ -487,7 +500,7 @@ def get_zero_loss_peak_mask(self, zero_loss_peak_mask_width=5.0, bool array """ zlpc = self.estimate_zero_loss_peak_centre() - (signal_axis, ) = self.axes_manager[self.axes_manager.signal_axes] + (signal_axis,) = self.axes_manager[self.axes_manager.signal_axes] axis = signal_axis.axis mini_value = zlpc.data.mean() - zero_loss_peak_mask_width / 2 maxi_value = zlpc.data.mean() + zero_loss_peak_mask_width / 2 @@ -498,43 +511,64 @@ def get_zero_loss_peak_mask(self, zero_loss_peak_mask_width=5.0, signal_mask = mask return signal_mask - get_zero_loss_peak_mask.__doc__ %= (SIGNAL_MASK_ARG) + get_zero_loss_peak_mask.__doc__ %= SIGNAL_MASK_ARG - def spikes_diagnosis(self, signal_mask=None, navigation_mask=None, - zero_loss_peak_mask_width=None, **kwargs): + def spikes_diagnosis( + self, + signal_mask=None, + navigation_mask=None, + zero_loss_peak_mask_width=None, + **kwargs, + ): if zero_loss_peak_mask_width is not None: - signal_mask = self.get_zero_loss_peak_mask(zero_loss_peak_mask_width, - signal_mask) - super().spikes_diagnosis(signal_mask=signal_mask, navigation_mask=None, - **kwargs) + signal_mask = self.get_zero_loss_peak_mask( + zero_loss_peak_mask_width, signal_mask + ) + super().spikes_diagnosis( + signal_mask=signal_mask, navigation_mask=None, **kwargs + ) spikes_diagnosis.__doc__ = SPIKES_DIAGNOSIS_DOCSTRING % MASK_ZERO_LOSS_PEAK_WIDTH - def spikes_removal_tool(self, signal_mask=None, - navigation_mask=None, - threshold='auto', - zero_loss_peak_mask_width=None, - interactive=True, - display=True, - toolkit=None): + def spikes_removal_tool( + self, + signal_mask=None, + navigation_mask=None, + threshold="auto", + zero_loss_peak_mask_width=None, + interactive=True, + display=True, + toolkit=None, + ): if zero_loss_peak_mask_width is not None: axis = self.axes_manager.signal_axes[0].axis # check the zero_loss is in the signal - if (axis[0] - zero_loss_peak_mask_width / 2 > 0 or - axis[-1] + zero_loss_peak_mask_width / 2 < 0): + if ( + axis[0] - zero_loss_peak_mask_width / 2 > 0 + or axis[-1] + zero_loss_peak_mask_width / 2 < 0 + ): raise ValueError("The zero loss peaks isn't in the energy range.") - signal_mask = self.get_zero_loss_peak_mask(zero_loss_peak_mask_width, - signal_mask) - super().spikes_removal_tool(signal_mask=signal_mask, - navigation_mask=navigation_mask, - threshold=threshold, - interactive=interactive, - display=display, toolkit=toolkit) + signal_mask = self.get_zero_loss_peak_mask( + zero_loss_peak_mask_width, signal_mask + ) + super().spikes_removal_tool( + signal_mask=signal_mask, + navigation_mask=navigation_mask, + threshold=threshold, + interactive=interactive, + display=display, + toolkit=toolkit, + ) + spikes_removal_tool.__doc__ = SPIKES_REMOVAL_TOOL_DOCSTRING % ( - SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG, MASK_ZERO_LOSS_PEAK_WIDTH, DISPLAY_DT, TOOLKIT_DT,) + SIGNAL_MASK_ARG, + NAVIGATION_MASK_ARG, + MASK_ZERO_LOSS_PEAK_WIDTH, + DISPLAY_DT, + TOOLKIT_DT, + ) - def estimate_elastic_scattering_intensity( - self, threshold, show_progressbar=None): + def estimate_elastic_scattering_intensity(self, threshold, show_progressbar=None): """Rough estimation of the elastic scattering intensity by truncation of a EELS low-loss spectrum. 
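# A short sketch of the spike-removal entry points in this hunk, reusing the
# low-loss signal `ll` from the sketch above; the 5 eV mask width is an
# illustrative value.
ll.spikes_diagnosis(zero_loss_peak_mask_width=5.0)
ll.spikes_removal_tool(
    threshold="auto",
    zero_loss_peak_mask_width=5.0,  # keep the ZLP out of the spike search
)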
@@ -588,31 +622,32 @@ def estimating_function(data, threshold=None): return data.sum() else: from scipy.integrate import simps + axis = ax.axis[:ind] return simps(y=data, x=axis) - I0 = self.map(estimating_function, threshold=threshold, - ragged=False, show_progressbar=show_progressbar, - inplace=False) - I0.metadata.General.title = ( - self.metadata.General.title + ' elastic intensity') + I0 = self.map( + estimating_function, + threshold=threshold, + ragged=False, + show_progressbar=show_progressbar, + inplace=False, + ) + I0.metadata.General.title = self.metadata.General.title + " elastic intensity" I0.set_signal_type("") - if self.tmp_parameters.has_item('filename'): + if self.tmp_parameters.has_item("filename"): I0.tmp_parameters.filename = ( - self.tmp_parameters.filename + - '_elastic_intensity') + self.tmp_parameters.filename + "_elastic_intensity" + ) I0.tmp_parameters.folder = self.tmp_parameters.folder - I0.tmp_parameters.extension = \ - self.tmp_parameters.extension + I0.tmp_parameters.extension = self.tmp_parameters.extension return I0 + estimate_elastic_scattering_intensity.__doc__ %= SHOW_PROGRESSBAR_ARG - def estimate_elastic_scattering_threshold(self, - window=10., - tol=None, - window_length=5, - polynomial_order=3, - start=1.): + def estimate_elastic_scattering_threshold( + self, window=10.0, tol=None, window_length=5, polynomial_order=3, start=1.0 + ): """Calculate the first inflexion point of the spectrum derivative within a window. @@ -679,15 +714,16 @@ def estimate_elastic_scattering_threshold(self, # Progress Bar axis = self.axes_manager.signal_axes[0] - min_index, max_index = axis.value_range_to_indices(start, - start + window) + min_index, max_index = axis.value_range_to_indices(start, start + window) if max_index < min_index + 10: raise ValueError("Please select a bigger window") s = self.isig[min_index:max_index].deepcopy() if window_length: - s.smooth_savitzky_golay(polynomial_order=polynomial_order, - window_length=window_length, - differential_order=1) + s.smooth_savitzky_golay( + polynomial_order=polynomial_order, + window_length=window_length, + differential_order=1, + ) else: s = s.derivative(-1) if tol is None: @@ -706,26 +742,28 @@ def estimate_elastic_scattering_threshold(self, if np.isnan(threshold.data).any(): _logger.warning( "No inflexion point could be found in some positions " - "that have been marked with nans.") + "that have been marked with nans." + ) # Create spectrum image, stop and return value threshold.metadata.General.title = ( - self.metadata.General.title + - ' elastic scattering threshold') - if self.tmp_parameters.has_item('filename'): + self.metadata.General.title + " elastic scattering threshold" + ) + if self.tmp_parameters.has_item("filename"): threshold.tmp_parameters.filename = ( - self.tmp_parameters.filename + - '_elastic_scattering_threshold') + self.tmp_parameters.filename + "_elastic_scattering_threshold" + ) threshold.tmp_parameters.folder = self.tmp_parameters.folder - threshold.tmp_parameters.extension = \ - self.tmp_parameters.extension + threshold.tmp_parameters.extension = self.tmp_parameters.extension threshold.set_signal_type("") return threshold - def estimate_thickness(self, - threshold=None, - zlp=None, - density=None, - mean_free_path=None,): + def estimate_thickness( + self, + threshold=None, + zlp=None, + density=None, + mean_free_path=None, + ): """Estimates the thickness (relative and absolute) of a sample using the log-ratio method. 
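# A hedged sketch chaining the elastic-scattering helpers reformatted above,
# still using the low-loss signal `ll`; keyword values are illustrative.
threshold = ll.estimate_elastic_scattering_threshold(window=10.0)
i0 = ll.estimate_elastic_scattering_intensity(threshold=threshold)
# Relative thickness t/lambda via the log-ratio method; passing `density` or
# `mean_free_path` as well returns an absolute thickness in nm.
t_over_lambda = ll.estimate_thickness(threshold=threshold)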
@@ -770,13 +808,15 @@ def estimate_thickness(self, axis = self.axes_manager.signal_axes[0] total_intensity = self.integrate1D(axis.index_in_array).data if threshold is None and zlp is None: - raise ValueError("Please provide one of the following keywords: " - "`threshold`, `zlp`") + raise ValueError( + "Please provide one of the following keywords: " "`threshold`, `zlp`" + ) if zlp is not None: I0 = zlp.integrate1D(axis.index_in_array).data else: I0 = self.estimate_elastic_scattering_intensity( - threshold=threshold,).data + threshold=threshold, + ).data t_over_lambda = np.log(total_intensity / I0) @@ -798,44 +838,40 @@ def estimate_thickness(self, if mean_free_path is None: mean_free_path = iMFP_Iakoubovskii( electron_energy=self.metadata.Acquisition_instrument.TEM.beam_energy, - density=density) + density=density, + ) _logger.info(f"The estimated iMFP is {mean_free_path} nm") else: _logger.warning( "Computing the thickness without taking into account the effect of " "the limited collection angle, what usually leads to underestimating " "the thickness. To perform the angular corrections you must provide " - "the density of the material.") + "the density of the material." + ) s = self._get_navigation_signal(data=t_over_lambda) if mean_free_path is not None: s.data *= mean_free_path - s.metadata.General.title = ( - self.metadata.General.title + - ' thickness (nm)') + s.metadata.General.title = self.metadata.General.title + " thickness (nm)" s.metadata.Signal.quantity = "thickness (nm)" else: _logger.warning( "Computing the relative thickness. To compute the absolute " - "thickness provide the `mean_free_path` and/or the `density`") - s.metadata.General.title = (self.metadata.General.title + - ' $\\frac{t}{\\lambda}$') + "thickness provide the `mean_free_path` and/or the `density`" + ) + s.metadata.General.title = ( + self.metadata.General.title + " $\\frac{t}{\\lambda}$" + ) s.metadata.Signal.quantity = "$\\frac{t}{\\lambda}$" - if self.tmp_parameters.has_item('filename'): - s.tmp_parameters.filename = ( - self.tmp_parameters.filename + - '_thickness') + if self.tmp_parameters.has_item("filename"): + s.tmp_parameters.filename = self.tmp_parameters.filename + "_thickness" s.tmp_parameters.folder = self.tmp_parameters.folder - s.tmp_parameters.extension = \ - self.tmp_parameters.extension + s.tmp_parameters.extension = self.tmp_parameters.extension s = s.transpose(signal_axes=[]) s.set_signal_type("") return s - def fourier_log_deconvolution(self, - zlp, - add_zlp=False, - crop=False): + def fourier_log_deconvolution(self, zlp, add_zlp=False, crop=False): """Performs fourier-log deconvolution. 
Parameters @@ -868,7 +904,8 @@ def fourier_log_deconvolution(self, self._check_signal_dimension_equals_one() if not self.axes_manager.signal_axes[0].is_uniform: raise NotImplementedError( - "This operation is not yet implemented for non-uniform energy axes") + "This operation is not yet implemented for non-uniform energy axes" + ) s = self.deepcopy() zlp_size = zlp.axes_manager.signal_axes[0].size self_size = self.axes_manager.signal_axes[0].size @@ -876,7 +913,7 @@ def fourier_log_deconvolution(self, # Conservative new size to solve the wrap-around problem size = zlp_size + self_size - 1 # Calculate optimal FFT padding for performance - complex_result = (zlp.data.dtype.kind == 'c' or s.data.dtype.kind == 'c') + complex_result = zlp.data.dtype.kind == "c" or s.data.dtype.kind == "c" size = optimal_fft_size(size, not complex_result) axis = self.axes_manager.signal_axes[0] @@ -886,46 +923,69 @@ def fourier_log_deconvolution(self, if self._lazy or zlp._lazy: j1 = z * da.log(j / z).map_blocks(np.nan_to_num) else: - j1 = z * np.nan_to_num(np.log(j / z)) sdata = np.fft.irfft(j1, axis=axis.index_in_array) - s.data = sdata[s.axes_manager._get_data_slice( - [(axis.index_in_array, slice(None, self_size)), ])] + s.data = sdata[ + s.axes_manager._get_data_slice( + [ + (axis.index_in_array, slice(None, self_size)), + ] + ) + ] if add_zlp is True: if self_size >= zlp_size: if self._lazy: _slices_before = s.axes_manager._get_data_slice( - [(axis.index_in_array, slice(None, zlp_size)), ]) + [ + (axis.index_in_array, slice(None, zlp_size)), + ] + ) _slices_after = s.axes_manager._get_data_slice( - [(axis.index_in_array, slice(zlp_size, None)), ]) - s.data = da.stack((s.data[_slices_before] + zlp.data, - s.data[_slices_after]), - axis=axis.index_in_array) + [ + (axis.index_in_array, slice(zlp_size, None)), + ] + ) + s.data = da.stack( + (s.data[_slices_before] + zlp.data, s.data[_slices_after]), + axis=axis.index_in_array, + ) else: - s.data[s.axes_manager._get_data_slice( - [(axis.index_in_array, slice(None, zlp_size)), ]) + s.data[ + s.axes_manager._get_data_slice( + [ + (axis.index_in_array, slice(None, zlp_size)), + ] + ) ] += zlp.data else: - s.data += zlp.data[s.axes_manager._get_data_slice( - [(axis.index_in_array, slice(None, self_size)), ])] - - s.metadata.General.title = (s.metadata.General.title + - ' after Fourier-log deconvolution') - if s.tmp_parameters.has_item('filename'): + s.data += zlp.data[ + s.axes_manager._get_data_slice( + [ + (axis.index_in_array, slice(None, self_size)), + ] + ) + ] + + s.metadata.General.title = ( + s.metadata.General.title + " after Fourier-log deconvolution" + ) + if s.tmp_parameters.has_item("filename"): s.tmp_parameters.filename = ( - self.tmp_parameters.filename + - '_after_fourier_log_deconvolution') + self.tmp_parameters.filename + "_after_fourier_log_deconvolution" + ) if crop is True: - s.crop(axis.index_in_axes_manager, - None, int(-tapped_channels)) + s.crop(axis.index_in_axes_manager, None, int(-tapped_channels)) return s - def fourier_ratio_deconvolution(self, ll, - fwhm=None, - threshold=None, - extrapolate_lowloss=True, - extrapolate_coreloss=True): + def fourier_ratio_deconvolution( + self, + ll, + fwhm=None, + threshold=None, + extrapolate_lowloss=True, + extrapolate_coreloss=True, + ): """Performs Fourier-ratio deconvolution. The core-loss should have the background removed. 
To reduce the noise @@ -962,27 +1022,25 @@ def fourier_ratio_deconvolution(self, ll, self._check_signal_dimension_equals_one() if not self.axes_manager.signal_axes[0].is_uniform: raise NotImplementedError( - "This operation is not yet implemented for non-uniform energy axes.") + "This operation is not yet implemented for non-uniform energy axes." + ) if not ll.axes_manager.signal_axes[0].is_uniform: raise NotImplementedError( "The low-loss energy axis is non-uniform. " - "This operation is not yet implemented for non-uniform energy axes") + "This operation is not yet implemented for non-uniform energy axes" + ) orig_cl_size = self.axes_manager.signal_axes[0].size if threshold is None: threshold = ll.estimate_elastic_scattering_threshold() if extrapolate_coreloss is True: - cl = self.power_law_extrapolation( - window_size=20, - extrapolation_size=100) + cl = self.power_law_extrapolation(window_size=20, extrapolation_size=100) else: cl = self.deepcopy() if extrapolate_lowloss is True: - ll = ll.power_law_extrapolation( - window_size=100, - extrapolation_size=100) + ll = ll.power_law_extrapolation(window_size=100, extrapolation_size=100) else: ll = ll.deepcopy() @@ -998,7 +1056,9 @@ def fourier_ratio_deconvolution(self, ll, axis = ll.axes_manager.signal_axes[0] if fwhm is None: - fwhm = float(ll.get_current_signal().estimate_peak_width()._get_current_data()) + fwhm = float( + ll.get_current_signal().estimate_peak_width()._get_current_data() + ) _logger.info("FWHM = %1.2f" % fwhm) I0 = ll.estimate_elastic_scattering_intensity(threshold=threshold) @@ -1009,34 +1069,36 @@ def fourier_ratio_deconvolution(self, ll, I0 = I0.reshape(I0_shape) from hyperspy.components1d import Gaussian + g = Gaussian() g.sigma.value = fwhm / 2.3548 g.A.value = 1 g.centre.value = 0 zl = g.function( - np.linspace(axis.offset, - axis.offset + axis.scale * (size - 1), - size)) + np.linspace(axis.offset, axis.offset + axis.scale * (size - 1), size) + ) z = np.fft.rfft(zl) jk = np.fft.rfft(cl.data, n=size, axis=axis.index_in_array) jl = np.fft.rfft(ll.data, n=size, axis=axis.index_in_array) - zshape = [1, ] * len(cl.data.shape) + zshape = [ + 1, + ] * len(cl.data.shape) zshape[axis.index_in_array] = jk.shape[axis.index_in_array] - cl.data = np.fft.irfft(z.reshape(zshape) * jk / jl, - axis=axis.index_in_array) + cl.data = np.fft.irfft(z.reshape(zshape) * jk / jl, axis=axis.index_in_array) cl.data *= I0 cl.crop(-1, None, int(orig_cl_size)) - cl.metadata.General.title = (self.metadata.General.title + - ' after Fourier-ratio deconvolution') - if cl.tmp_parameters.has_item('filename'): + cl.metadata.General.title = ( + self.metadata.General.title + " after Fourier-ratio deconvolution" + ) + if cl.tmp_parameters.has_item("filename"): cl.tmp_parameters.filename = ( - self.tmp_parameters.filename + - 'after_fourier_ratio_deconvolution') + self.tmp_parameters.filename + "after_fourier_ratio_deconvolution" + ) return cl - def richardson_lucy_deconvolution(self, psf, iterations=15, - show_progressbar=None, - num_workers=None): + def richardson_lucy_deconvolution( + self, psf, iterations=15, show_progressbar=None, num_workers=None + ): """1D Richardson-Lucy Poissonian deconvolution of the spectrum by the given kernel. @@ -1067,7 +1129,8 @@ def richardson_lucy_deconvolution(self, psf, iterations=15, """ if not self.axes_manager.signal_axes[0].is_uniform: raise NotImplementedError( - "This operation is not yet implemented for non-uniform energy axes.") + "This operation is not yet implemented for non-uniform energy axes." 
+ ) if show_progressbar is None: show_progressbar = hs.preferences.General.show_progressbar self._check_signal_dimension_equals_one() @@ -1075,27 +1138,33 @@ def richardson_lucy_deconvolution(self, psf, iterations=15, maxval = self.axes_manager.navigation_size show_progressbar = show_progressbar and (maxval > 0) - def deconv_function(signal, kernel=None, - iterations=15, psf_size=None): + def deconv_function(signal, kernel=None, iterations=15, psf_size=None): imax = kernel.argmax() result = np.array(signal).copy() mimax = psf_size - 1 - imax for _ in range(iterations): - first = np.convolve(kernel, result)[imax: imax + psf_size] - result *= np.convolve(kernel[::-1], signal / - first)[mimax:mimax + psf_size] + first = np.convolve(kernel, result)[imax : imax + psf_size] + result *= np.convolve(kernel[::-1], signal / first)[ + mimax : mimax + psf_size + ] return result - ds = self.map(deconv_function, kernel=psf, iterations=iterations, - psf_size=psf_size, show_progressbar=show_progressbar, - num_workers=num_workers, ragged=False, inplace=False) + ds = self.map( + deconv_function, + kernel=psf, + iterations=iterations, + psf_size=psf_size, + show_progressbar=show_progressbar, + num_workers=num_workers, + ragged=False, + inplace=False, + ) ds.metadata.General.title += ( - ' after Richardson-Lucy deconvolution %i iterations' % - iterations) - if ds.tmp_parameters.has_item('filename'): - ds.tmp_parameters.filename += ( - '_after_R-L_deconvolution_%iiter' % iterations) + " after Richardson-Lucy deconvolution %i iterations" % iterations + ) + if ds.tmp_parameters.has_item("filename"): + ds.tmp_parameters.filename += "_after_R-L_deconvolution_%iiter" % iterations return ds richardson_lucy_deconvolution.__doc__ %= (SHOW_PROGRESSBAR_ARG, NUM_WORKERS_ARG) @@ -1108,14 +1177,14 @@ def _are_microscope_parameters_missing(self, ignore_parameters=[]): The `ignore_parameters` list can be to ignore parameters. 
""" must_exist = ( - 'Acquisition_instrument.TEM.convergence_angle', - 'Acquisition_instrument.TEM.beam_energy', - 'Acquisition_instrument.TEM.Detector.EELS.collection_angle',) + "Acquisition_instrument.TEM.convergence_angle", + "Acquisition_instrument.TEM.beam_energy", + "Acquisition_instrument.TEM.Detector.EELS.collection_angle", + ) missing_parameters = [] for item in must_exist: exists = self.metadata.has_item(item) - if exists is False and item.split( - '.')[-1] not in ignore_parameters: + if exists is False and item.split(".")[-1] not in ignore_parameters: missing_parameters.append(item) if missing_parameters: _logger.info("Missing parameters {}".format(missing_parameters)) @@ -1123,12 +1192,14 @@ def _are_microscope_parameters_missing(self, ignore_parameters=[]): else: return False - def set_microscope_parameters(self, - beam_energy=None, - convergence_angle=None, - collection_angle=None, - toolkit=None, - display=True): + def set_microscope_parameters( + self, + beam_energy=None, + convergence_angle=None, + collection_angle=None, + toolkit=None, + display=True, + ): if set((beam_energy, convergence_angle, collection_angle)) == {None}: tem_par = EELSTEMParametersUI(self) return tem_par.gui(toolkit=toolkit, display=display) @@ -1137,14 +1208,15 @@ def set_microscope_parameters(self, mp.set_item("Acquisition_instrument.TEM.beam_energy", beam_energy) if convergence_angle is not None: mp.set_item( - "Acquisition_instrument.TEM.convergence_angle", - convergence_angle) + "Acquisition_instrument.TEM.convergence_angle", convergence_angle + ) if collection_angle is not None: mp.set_item( "Acquisition_instrument.TEM.Detector.EELS.collection_angle", - collection_angle) - set_microscope_parameters.__doc__ = \ - """ + collection_angle, + ) + + set_microscope_parameters.__doc__ = """ Set the microscope parameters that are necessary to calculate the GOS. @@ -1159,13 +1231,13 @@ def set_microscope_parameters(self, The collection semi-angle in mrad. {} {} - """.format(TOOLKIT_DT, DISPLAY_DT) + """.format( + TOOLKIT_DT, DISPLAY_DT + ) - def power_law_extrapolation(self, - window_size=20, - extrapolation_size=1024, - add_noise=False, - fix_neg_r=False): + def power_law_extrapolation( + self, window_size=20, extrapolation_size=1024, add_noise=False, fix_neg_r=False + ): """ Extrapolate the spectrum to the right using a powerlaw. 
@@ -1192,12 +1264,11 @@ def power_law_extrapolation(self, self._check_signal_dimension_equals_one() axis = self.axes_manager.signal_axes[0] s = self.deepcopy() - s.metadata.General.title += ( - ' %i channels extrapolated' % - extrapolation_size) - if s.tmp_parameters.has_item('filename'): + s.metadata.General.title += " %i channels extrapolated" % extrapolation_size + if s.tmp_parameters.has_item("filename"): s.tmp_parameters.filename += ( - '_%i_channels_extrapolated' % extrapolation_size) + "_%i_channels_extrapolated" % extrapolation_size + ) new_shape = list(self.data.shape) new_shape[axis.index_in_array] += extrapolation_size if self._lazy: @@ -1205,17 +1276,17 @@ def power_law_extrapolation(self, right_shape = list(self.data.shape) right_shape[axis.index_in_array] = extrapolation_size right_chunks = list(self.data.chunks) - right_chunks[axis.index_in_array] = (extrapolation_size, ) + right_chunks[axis.index_in_array] = (extrapolation_size,) right_data = da.zeros( shape=tuple(right_shape), chunks=tuple(right_chunks), - dtype=self.data.dtype) - s.data = da.concatenate( - [left_data, right_data], axis=axis.index_in_array) + dtype=self.data.dtype, + ) + s.data = da.concatenate([left_data, right_data], axis=axis.index_in_array) else: # just old code s.data = np.zeros(new_shape) - s.data[..., :axis.size] = self.data + s.data[..., : axis.size] = self.data s.get_dimensions_from_data() pl = PowerLaw() pl._axes_manager = self.axes_manager @@ -1223,7 +1294,8 @@ def power_law_extrapolation(self, s, axis.index2value(axis.size - window_size), axis.index2value(axis.size - 1), - out=True) + out=True, + ) if fix_neg_r is True: A = np.where(r <= 0, 0, A) # If the signal is binned we need to bin the extrapolated power law @@ -1239,31 +1311,29 @@ def power_law_extrapolation(self, rightslice = (..., None) axisslice = (None, slice(axis.size, None)) else: - rightslice = (..., ) - axisslice = (slice(axis.size, None), ) + rightslice = (...,) + axisslice = (slice(axis.size, None),) right_chunks[axis.index_in_array] = 1 x = da.from_array( s.axes_manager.signal_axes[0].axis[axisslice], - chunks=(extrapolation_size, )) + chunks=(extrapolation_size,), + ) A = A[rightslice] r = r[rightslice] - right_data = factor * A * x**(-r) - s.data = da.concatenate( - [left_data, right_data], axis=axis.index_in_array) + right_data = factor * A * x ** (-r) + s.data = da.concatenate([left_data, right_data], axis=axis.index_in_array) else: - s.data[..., axis.size:] = ( - factor * A[..., np.newaxis] * - s.axes_manager.signal_axes[0].axis[np.newaxis, axis.size:]**( - -r[..., np.newaxis])) + s.data[..., axis.size :] = ( + factor + * A[..., np.newaxis] + * s.axes_manager.signal_axes[0].axis[np.newaxis, axis.size :] + ** (-r[..., np.newaxis]) + ) return s - def kramers_kronig_analysis(self, - zlp=None, - iterations=1, - n=None, - t=None, - delta=0.5, - full_output=False): + def kramers_kronig_analysis( + self, zlp=None, iterations=1, n=None, t=None, delta=0.5, full_output=False + ): r""" Calculate the complex dielectric function from a single scattering distribution (SSD) using the Kramers-Kronig relations. @@ -1358,77 +1428,86 @@ def kramers_kronig_analysis(self, """ if not self.axes_manager.signal_axes[0].is_uniform: raise NotImplementedError( - "This operation is not yet implemented for non-uniform energy axes.") + "This operation is not yet implemented for non-uniform energy axes." + ) output = {} if iterations == 1: # In this case s.data is not modified so there is no need to make # a deep copy. 
- s = self.isig[0.:] + s = self.isig[0.0:] else: - s = self.isig[0.:].deepcopy() + s = self.isig[0.0:].deepcopy() - sorig = self.isig[0.:] + sorig = self.isig[0.0:] # Avoid singularity at 0 if s.axes_manager.signal_axes[0].axis[0] == 0: s = s.isig[1:] sorig = self.isig[1:] # Constants and units - me = constants.value( - 'electron mass energy equivalent in MeV') * 1e3 # keV + me = constants.value("electron mass energy equivalent in MeV") * 1e3 # keV # Mapped parameters - self._are_microscope_parameters_missing( - ignore_parameters=['convergence_angle']) + self._are_microscope_parameters_missing(ignore_parameters=["convergence_angle"]) e0 = s.metadata.Acquisition_instrument.TEM.beam_energy - beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.\ - collection_angle + beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle axis = s.axes_manager.signal_axes[0] eaxis = axis.axis.copy() if isinstance(zlp, hyperspy.signal.BaseSignal): - if (zlp.axes_manager.navigation_dimension == - self.axes_manager.navigation_dimension): + if ( + zlp.axes_manager.navigation_dimension + == self.axes_manager.navigation_dimension + ): if zlp.axes_manager.signal_dimension == 0: i0 = zlp.data else: i0 = zlp.integrate1D(axis.index_in_axes_manager).data else: - raise ValueError('The ZLP signal dimensions are not ' - 'compatible with the dimensions of the ' - 'low-loss signal') + raise ValueError( + "The ZLP signal dimensions are not " + "compatible with the dimensions of the " + "low-loss signal" + ) # The following prevents errors if the signal is a single spectrum if len(i0) != 1: - i0 = i0.reshape( - np.insert(i0.shape, axis.index_in_array, 1)) + i0 = i0.reshape(np.insert(i0.shape, axis.index_in_array, 1)) elif isinstance(zlp, numbers.Number): i0 = zlp else: - raise ValueError('The zero-loss peak input is not valid, it must be\ - in the BaseSignal class or a Number.') + raise ValueError( + "The zero-loss peak input is not valid, it must be\ + in the BaseSignal class or a Number." + ) if isinstance(t, hyperspy.signal.BaseSignal): - if (t.axes_manager.navigation_dimension == - self.axes_manager.navigation_dimension) and ( - t.axes_manager.signal_dimension == 0): + if ( + t.axes_manager.navigation_dimension + == self.axes_manager.navigation_dimension + ) and (t.axes_manager.signal_dimension == 0): t = t.data - t = t.reshape( - np.insert(t.shape, axis.index_in_array, 1)) + t = t.reshape(np.insert(t.shape, axis.index_in_array, 1)) else: - raise ValueError('The thickness signal dimensions are not ' - 'compatible with the dimensions of the ' - 'low-loss signal') + raise ValueError( + "The thickness signal dimensions are not " + "compatible with the dimensions of the " + "low-loss signal" + ) elif isinstance(t, np.ndarray) and t.shape and t.shape != (1,): - raise ValueError("thickness must be a HyperSpy signal or a number," - " not a NumPy array.") + raise ValueError( + "thickness must be a HyperSpy signal or a number," " not a NumPy array." + ) # Slicer to get the signal data from 0 to axis.size slicer = s.axes_manager._get_data_slice( - [(axis.index_in_array, slice(None, axis.size)), ]) + [ + (axis.index_in_array, slice(None, axis.size)), + ] + ) # Kinetic definitions - ke = e0 * (1 + e0 / 2. 
/ me) / (1 + e0 / me) ** 2 + ke = e0 * (1 + e0 / 2.0 / me) / (1 + e0 / me) ** 2 tgt = e0 * (2 * me + e0) / (me + e0) rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me) @@ -1439,28 +1518,34 @@ def kramers_kronig_analysis(self, # We start by the "angular corrections" Im = s.data / (np.log(1 + (beta * tgt / eaxis) ** 2)) / axis.scale if n is None and t is None: - raise ValueError("The thickness and the refractive index are " - "not defined. Please provide one of them.") + raise ValueError( + "The thickness and the refractive index are " + "not defined. Please provide one of them." + ) elif n is not None and t is not None: - raise ValueError("Please provide the refractive index OR the " - "thickness information, not both") + raise ValueError( + "Please provide the refractive index OR the " + "thickness information, not both" + ) elif n is not None: # normalize using the refractive index. - K = (Im / eaxis).sum(axis=axis.index_in_array, keepdims=True) \ - * axis.scale - K = (K / (np.pi / 2) / (1 - 1. / n ** 2)) + K = (Im / eaxis).sum( + axis=axis.index_in_array, keepdims=True + ) * axis.scale + K = K / (np.pi / 2) / (1 - 1.0 / n**2) # K = (K / (np.pi / 2) / (1 - 1. / n ** 2)).reshape( # np.insert(K.shape, axis.index_in_array, 1)) # Calculate the thickness only if possible and required - if zlp is not None and (full_output is True or - iterations > 1): - te = (332.5 * K * ke / i0) + if zlp is not None and (full_output is True or iterations > 1): + te = 332.5 * K * ke / i0 if full_output is True: - output['thickness'] = te + output["thickness"] = te elif t is not None: if zlp is None: - raise ValueError("The ZLP must be provided when the " - "thickness is used for normalization.") + raise ValueError( + "The ZLP must be provided when the " + "thickness is used for normalization." + ) # normalize using the thickness K = t * i0 / (332.5 * ke) te = t @@ -1473,8 +1558,7 @@ def kramers_kronig_analysis(self, # make it double the closest upper value to workaround the # wrap-around problem. esize = optimal_fft_size(2 * axis.size) - q = -2 * np.fft.fft(Im, esize, - axis.index_in_array).imag / esize + q = -2 * np.fft.fft(Im, esize, axis.index_in_array).imag / esize q[slicer] *= -1 q = np.fft.fft(q, axis=axis.index_in_array) @@ -1494,56 +1578,64 @@ def kramers_kronig_analysis(self, # Epsilon appears: # We calculate the real and imaginary parts of the CDF - e1 = Re / (Re ** 2 + Im ** 2) - e2 = Im / (Re ** 2 + Im ** 2) + e1 = Re / (Re**2 + Im**2) + e2 = Im / (Re**2 + Im**2) if iterations > 1 and zlp is not None: # Surface losses correction: # Calculates the surface ELF from a vacuum border effect # A simulated surface plasmon is subtracted from the ELF - Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im - adep = (tgt / (eaxis + delta) * - np.arctan(beta * tgt / axis.axis) - - beta / 1000. / - (beta ** 2 + axis.axis ** 2. / tgt ** 2)) + Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2**2) - Im + adep = tgt / (eaxis + delta) * np.arctan( + beta * tgt / axis.axis + ) - beta / 1000.0 / (beta**2 + axis.axis**2.0 / tgt**2) Srfint = 2000 * K * adep * Srfelf / rk0 / te * axis.scale s.data = sorig.data - Srfint - _logger.debug('Iteration number: %d / %d', io + 1, iterations) + _logger.debug("Iteration number: %d / %d", io + 1, iterations) if iterations == io + 1 and full_output is True: sp = sorig._deepcopy_with_new_data(Srfint) sp.metadata.General.title += ( - " estimated surface plasmon excitation.") - output['surface plasmon estimation'] = sp + " estimated surface plasmon excitation." 
+ ) + output["surface plasmon estimation"] = sp del sp del Srfint eps = s._deepcopy_with_new_data(e1 + e2 * 1j) del s eps.set_signal_type("DielectricFunction") - eps.metadata.General.title = (self.metadata.General.title + - 'dielectric function ' - '(from Kramers-Kronig analysis)') - if eps.tmp_parameters.has_item('filename'): + eps.metadata.General.title = ( + self.metadata.General.title + "dielectric function " + "(from Kramers-Kronig analysis)" + ) + if eps.tmp_parameters.has_item("filename"): eps.tmp_parameters.filename = ( - self.tmp_parameters.filename + - '_CDF_after_Kramers_Kronig_transform') - if 'thickness' in output: + self.tmp_parameters.filename + "_CDF_after_Kramers_Kronig_transform" + ) + if "thickness" in output: # As above,prevent errors if the signal is a single spectrum if len(te) != 1: - te = te[self.axes_manager._get_data_slice( - [(axis.index_in_array, 0)])] + te = te[self.axes_manager._get_data_slice([(axis.index_in_array, 0)])] thickness = eps._get_navigation_signal(data=te) thickness.metadata.General.title = ( - self.metadata.General.title + ' thickness ' - '(calculated using Kramers-Kronig analysis)') - output['thickness'] = thickness + self.metadata.General.title + " thickness " + "(calculated using Kramers-Kronig analysis)" + ) + output["thickness"] = thickness if full_output is False: return eps else: return eps, output - def create_model(self, low_loss=None, auto_background=True, auto_add_edges=True, - GOS="gosh", gos_file_path=None, dictionary=None): + def create_model( + self, + low_loss=None, + auto_background=True, + auto_add_edges=True, + GOS="gosh", + gos_file_path=None, + dictionary=None, + ): """Create a model for the current EELS data. Parameters @@ -1560,24 +1652,26 @@ def create_model(self, low_loss=None, auto_background=True, auto_add_edges=True, If the signal axis is a non-uniform axis. """ from exspy.models.eelsmodel import EELSModel + if low_loss is not None and not self.axes_manager.signal_axes[0].is_uniform: raise NotImplementedError( "Multiple scattering is not implemented for spectra with a " "non-uniform energy axis. To create a model that does not " "account for multiple-scattering do not set the `ll` keyword." - ) - model = EELSModel(self, - low_loss=low_loss, - auto_background=auto_background, - auto_add_edges=auto_add_edges, - GOS=GOS, - dictionary=dictionary) + ) + model = EELSModel( + self, + low_loss=low_loss, + auto_background=auto_background, + auto_add_edges=auto_add_edges, + GOS=GOS, + dictionary=dictionary, + ) return model create_model.__doc__ %= EELSMODEL_PARAMETERS - def plot(self, plot_edges=False, only_edges=('Major', 'Minor'), - **kwargs): + def plot(self, plot_edges=False, only_edges=("Major", "Minor"), **kwargs): """ Plot the EELS spectrum. Markers indicating the position of the EELS edges can be added. 
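# Sketch of the model-creation and plotting entry points reformatted above,
# reusing the core-loss signal `s` from the earlier sketch; the hydrogenic GOS
# and the element list are illustrative choices.
s.add_elements(("Mn", "Fe"))
m = s.create_model(auto_background=True, auto_add_edges=True, GOS="hydrogenic")
m.fit()
# Overlay onset-energy markers for the major and minor edges of the elements
# defined in s.metadata.Sample.elements.
s.plot(plot_edges=True, only_edges=("Major", "Minor"))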
@@ -1603,14 +1697,13 @@ def plot(self, plot_edges=False, only_edges=('Major', 'Minor'), edges = self._get_edges_to_plot(plot_edges, only_edges) self._plot_edge_labels(edges) - self._plot.signal_plot.events.closed.connect( - self._on_signal_plot_closing, []) + self._plot.signal_plot.events.closed.connect(self._on_signal_plot_closing, []) def _on_signal_plot_closing(self): self._edge_markers = {"lines": None, "texts": None, "names": []} def _get_offsets_and_segments(self, edges): - index = np.array([float(v) for v in edges.values()]) # dictionaries + index = np.array([float(v) for v in edges.values()]) # dictionaries segments = np.empty((len(index), 2, 2)) offsets = np.empty((len(index), 2)) for i, ind in enumerate(index): @@ -1620,19 +1713,21 @@ def _get_offsets_and_segments(self, edges): def _initialise_markers(self): self._edge_markers["lines"] = Lines( - segments=np.empty((0, 2, 2)), transform="relative", - color='black', shift=np.array([0., 0.19]), - ) + segments=np.empty((0, 2, 2)), + transform="relative", + color="black", + shift=np.array([0.0, 0.19]), + ) self._edge_markers["texts"] = Texts( offsets=np.empty((0, 2)), texts=np.empty((0,)), offset_transform="relative", - rotation=np.pi/2, + rotation=np.pi / 2, horizontalalignment="left", verticalalignment="bottom", - facecolor='black', - shift=.2, - ) + facecolor="black", + shift=0.2, + ) for key in ["lines", "texts"]: self.add_marker(self._edge_markers[key], render_figure=False) @@ -1660,9 +1755,11 @@ def _get_edges_to_plot(self, plot_edges, only_edges): try: elements = self.metadata.Sample.elements except AttributeError: - raise ValueError("No elements defined. Add them with " - "s.add_elements, or specify elements, edge " - "families or edges directly") + raise ValueError( + "No elements defined. Add them with " + "s.add_elements, or specify elements, edge " + "families or edges directly" + ) else: extra_element_edge_family.extend(np.atleast_1d(plot_edges)) try: @@ -1670,7 +1767,6 @@ def _get_edges_to_plot(self, plot_edges, only_edges): except: elements = [] - element_edge_family = elements + extra_element_edge_family edges_dict = self._get_edges(element_edge_family, only_edges) @@ -1685,27 +1781,28 @@ def _get_edges(self, element_edge_family, only_edges): names_and_energies = {} shells = ["K", "L", "M", "N", "O"] - errmsg = ("Edge family '{}' is not supported. Supported edge family " - "is {}.") + errmsg = "Edge family '{}' is not supported. Supported edge family " "is {}." 
for member in element_edge_family: try: element, ss = member.split("_") if len(ss) == 1: - memtype = 'family' + memtype = "family" if ss not in shells: raise AttributeError(errmsg.format(ss, shells)) if len(ss) == 2: - memtype = 'edge' + memtype = "edge" if ss[0] not in shells: raise AttributeError(errmsg.format(ss[0], shells)) except ValueError: element = member - ss = '' - memtype = 'element' + ss = "" + memtype = "element" try: - Binding_energies = elements_db[element]["Atomic_properties"]["Binding_energies"] + Binding_energies = elements_db[element]["Atomic_properties"][ + "Binding_energies" + ] except KeyError as err: raise ValueError("'{}' is not a valid element".format(element)) from err @@ -1717,13 +1814,13 @@ def _get_edges(self, element_edge_family, only_edges): isInRng = axis_min < energy < axis_max isSameFamily = ss in edge - if memtype == 'element': + if memtype == "element": flag = isInRel & isInRng edge_key = element + "_" + edge - elif memtype == 'edge': + elif memtype == "edge": flag = isInRng & (edge == ss) edge_key = member - elif memtype == 'family': + elif memtype == "family": flag = isInRel & isInRng & isSameFamily edge_key = element + "_" + edge @@ -1762,7 +1859,7 @@ def _remove_edge_labels(self, edge_names=None, render_figure=True): self._edge_markers["names"] = np.delete(self._edge_markers["names"], ind) if render_figure: - self._render_figure(plot=['signal_plot']) + self._render_figure(plot=["signal_plot"]) def _add_edge_labels(self, edges, render_figure=True): """ @@ -1781,7 +1878,9 @@ def _add_edge_labels(self, edges, render_figure=True): edges_dict = {} for edge in edges: element, ss = edge.split("_") - Binding_energies = elements_db[element]["Atomic_properties"]["Binding_energies"] + Binding_energies = elements_db[element]["Atomic_properties"][ + "Binding_energies" + ] edges_dict[edge] = Binding_energies[ss]["onset_energy (eV)"] edges = edges_dict @@ -1795,7 +1894,7 @@ def _add_edge_labels(self, edges, render_figure=True): self._edge_markers["names"] = np.append(self._edge_markers["names"], names) if render_figure: - self._render_figure(plot=['signal_plot']) + self._render_figure(plot=["signal_plot"]) def _get_complementary_edges(self, edges, only_major=False): """ @@ -1823,38 +1922,41 @@ def _get_complementary_edges(self, edges, only_major=False): elements = set() for edge in edges: - element, _ = edge.split('_') + element, _ = edge.split("_") elements.update([element]) for element in elements: - ss_info = elements_db[element]['Atomic_properties'][ - 'Binding_energies'] + ss_info = elements_db[element]["Atomic_properties"]["Binding_energies"] for subshell in ss_info: - sse = ss_info[subshell]['onset_energy (eV)'] - ssr = ss_info[subshell]['relevance'] + sse = ss_info[subshell]["onset_energy (eV)"] + ssr = ss_info[subshell]["relevance"] if only_major: - if ssr != 'Major': + if ssr != "Major": continue - edge = element + '_' + subshell - if (emin <= sse <= emax) and (subshell[-1] != 'a') and \ - (edge not in edges): + edge = element + "_" + subshell + if ( + (emin <= sse <= emax) + and (subshell[-1] != "a") + and (edge not in edges) + ): complmt_edges.append(edge) return complmt_edges - def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, - out=None): + def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, out=None): factors = self._validate_rebin_args_and_get_factors( - new_shape=new_shape, - scale=scale) - m = super().rebin(new_shape=new_shape, scale=scale, crop=crop, - dtype=dtype, out=out) + new_shape=new_shape, scale=scale + ) + 
m = super().rebin( + new_shape=new_shape, scale=scale, crop=crop, dtype=dtype, out=out + ) m = out or m - time_factor = np.prod([factors[axis.index_in_array] - for axis in m.axes_manager.navigation_axes]) + time_factor = np.prod( + [factors[axis.index_in_array] for axis in m.axes_manager.navigation_axes] + ) mdeels = m.metadata m.get_dimensions_from_data() if m.metadata.get_item("Acquisition_instrument.TEM.Detector.EELS"): @@ -1864,17 +1966,21 @@ def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, if "exposure" in mdeels: mdeels.exposure *= time_factor else: - _logger.info('No dwell_time could be found in the metadata so ' - 'this has not been updated.') + _logger.info( + "No dwell_time could be found in the metadata so " + "this has not been updated." + ) if out is None: return m else: out.events.data_changed.trigger(obj=out) return m + rebin.__doc__ = hyperspy.signal.BaseSignal.rebin.__doc__ - def vacuum_mask(self, threshold=10.0, start_energy=None, - closing=True, opening=False): + def vacuum_mask( + self, threshold=10.0, start_energy=None, closing=True, opening=False + ): """ Generate mask of the vacuum region @@ -1898,15 +2004,18 @@ def vacuum_mask(self, threshold=10.0, start_energy=None, The mask of the region. """ if self.axes_manager.navigation_dimension == 0: - raise RuntimeError('Navigation dimenstion must be higher than 0 ' - 'to estimate a vacuum mask.') + raise RuntimeError( + "Navigation dimenstion must be higher than 0 " + "to estimate a vacuum mask." + ) signal_axis = self.axes_manager.signal_axes[0] if start_energy is None: start_energy = 0.75 * signal_axis.high_value - mask = (self.isig[start_energy:].mean(-1) <= threshold) + mask = self.isig[start_energy:].mean(-1) <= threshold from scipy.ndimage import binary_dilation, binary_erosion + if closing: mask.data = binary_dilation(mask.data, border_value=0) mask.data = binary_erosion(mask.data, border_value=1) diff --git a/exspy/test/components/test_EELSarctan.py b/exspy/test/components/test_EELSarctan.py index cf15e8198..55d7d20b2 100644 --- a/exspy/test/components/test_EELSarctan.py +++ b/exspy/test/components/test_EELSarctan.py @@ -27,6 +27,5 @@ def test_function(): g.k.value = 2 g.x0.value = 1 np.testing.assert_allclose(g.function(0), 4.63647609) - np.testing.assert_allclose(g.function(1), 10*np.pi/2) - np.testing.assert_allclose(g.function(1e4), 10*np.pi, 1e-4) - + np.testing.assert_allclose(g.function(1), 10 * np.pi / 2) + np.testing.assert_allclose(g.function(1e4), 10 * np.pi, 1e-4) diff --git a/exspy/test/components/test_double_power_law.py b/exspy/test/components/test_double_power_law.py index 83021e90f..bde908c3d 100644 --- a/exspy/test/components/test_double_power_law.py +++ b/exspy/test/components/test_double_power_law.py @@ -39,13 +39,12 @@ def test_function(): np.testing.assert_allclose(g.function(10), 0.15948602) assert g.grad_A(2) == 3 np.testing.assert_allclose(g.grad_r(4), -0.3662041) - assert g.grad_origin(2) == -6 - assert g.grad_shift(2) == -12 - assert g.grad_ratio(2) == 3 + assert g.grad_origin(2) == -6 + assert g.grad_shift(2) == -12 + assert g.grad_ratio(2) == 3 class TestDoublePowerLaw: - def setup_method(self, method): s = hs.signals.Signal1D(np.zeros(1024)) s.axes_manager[0].offset = 100 @@ -53,6 +52,7 @@ def setup_method(self, method): m = s.create_model() exspy = pytest.importorskip("exspy") from exspy.components import DoublePowerLaw + m.append(DoublePowerLaw()) m[0].A.value = 1000 m[0].r.value = 4 @@ -76,4 +76,4 @@ def test_fit(self, binned): m.fit_component(g, 
signal_range=(None, None)) np.testing.assert_allclose(g.A.value, 1000.0) np.testing.assert_allclose(g.r.value, 4.0) - np.testing.assert_allclose(g.ratio.value, 200.) \ No newline at end of file + np.testing.assert_allclose(g.ratio.value, 200.0) diff --git a/exspy/test/components/test_pes_core_line_shape.py b/exspy/test/components/test_pes_core_line_shape.py index 1fbfc22af..914f1d3c8 100644 --- a/exspy/test/components/test_pes_core_line_shape.py +++ b/exspy/test/components/test_pes_core_line_shape.py @@ -29,10 +29,21 @@ def test_PESCoreLineShape(): x = np.linspace(-5, 15, 10) np.testing.assert_allclose( core_line.function(x), - np.array([8.97054744e-04, 0.365234208, 7.09463858, 6.57499512, - 0.290714653, 6.13260141e-04, 6.17204216e-08, 2.96359844e-13, - 6.78916184e-20, 7.42026292e-28]) - ) + np.array( + [ + 8.97054744e-04, + 0.365234208, + 7.09463858, + 6.57499512, + 0.290714653, + 6.13260141e-04, + 6.17204216e-08, + 2.96359844e-13, + 6.78916184e-20, + 7.42026292e-28, + ] + ), + ) assert core_line._position is core_line.origin @@ -43,14 +54,25 @@ def test_PESCoreLineShape_shirley(): x = np.linspace(-5, 15, 10) np.testing.assert_allclose( core_line.function(x), - np.array([0.144159014, 0.504843825, 7.16330182, 6.57790840, - 0.290720786, 6.13260758e-04, 6.17204245e-08, 2.96359844e-13, - 6.78916184e-20, 7.42026292e-28]) - ) + np.array( + [ + 0.144159014, + 0.504843825, + 7.16330182, + 6.57790840, + 0.290720786, + 6.13260758e-04, + 6.17204245e-08, + 2.96359844e-13, + 6.78916184e-20, + 7.42026292e-28, + ] + ), + ) np.testing.assert_allclose(core_line.function(x), core_line.function_nd(x)) -@pytest.mark.parametrize('Shirley', [False, True]) +@pytest.mark.parametrize("Shirley", [False, True]) def test_PESCoreLineShape_fit(Shirley): # Component parameter values A = 10 @@ -59,7 +81,7 @@ def test_PESCoreLineShape_fit(Shirley): shirley = 0.01 if Shirley else 0.0 offset, scale, size = 0, 0.1, 100 - x = np.linspace(offset, scale*size, size) + x = np.linspace(offset, scale * size, size) comp = PESCoreLineShape(A=A, FWHM=FWHM, origin=origin) comp.Shirley = Shirley comp.shirley.value = shirley @@ -72,29 +94,29 @@ def test_PESCoreLineShape_fit(Shirley): core_line = PESCoreLineShape(A=1, FWHM=1.5, origin=0.5) core_line.Shirley = Shirley m.append(core_line) - m.fit(grad='analytical') + m.fit(grad="analytical") np.testing.assert_allclose(core_line.A.value, A, rtol=0.1) np.testing.assert_allclose(abs(core_line.FWHM.value), FWHM, rtol=0.1) np.testing.assert_allclose(core_line.origin.value, origin, rtol=0.1) np.testing.assert_allclose(core_line.shirley.value, shirley, rtol=0.1) -@pytest.mark.parametrize('Shirley', [False, True]) +@pytest.mark.parametrize("Shirley", [False, True]) def test_PESCoreLineShape_function_nd(Shirley): - A, FWHM, origin = 10, 1.5, 0. 
+ A, FWHM, origin = 10, 1.5, 0.0 core_line = PESCoreLineShape(A=A, FWHM=FWHM, origin=origin) core_line.Shirley = Shirley core_line.shirley.value = 0.01 if Shirley else 0.0 x = np.linspace(-5, 15, 1000) - s = hs.signals.Signal1D(np.array([x]*2)) + s = hs.signals.Signal1D(np.array([x] * 2)) # Manually set to test function_nd core_line._axes_manager = s.axes_manager core_line._create_arrays() - core_line.A.map['values'] = [A] * 2 - core_line.FWHM.map['values'] = [FWHM] * 2 - core_line.origin.map['values'] = [origin] * 2 - core_line.shirley.map['values'] = [core_line.shirley.value] * 2 + core_line.A.map["values"] = [A] * 2 + core_line.FWHM.map["values"] = [FWHM] * 2 + core_line.origin.map["values"] = [origin] * 2 + core_line.shirley.map["values"] = [core_line.shirley.value] * 2 values = core_line.function_nd(x) assert values.shape == (2, len(x)) @@ -102,7 +124,7 @@ def test_PESCoreLineShape_function_nd(Shirley): np.testing.assert_allclose(v, core_line.function(x), rtol=0.5) -@pytest.mark.parametrize('Shirley', [False, True]) +@pytest.mark.parametrize("Shirley", [False, True]) def test_recreate_component(Shirley): core_line = PESCoreLineShape(A=10, FWHM=1.5, origin=0.5) core_line.Shirley = Shirley @@ -115,5 +137,3 @@ def test_recreate_component(Shirley): m2 = s.create_model() m2._load_dictionary(model_dict) assert m2[0].Shirley == Shirley - - diff --git a/exspy/test/components/test_pes_see.py b/exspy/test/components/test_pes_see.py index 81238ddd1..b3506bb1f 100644 --- a/exspy/test/components/test_pes_see.py +++ b/exspy/test/components/test_pes_see.py @@ -24,15 +24,26 @@ from exspy.components import SEE - def test_see(): see = SEE(A=10, Phi=1.5, B=0.5) x = np.linspace(-5, 15, 10) np.testing.assert_allclose( see.function(x), - np.array([0.0, 0.0, 0.0, 8.4375, 0.342983001, 0.0675685032, - 0.0236279967, 0.010861538, 0.005860978, 0.003514161]) - ) + np.array( + [ + 0.0, + 0.0, + 0.0, + 8.4375, + 0.342983001, + 0.0675685032, + 0.0236279967, + 0.010861538, + 0.005860978, + 0.003514161, + ] + ), + ) np.testing.assert_allclose(see.function(x), see.function_nd(x)) _ = SEE(A=10, Phi=1.5, B=0.5, sigma=0) @@ -45,7 +56,7 @@ def test_see_fit(): B = 0.5 offset, scale, size = 0, 0.1, 100 - x = np.linspace(offset, scale*size, size) + x = np.linspace(offset, scale * size, size) s = hs.signals.Signal1D(SEE(A=A, Phi=Phi, B=B).function(x)) axis = s.axes_manager[0] axis.offset, axis.scale = offset, scale @@ -53,9 +64,8 @@ def test_see_fit(): m = s.create_model() see = SEE(A=1, Phi=1.5, B=0.5) m.append(see) - with ignore_warning(message="divide by zero", - category=RuntimeWarning): - m.fit(grad='analytical') + with ignore_warning(message="divide by zero", category=RuntimeWarning): + m.fit(grad="analytical") np.testing.assert_allclose(see.A.value, A, rtol=0.1) np.testing.assert_allclose(see.Phi.value, Phi, rtol=0.1) np.testing.assert_allclose(see.B.value, B, rtol=0.1) @@ -65,14 +75,14 @@ def test_see_function_nd(): A, Phi, B = 10, 1.5, 0.5 see = SEE(A=A, Phi=Phi, B=B) x = np.linspace(-5, 15, 10) - s = hs.signals.Signal1D(np.array([x]*2)) + s = hs.signals.Signal1D(np.array([x] * 2)) # Manually set to test function_nd see._axes_manager = s.axes_manager see._create_arrays() - see.A.map['values'] = [A] * 2 - see.Phi.map['values'] = [Phi] * 2 - see.B.map['values'] = [B] * 2 + see.A.map["values"] = [A] * 2 + see.Phi.map["values"] = [Phi] * 2 + see.B.map["values"] = [B] * 2 values = see.function_nd(x) assert values.shape == (2, 10) diff --git a/exspy/test/components/test_pes_voigt.py b/exspy/test/components/test_pes_voigt.py 
index a329fce2e..de3025182 100644 --- a/exspy/test/components/test_pes_voigt.py +++ b/exspy/test/components/test_pes_voigt.py @@ -41,6 +41,7 @@ def test_function(): np.testing.assert_allclose(g.function(1), 5.06863535) assert g._position is g.centre + def test_function_resolution(): g = PESVoigt() g.area.value = 5 @@ -52,19 +53,21 @@ def test_function_resolution(): np.testing.assert_allclose(g.function(1), 3.70472923) assert g._position is g.centre + def test_function_spinorbit(): g = PESVoigt() g.area.value = 5 g.FWHM.value = 0.5 g.gamma.value = 0.2 g.centre.value = 1 - g.spin_orbit_splitting=True - spin_orbit_branching_ratio=0.4 - spin_orbit_splitting_energy=0.72 + g.spin_orbit_splitting = True + spin_orbit_branching_ratio = 0.4 + spin_orbit_splitting_energy = 0.72 np.testing.assert_allclose(g.function(0), 1.553312) np.testing.assert_allclose(g.function(1), 5.612734) assert g._position is g.centre + def test_function_shirleybackground(): g = PESVoigt() g.area.value = 5 @@ -77,6 +80,7 @@ def test_function_shirleybackground(): np.testing.assert_allclose(g.function(1), 5.06863535) assert g._position is g.centre + @pytest.mark.parametrize(("lazy"), (True, False)) @pytest.mark.parametrize(("uniform"), (True, False)) @pytest.mark.parametrize(("mapnone"), (True, False)) @@ -85,11 +89,11 @@ def test_estimate_parameters_binned(only_current, binned, lazy, uniform, mapnone s = Signal1D(np.empty((200,))) s.axes_manager.signal_axes[0].is_binned = binned axis = s.axes_manager.signal_axes[0] - axis.scale = .05 + axis.scale = 0.05 axis.offset = -5 g1 = PESVoigt() g1.centre.value = 1 - g1.area.value = 5. + g1.area.value = 5.0 g1.gamma.value = 0.001 g1.FWHM.value = 0.5 s.data = g1.function(axis.axis) @@ -106,11 +110,10 @@ def test_estimate_parameters_binned(only_current, binned, lazy, uniform, mapnone factor = 1 if mapnone: g2.area.map = None - assert g2.estimate_parameters(s, axis.low_value, axis.high_value, - only_current=only_current) + assert g2.estimate_parameters( + s, axis.low_value, axis.high_value, only_current=only_current + ) assert g2._axes_manager[-1].is_binned == binned np.testing.assert_allclose(g2.FWHM.value, 1, 0.5) np.testing.assert_allclose(g1.area.value, g2.area.value * factor, 0.04) np.testing.assert_allclose(g2.centre.value, 1, 1e-3) - - diff --git a/exspy/test/data/test_data.py b/exspy/test/data/test_data.py index e5f32995a..b5e690a02 100644 --- a/exspy/test/data/test_data.py +++ b/exspy/test/data/test_data.py @@ -25,34 +25,32 @@ def test_eds_sem(): s = exspy.data.EDS_SEM_TM002() assert isinstance(s, exspy.signals.EDSSEMSpectrum) assert s.axes_manager.navigation_dimension == 0 - assert s.metadata.Sample.elements == ['Al', 'C', 'Cu', 'Mn', 'Zr'] + assert s.metadata.Sample.elements == ["Al", "C", "Cu", "Mn", "Zr"] def test_eds_tem(): s = exspy.data.EDS_TEM_FePt_nanoparticles() assert isinstance(s, exspy.signals.EDSTEMSpectrum) assert s.axes_manager.navigation_dimension == 0 - assert s.metadata.Sample.elements == ['Fe', 'Pt'] + assert s.metadata.Sample.elements == ["Fe", "Pt"] -@pytest.mark.parametrize('navigation_shape', [(), (2, ), (3, 4), (5, 6, 7)]) +@pytest.mark.parametrize("navigation_shape", [(), (2,), (3, 4), (5, 6, 7)]) @pytest.mark.parametrize( - ['add_noise', 'random_state'], - [[True, 0], [True, None], [False, None]] - ) + ["add_noise", "random_state"], [[True, 0], [True, None], [False, None]] +) def test_EELS_low_loss(add_noise, random_state, navigation_shape): s = exspy.data.EELS_low_loss(add_noise, random_state, navigation_shape) assert s.axes_manager.navigation_shape == 
navigation_shape -@pytest.mark.parametrize('add_powerlaw', [True, False]) -@pytest.mark.parametrize('navigation_shape', [(1,), (2, )]) +@pytest.mark.parametrize("add_powerlaw", [True, False]) +@pytest.mark.parametrize("navigation_shape", [(1,), (2,)]) @pytest.mark.parametrize( - ['add_noise', 'random_state'], - [[True, 0], [True, None], [False, None]] - ) + ["add_noise", "random_state"], [[True, 0], [True, None], [False, None]] +) def test_EELS_MnFe(add_powerlaw, add_noise, random_state, navigation_shape): s = exspy.data.EELS_MnFe(add_powerlaw, add_noise, random_state, navigation_shape) - if navigation_shape == (1, ): + if navigation_shape == (1,): navigation_shape = () assert s.axes_manager.navigation_shape == navigation_shape diff --git a/exspy/test/data/test_eelsdb.py b/exspy/test/data/test_eelsdb.py index 2d4f83c82..f8d1f5040 100644 --- a/exspy/test/data/test_eelsdb.py +++ b/exspy/test/data/test_eelsdb.py @@ -31,7 +31,8 @@ def _eelsdb(**kwargs): except SSLError: warnings.warn( "The https://eelsdb.eu certificate seems to be invalid. " - "Consider notifying the issue to the EELSdb webmaster.") + "Consider notifying the issue to the EELSdb webmaster." + ) ss = eelsdb(verify_certificate=False, **kwargs) except Exception as e: # e.g. failures such as ConnectionError or MaxRetryError @@ -41,10 +42,10 @@ def _eelsdb(**kwargs): def eelsdb_down(): try: - _ = requests.get('https://api.eelsdb.eu', verify=True) + _ = requests.get("https://api.eelsdb.eu", verify=True) return False except SSLError: - _ = requests.get('https://api.eelsdb.eu', verify=False) + _ = requests.get("https://api.eelsdb.eu", verify=False) return False except requests.exceptions.ConnectionError: return True @@ -65,15 +66,14 @@ def test_eelsdb_eels(): resolution_compare="lt", max_n=2, order="spectrumMin", - order_direction='DESC', + order_direction="DESC", monochromated=False, - ) + ) assert len(ss) == 2 md = ss[0].metadata assert md.General.author == "Odile Stephan" - assert ( - md.Acquisition_instrument.TEM.Detector.EELS.collection_angle == 24) + assert md.Acquisition_instrument.TEM.Detector.EELS.collection_angle == 24 assert md.Acquisition_instrument.TEM.convergence_angle == 15 assert md.Acquisition_instrument.TEM.beam_energy == 100 assert md.Signal.signal_type == "EELS" @@ -85,7 +85,10 @@ def test_eelsdb_eels(): @pytest.mark.skipif(eelsdb_down(), reason="Unable to connect to EELSdb") def test_eelsdb_xas(): - ss = _eelsdb(spectrum_type="xrayabs", max_n=1,) + ss = _eelsdb( + spectrum_type="xrayabs", + max_n=1, + ) assert len(ss) == 1 md = ss[0].metadata assert md.Signal.signal_type == "XAS" @@ -93,17 +96,15 @@ def test_eelsdb_xas(): @pytest.mark.skipif(eelsdb_down(), reason="Unable to connect to EELSdb") def test_eelsdb_corrupted_file(): - ss = _eelsdb( - spectrum_type='coreloss', element='Cu', edge='K', formula='Cu4O3' - ) + ss = _eelsdb(spectrum_type="coreloss", element="Cu", edge="K", formula="Cu4O3") assert len(ss) == 1 - assert ss[0].metadata.General.title == 'O-K edge in Cu4O3' + assert ss[0].metadata.General.title == "O-K edge in Cu4O3" @pytest.mark.skipif(eelsdb_down(), reason="Unable to connect to EELSdb") def test_eelsdb_elements_no(): title = "Zero-loss c-FEG Hitachi Disp 0.214 eV" - ss = _eelsdb(author='Luc Lajaunie', title=title) + ss = _eelsdb(author="Luc Lajaunie", title=title) assert len(ss) == 1 assert ss[0].metadata.General.title == title diff --git a/exspy/test/drawing/test_plot_model.py b/exspy/test/drawing/test_plot_model.py index a4880bc02..b76a463f4 100644 --- a/exspy/test/drawing/test_plot_model.py +++ 
b/exspy/test/drawing/test_plot_model.py @@ -26,15 +26,15 @@ from exspy.signals import EELSSpectrum my_path = Path(__file__).resolve().parent -baseline_dir = 'plot_model' +baseline_dir = "plot_model" default_tol = 2.0 def create_ll_signal(signal_shape=1000): offset = 0 - zlp_param = {'A': 10000.0, 'centre': 0.0 + offset, 'sigma': 15.0} + zlp_param = {"A": 10000.0, "centre": 0.0 + offset, "sigma": 15.0} zlp = Gaussian(**zlp_param) - plasmon_param = {'A': 2000.0, 'centre': 200.0 + offset, 'sigma': 75.0} + plasmon_param = {"A": 2000.0, "centre": 200.0 + offset, "sigma": 75.0} plasmon = Gaussian(**plasmon_param) axis = np.arange(signal_shape) data = zlp.function(axis) + plasmon.function(axis) @@ -51,17 +51,23 @@ def create_ll_signal(signal_shape=1000): def create_sum_of_gaussians(convolved=False): - param1 = {'A': A_value_gaussian[0], - 'centre': centre_value_gaussian[0] / scale, - 'sigma': sigma_value_gaussian[0] / scale} + param1 = { + "A": A_value_gaussian[0], + "centre": centre_value_gaussian[0] / scale, + "sigma": sigma_value_gaussian[0] / scale, + } gs1 = Gaussian(**param1) - param2 = {'A': A_value_gaussian[1], - 'centre': centre_value_gaussian[1] / scale, - 'sigma': sigma_value_gaussian[1] / scale} + param2 = { + "A": A_value_gaussian[1], + "centre": centre_value_gaussian[1] / scale, + "sigma": sigma_value_gaussian[1] / scale, + } gs2 = Gaussian(**param2) - param3 = {'A': A_value_gaussian[2], - 'centre': centre_value_gaussian[2] / scale, - 'sigma': sigma_value_gaussian[2] / scale} + param3 = { + "A": A_value_gaussian[2], + "centre": centre_value_gaussian[2] / scale, + "sigma": sigma_value_gaussian[2] / scale, + } gs3 = Gaussian(**param3) axis = np.arange(1000) @@ -79,13 +85,13 @@ def create_sum_of_gaussians(convolved=False): @pytest.mark.parametrize("binned", [True, False]) @pytest.mark.parametrize("plot_component", [True, False]) @pytest.mark.parametrize("convolved", [True, False]) -@pytest.mark.mpl_image_compare( - baseline_dir=baseline_dir, tolerance=default_tol) +@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir, tolerance=default_tol) def test_plot_gaussian_EELSSpectrum(convolved, plot_component, binned): s = create_sum_of_gaussians(convolved) s.axes_manager[-1].is_binned == binned - s.metadata.General.title = 'Convolved: {}, plot_component: {}, binned: {}'.format( - convolved, plot_component, binned) + s.metadata.General.title = "Convolved: {}, plot_component: {}, binned: {}".format( + convolved, plot_component, binned + ) s.axes_manager[-1].is_binned = binned m = s.create_model(auto_add_edges=False, auto_background=False) @@ -99,8 +105,7 @@ def set_gaussian(gaussian, centre, sigma): gaussian.sigma.value = sigma gaussian.sigma.free = False - for gaussian, centre, sigma in zip(m, centre_value_gaussian, - sigma_value_gaussian): + for gaussian, centre, sigma in zip(m, centre_value_gaussian, sigma_value_gaussian): set_gaussian(gaussian, centre, sigma) m.fit() @@ -125,21 +130,20 @@ def A_value(s, component, binned): @pytest.mark.parametrize(("convolved"), [False, True]) -@pytest.mark.mpl_image_compare( - baseline_dir=baseline_dir, tolerance=default_tol) +@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir, tolerance=default_tol) def test_fit_EELS_convolved(convolved): # Keep this test here to avoid having to add image comparison in exspy pytest.importorskip("exspy", reason="exspy not installed.") - dname = my_path.joinpath('data') + dname = my_path.joinpath("data") with pytest.warns(VisibleDeprecationWarning): - cl = hs.load(dname.joinpath('Cr_L_cl.hspy')) + cl = 
hs.load(dname.joinpath("Cr_L_cl.hspy")) cl.axes_manager[-1].is_binned = False - cl.metadata.General.title = 'Convolved: {}'.format(convolved) + cl.metadata.General.title = "Convolved: {}".format(convolved) ll = None if convolved: with pytest.warns(VisibleDeprecationWarning): - ll = hs.load(dname.joinpath('Cr_L_ll.hspy')) - m = cl.create_model(auto_background=False, low_loss=ll, GOS='hydrogenic') - m.fit(kind='smart') + ll = hs.load(dname.joinpath("Cr_L_ll.hspy")) + m = cl.create_model(auto_background=False, low_loss=ll, GOS="hydrogenic") + m.fit(kind="smart") m.plot(plot_components=True) - return m._plot.signal_plot.figure \ No newline at end of file + return m._plot.signal_plot.figure diff --git a/exspy/test/drawing/test_plot_spectra_markers.py b/exspy/test/drawing/test_plot_spectra_markers.py index 674c66ce6..59ef5b2db 100644 --- a/exspy/test/drawing/test_plot_spectra_markers.py +++ b/exspy/test/drawing/test_plot_spectra_markers.py @@ -1,4 +1,3 @@ - import logging import pytest @@ -9,15 +8,17 @@ import exspy default_tol = 2.0 -baseline_dir = 'plot_spectra_markers' -style_pytest_mpl = 'default' +baseline_dir = "plot_spectra_markers" +style_pytest_mpl = "default" class TestEDSMarkers: - @pytest.mark.mpl_image_compare( - baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) - def test_plot_eds_lines(self,): + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl + ) + def test_plot_eds_lines( + self, + ): a = exspy.data.EDS_SEM_TM002() s = stack([a, a * 5]) s.plot(True) @@ -30,13 +31,18 @@ def test_plot_eds_lines_norm(self, norm): s = stack([a, a * 5]) # When norm is None, don't specify (use default) # otherwise use specify value - kwargs = {"norm":norm} if norm else {} + kwargs = {"norm": norm} if norm else {} s.plot(True, **kwargs) @pytest.mark.mpl_image_compare( - baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl, - filename='test_plot_eds_lines.png') - def test_plot_xray_lines(self,): + baseline_dir=baseline_dir, + tolerance=default_tol, + style=style_pytest_mpl, + filename="test_plot_eds_lines.png", + ) + def test_plot_xray_lines( + self, + ): # It should be the same image as with previous test (test_plot_eds_lines) a = exspy.data.EDS_SEM_TM002() s = stack([a, a * 5]) @@ -49,26 +55,32 @@ def test_plot_eds_lines_not_in_range(self, caplog): s = exspy.data.EDS_SEM_TM002().isig[5.0:8.0] s.plot() with caplog.at_level(logging.WARNING): - s._plot_xray_lines(xray_lines=['Pt_Ka']) + s._plot_xray_lines(xray_lines=["Pt_Ka"]) assert "Pt_Ka is not in the data energy range." 
in caplog.text - def test_plot_eds_lines_background(self,): + def test_plot_eds_lines_background( + self, + ): s = exspy.data.EDS_SEM_TM002().isig[5.0:8.0] s.plot() bw = s.estimate_background_windows() s._plot_xray_lines(background_windows=bw) - def test_plot_add_background_windows(self,): + def test_plot_add_background_windows( + self, + ): s = exspy.data.EDS_SEM_TM002().isig[5.0:8.0] s.plot() bw = s.estimate_background_windows() s._add_background_windows_markers(bw) # Add integration windows - iw = s.estimate_integration_windows(windows_width=2.0, xray_lines=['Fe_Ka']) - s._add_vertical_lines_groups(iw, linestyle='--') + iw = s.estimate_integration_windows(windows_width=2.0, xray_lines=["Fe_Ka"]) + s._add_vertical_lines_groups(iw, linestyle="--") - def test_plot_eds_markers_no_energy(self,): + def test_plot_eds_markers_no_energy( + self, + ): s = exspy.data.EDS_SEM_TM002() del s.metadata.Acquisition_instrument.SEM.beam_energy s.plot(True) @@ -76,23 +88,28 @@ def test_plot_eds_markers_no_energy(self,): class TestEELSMarkers: @pytest.mark.mpl_image_compare( - baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl) - def test_plot_eels_labels(self,): + baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl + ) + def test_plot_eels_labels( + self, + ): s = exspy.data.EELS_MnFe(True, add_noise=False) - s.add_elements(['Cr']) + s.add_elements(["Cr"]) s.plot(plot_edges=True) return s._plot.signal_plot.figure - def test_plot_eels_labels_nav(self,): + def test_plot_eels_labels_nav( + self, + ): s = exspy.data.EELS_MnFe(True, add_noise=False) - s.add_elements(['Cr', 'Fe']) + s.add_elements(["Cr", "Fe"]) s.plot(plot_edges=True) s.axes_manager.indices = (9,) s._plot.close() def test_remove_edges(self): s = exspy.data.EELS_MnFe(True, add_noise=False) - s.plot(plot_edges=['Cr']) + s.plot(plot_edges=["Cr"]) lines = s._edge_markers["lines"] texts = s._edge_markers["texts"] diff --git a/exspy/test/misc/test_eds.py b/exspy/test/misc/test_eds.py index cd4b36e0f..51a170b4d 100644 --- a/exspy/test/misc/test_eds.py +++ b/exspy/test/misc/test_eds.py @@ -24,31 +24,61 @@ def test_xray_lines_near_energy(): E = 1.36 lines = get_xray_lines_near_energy(E) - assert ( - lines == - ['Pm_M2N4', 'Ho_Ma', 'Eu_Mg', 'Se_La', 'Br_Ln', 'W_Mz', 'As_Lb3', - 'Kr_Ll', 'Ho_Mb', 'Ta_Mz', 'Dy_Mb', 'As_Lb1', 'Gd_Mg', 'Er_Ma', - 'Sm_M2N4', 'Mg_Kb', 'Se_Lb1', 'Ge_Lb3', 'Br_Ll', 'Sm_Mg', 'Dy_Ma', - 'Nd_M2N4', 'As_La', 'Re_Mz', 'Hf_Mz', 'Kr_Ln', 'Er_Mb', 'Tb_Mb']) + assert lines == [ + "Pm_M2N4", + "Ho_Ma", + "Eu_Mg", + "Se_La", + "Br_Ln", + "W_Mz", + "As_Lb3", + "Kr_Ll", + "Ho_Mb", + "Ta_Mz", + "Dy_Mb", + "As_Lb1", + "Gd_Mg", + "Er_Ma", + "Sm_M2N4", + "Mg_Kb", + "Se_Lb1", + "Ge_Lb3", + "Br_Ll", + "Sm_Mg", + "Dy_Ma", + "Nd_M2N4", + "As_La", + "Re_Mz", + "Hf_Mz", + "Kr_Ln", + "Er_Mb", + "Tb_Mb", + ] lines = get_xray_lines_near_energy(E, 0.02) - assert lines == ['Pm_M2N4'] + assert lines == ["Pm_M2N4"] E = 5.4 lines = get_xray_lines_near_energy(E) - assert ( - lines == - ['Cr_Ka', 'La_Lb2', 'V_Kb', 'Pm_La', 'Pm_Ln', 'Ce_Lb3', 'Gd_Ll', - 'Pr_Lb1', 'Xe_Lg3', 'Pr_Lb4']) - lines = get_xray_lines_near_energy(E, only_lines=('a', 'b')) - assert ( - lines == - ['Cr_Ka', 'V_Kb', 'Pm_La', 'Pr_Lb1']) - lines = get_xray_lines_near_energy(E, only_lines=('a')) - assert ( - lines == - ['Cr_Ka', 'Pm_La']) + assert lines == [ + "Cr_Ka", + "La_Lb2", + "V_Kb", + "Pm_La", + "Pm_Ln", + "Ce_Lb3", + "Gd_Ll", + "Pr_Lb1", + "Xe_Lg3", + "Pr_Lb4", + ] + lines = get_xray_lines_near_energy(E, only_lines=("a", "b")) + assert lines == 
["Cr_Ka", "V_Kb", "Pm_La", "Pr_Lb1"] + lines = get_xray_lines_near_energy(E, only_lines=("a")) + assert lines == ["Cr_Ka", "Pm_La"] + def test_takeoff_angle(): - np.testing.assert_allclose(40.,take_off_angle(30.,0.,10.)) - np.testing.assert_allclose(40.,take_off_angle(0.,90.,10.,beta_tilt=30.)) - np.testing.assert_allclose(73.15788376370121,take_off_angle(45.,45.,45., - 45.)) + np.testing.assert_allclose(40.0, take_off_angle(30.0, 0.0, 10.0)) + np.testing.assert_allclose(40.0, take_off_angle(0.0, 90.0, 10.0, beta_tilt=30.0)) + np.testing.assert_allclose( + 73.15788376370121, take_off_angle(45.0, 45.0, 45.0, 45.0) + ) diff --git a/exspy/test/misc/test_eds_utils.py b/exspy/test/misc/test_eds_utils.py index 769cfaf04..02eec2ff9 100755 --- a/exspy/test/misc/test_eds_utils.py +++ b/exspy/test/misc/test_eds_utils.py @@ -22,7 +22,7 @@ def test_get_element_and_line(): - assert _get_element_and_line('Mn_Ka') == ('Mn', 'Ka') + assert _get_element_and_line("Mn_Ka") == ("Mn", "Ka") with pytest.raises(ValueError): - _get_element_and_line('MnKa') == -1 + _get_element_and_line("MnKa") == -1 diff --git a/exspy/test/misc/test_eels.py b/exspy/test/misc/test_eels.py index f6b17c9f0..7aebc2d00 100755 --- a/exspy/test/misc/test_eels.py +++ b/exspy/test/misc/test_eels.py @@ -24,34 +24,76 @@ def test_single_edge(): edges = get_edges_near_energy(532, width=0) assert len(edges) == 1 - assert edges == ['O_K'] + assert edges == ["O_K"] def test_multiple_edges(): edges = get_edges_near_energy(640, width=100) assert len(edges) == 16 assert edges == [ - 'Mn_L3', 'Ra_N4', 'I_M4', 'Cd_M2', 'Mn_L2', 'V_L1', 'I_M5', 'Cd_M3', - 'In_M3', 'Xe_M5', 'Ac_N4', 'Ra_N5', 'Fr_N4', 'Ag_M2', 'F_K', 'Xe_M4', - ] + "Mn_L3", + "Ra_N4", + "I_M4", + "Cd_M2", + "Mn_L2", + "V_L1", + "I_M5", + "Cd_M3", + "In_M3", + "Xe_M5", + "Ac_N4", + "Ra_N5", + "Fr_N4", + "Ag_M2", + "F_K", + "Xe_M4", + ] def test_multiple_edges_ascending(): - edges = get_edges_near_energy(640, width=100, order='ascending') + edges = get_edges_near_energy(640, width=100, order="ascending") assert len(edges) == 16 assert edges == [ - 'Ag_M2', 'Ra_N5', 'Fr_N4', 'Cd_M3', 'I_M5', 'V_L1', 'I_M4', 'Ra_N4', - 'Mn_L3', 'Cd_M2', 'Mn_L2', 'In_M3', 'Xe_M5', 'Ac_N4', 'F_K', 'Xe_M4', - ] + "Ag_M2", + "Ra_N5", + "Fr_N4", + "Cd_M3", + "I_M5", + "V_L1", + "I_M4", + "Ra_N4", + "Mn_L3", + "Cd_M2", + "Mn_L2", + "In_M3", + "Xe_M5", + "Ac_N4", + "F_K", + "Xe_M4", + ] def test_multiple_edges_descending(): - edges = get_edges_near_energy(640, width=100, order='descending') + edges = get_edges_near_energy(640, width=100, order="descending") assert len(edges) == 16 assert edges == [ - 'F_K', 'Xe_M4', 'Ac_N4', 'Xe_M5', 'In_M3', 'Cd_M2', 'Mn_L2', 'Mn_L3', - 'Ra_N4', 'I_M4', 'V_L1', 'I_M5', 'Cd_M3', 'Ra_N5', 'Fr_N4', 'Ag_M2', - ] + "F_K", + "Xe_M4", + "Ac_N4", + "Xe_M5", + "In_M3", + "Cd_M2", + "Mn_L2", + "Mn_L3", + "Ra_N4", + "I_M4", + "V_L1", + "I_M5", + "Cd_M3", + "Ra_N5", + "Fr_N4", + "Ag_M2", + ] def test_negative_energy_width(): @@ -61,19 +103,19 @@ def test_negative_energy_width(): def test_wrong_ordering(): with pytest.raises(ValueError): - get_edges_near_energy(532, order='random') + get_edges_near_energy(532, order="random") def test_info_one_edge(): - info = get_info_from_edges('O_K') + info = get_info_from_edges("O_K") assert len(info) == 1 def test_info_multiple_edges(): - info = get_info_from_edges(['O_K', 'N_K', 'Cr_L3']) + info = get_info_from_edges(["O_K", "N_K", "Cr_L3"]) assert len(info) == 3 def test_info_wrong_edge_format(): with pytest.raises(ValueError): - 
get_info_from_edges(['O_K', 'NK']) + get_info_from_edges(["O_K", "NK"]) diff --git a/exspy/test/misc/test_gos.py b/exspy/test/misc/test_gos.py index 63a06131e..3e53fcdf3 100644 --- a/exspy/test/misc/test_gos.py +++ b/exspy/test/misc/test_gos.py @@ -36,31 +36,33 @@ ) -@pytest.mark.skipif(not Path(preferences.EELS.eels_gos_files_path).exists(), - reason="Hartree-Slater GOS not available") +@pytest.mark.skipif( + not Path(preferences.EELS.eels_gos_files_path).exists(), + reason="Hartree-Slater GOS not available", +) def test_hartree_slater_gos(): - gos = HartreeSlaterGOS('Ti_L3') + gos = HartreeSlaterGOS("Ti_L3") gos.read_elements() def test_hydrogenic_gos_error_M_shells(): with pytest.raises(ValueError): - _ = HydrogenicGOS('Ti_M2') + _ = HydrogenicGOS("Ti_M2") def test_element_not_in_database(): with pytest.raises(ValueError): - _ = HydrogenicGOS('Lr_M2') + _ = HydrogenicGOS("Lr_M2") def test_subshell_not_in_database(): with pytest.raises(ValueError): - _ = HydrogenicGOS('Ti_L4') + _ = HydrogenicGOS("Ti_L4") def test_gosh_not_in_conventions(): - gos = GoshGOS('Ti_L2') - gos.subshell = 'L234' + gos = GoshGOS("Ti_L2") + gos.subshell = "L234" with pytest.raises(ValueError): gos.read_gos_data() @@ -68,13 +70,13 @@ def test_gosh_not_in_conventions(): def test_gosh_not_in_file(): # Use version 1.0 which doesn't have the Ac element with pytest.raises(ValueError): - _ = GoshGOS('Ac_L3', gos_file_path=GOSH10) + _ = GoshGOS("Ac_L3", gos_file_path=GOSH10) def test_binding_energy_database(): - gos = GoshGOS('Ti_L3') + gos = GoshGOS("Ti_L3") gosh15 = h5py.File(gos.gos_file_path) for element in gosh15.keys(): # These elements are not in the database - if element not in ['Bk', 'Cf', 'Cm', 'metadata']: - assert 'Binding_energies' in elements[element]['Atomic_properties'].keys() + if element not in ["Bk", "Cf", "Cm", "metadata"]: + assert "Binding_energies" in elements[element]["Atomic_properties"].keys() diff --git a/exspy/test/misc/test_material.py b/exspy/test/misc/test_material.py index 361f8422c..d9d1b7173 100644 --- a/exspy/test/misc/test_material.py +++ b/exspy/test/misc/test_material.py @@ -25,15 +25,17 @@ class TestWeightToFromAtomic: - def setup_method(self, method): # TiO2 self.elements = ("Ti", "O") natoms = (1, 2) self.at = [100 * nat / float(sum(natoms)) for nat in natoms] atomic_weight = np.array( - [elements_db[element].General_properties.atomic_weight for element - in self.elements]) + [ + elements_db[element].General_properties.atomic_weight + for element in self.elements + ] + ) mol_weight = atomic_weight * natoms self.wt = [100 * w / mol_weight.sum() for w in mol_weight] @@ -52,7 +54,8 @@ def test_multi_dim(self): wt = np.array([[[88] * 2] * 3, [[12] * 2] * 3]) at = ex.material.weight_to_atomic(wt, elements) np.testing.assert_allclose( - at[:, 0, 0], np.array([93.196986, 6.803013]), atol=1e-3) + at[:, 0, 0], np.array([93.196986, 6.803013]), atol=1e-3 + ) wt2 = ex.material.atomic_to_weight(at, elements) np.testing.assert_allclose(wt, wt2) @@ -60,58 +63,62 @@ def test_multi_dim(self): def test_density_of_mixture(): # Bronze elements = ("Cu", "Sn") - wt = (88., 12.) + wt = (88.0, 12.0) densities = np.array( - [elements_db[element].Physical_properties.density_gcm3 for element in - elements]) + [elements_db[element].Physical_properties.density_gcm3 for element in elements] + ) volumes = wt * densities - density = volumes.sum() / 100. 
+ density = volumes.sum() / 100.0 np.testing.assert_allclose( - density, ex.material.density_of_mixture(wt, elements, mean='weighted')) + density, ex.material.density_of_mixture(wt, elements, mean="weighted") + ) volumes = wt / densities - density = 100. / volumes.sum() - np.testing.assert_allclose( - density, ex.material.density_of_mixture(wt, elements)) + density = 100.0 / volumes.sum() + np.testing.assert_allclose(density, ex.material.density_of_mixture(wt, elements)) wt = np.array([[[88] * 2] * 3, [[12] * 2] * 3]) np.testing.assert_allclose( - density, ex.material.density_of_mixture(wt, elements)[0, 0]) + density, ex.material.density_of_mixture(wt, elements)[0, 0] + ) # Testing whether the correct exception is raised upon unknown density elements = ("Cu", "Sn", "At") - wt = (87., 12., 1.) + wt = (87.0, 12.0, 1.0) with pytest.raises(ValueError): - ex.material.density_of_mixture(wt,elements) + ex.material.density_of_mixture(wt, elements) def test_mac(): np.testing.assert_allclose( - ex.material.mass_absorption_coefficient('Al', 3.5), 506.0153356472) + ex.material.mass_absorption_coefficient("Al", 3.5), 506.0153356472 + ) np.testing.assert_allclose( - ex.material.mass_absorption_coefficient('Ta', [1, 3.2, 2.3]), - [3343.7083701143229, 1540.0819991890, 3011.264941118]) + ex.material.mass_absorption_coefficient("Ta", [1, 3.2, 2.3]), + [3343.7083701143229, 1540.0819991890, 3011.264941118], + ) np.testing.assert_allclose( - ex.material.mass_absorption_coefficient('Zn', 'Zn_La'), - 1413.291119134) + ex.material.mass_absorption_coefficient("Zn", "Zn_La"), 1413.291119134 + ) np.testing.assert_allclose( - ex.material.mass_absorption_coefficient( - 'Zn', ['Cu_La', 'Nb_La']), [1704.7912903000029, - 1881.2081950943339]) + ex.material.mass_absorption_coefficient("Zn", ["Cu_La", "Nb_La"]), + [1704.7912903000029, 1881.2081950943339], + ) def test_mixture_mac(): - np.testing.assert_allclose(ex.material.mass_absorption_mixture([50, 50], - ['Al', 'Zn'], - 'Al_Ka'), - 2587.4161643905127) + np.testing.assert_allclose( + ex.material.mass_absorption_mixture([50, 50], ["Al", "Zn"], "Al_Ka"), + 2587.4161643905127, + ) elements = ("Cu", "Sn") - lines = [0.5, 'Al_Ka'] - wt = np.array([[[88.] * 2] * 3, [[12.] 
* 2] * 3]) + lines = [0.5, "Al_Ka"] + wt = np.array([[[88.0] * 2] * 3, [[12.0] * 2] * 3]) np.testing.assert_array_almost_equal( ex.material.mass_absorption_mixture(wt, elements, lines)[:, 0, 0], - np.array([8003.05391481, 4213.4235561])) + np.array([8003.05391481, 4213.4235561]), + ) wt = hs.signals.Signal2D(wt).split() mac = ex.material.mass_absorption_mixture(wt, elements, lines) np.testing.assert_array_almost_equal(mac[0].data[0, 0], 8003.053914) diff --git a/exspy/test/models/test_edsmodel.py b/exspy/test/models/test_edsmodel.py index 6953be3d8..72b90aa94 100644 --- a/exspy/test/models/test_edsmodel.py +++ b/exspy/test/models/test_edsmodel.py @@ -47,7 +47,6 @@ @lazifyTestClass class TestlineFit: - def setup_method(self, method): self.s = s.deepcopy() @@ -55,18 +54,22 @@ def test_fit(self): s = self.s m = s.create_model() m.fit() - np.testing.assert_allclose([i.data for i in - m.get_lines_intensity()], - [[0.5], [0.2], [0.3]], atol=1E-4) + np.testing.assert_allclose( + [i.data for i in m.get_lines_intensity()], [[0.5], [0.2], [0.3]], atol=1e-4 + ) def _check_model_creation(self): s = self.s # Default: m = s.create_model() - assert ( - [c.name for c in m] == - ['background_order_6', 'Cr_Ka', 'Cr_Kb', - 'Fe_Ka', 'Fe_Kb', 'Zn_Ka']) + assert [c.name for c in m] == [ + "background_order_6", + "Cr_Ka", + "Cr_Kb", + "Fe_Ka", + "Fe_Kb", + "Zn_Ka", + ] # No auto componentes: m = s.create_model(False, False) assert [c.name for c in m] == [] @@ -91,10 +94,8 @@ def _check_model_store(self): m.remove(["Cr_Ka", "background_order_6"]) m.store() m1 = s.models.a.restore() - assert ( - [c.name for c in m] == [c.name for c in m1]) - assert ([c.name for c in m.xray_lines] == - [c.name for c in m1.xray_lines]) + assert [c.name for c in m] == [c.name for c in m1] + assert [c.name for c in m.xray_lines] == [c.name for c in m1.xray_lines] assert "Cr_Ka" not in m1.xray_lines assert "background_order_6" not in m1.background_components @@ -114,13 +115,16 @@ def test_calibrate_energy_resolution(self): m = s.create_model() m.fit() m.fit_background() - reso = s.metadata.Acquisition_instrument.TEM.Detector.EDS.\ - energy_resolution_MnKa, + reso = ( + s.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa, + ) s.set_microscope_parameters(energy_resolution_MnKa=150) - m.calibrate_energy_axis(calibrate='resolution') + m.calibrate_energy_axis(calibrate="resolution") np.testing.assert_allclose( - s.metadata.Acquisition_instrument.TEM.Detector.EDS. 
- energy_resolution_MnKa, reso, atol=1) + s.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa, + reso, + atol=1, + ) def test_calibrate_energy_scale(self): s = self.s @@ -129,7 +133,7 @@ def test_calibrate_energy_scale(self): ax = s.axes_manager[-1] scale = ax.scale ax.scale += 0.0004 - m.calibrate_energy_axis('scale') + m.calibrate_energy_axis("scale") np.testing.assert_allclose(ax.scale, scale, atol=1e-5) def test_calibrate_energy_offset(self): @@ -139,55 +143,59 @@ def test_calibrate_energy_offset(self): ax = s.axes_manager[-1] offset = ax.offset ax.offset += 0.04 - m.calibrate_energy_axis('offset') + m.calibrate_energy_axis("offset") np.testing.assert_allclose(ax.offset, offset, atol=1e-5) def test_calibrate_xray_energy(self): s = self.s m = s.create_model() m.fit() - m['Fe_Ka'].centre.value = 6.39 + m["Fe_Ka"].centre.value = 6.39 - m.calibrate_xray_lines(calibrate='energy', xray_lines=['Fe_Ka'], - bound=100) + m.calibrate_xray_lines(calibrate="energy", xray_lines=["Fe_Ka"], bound=100) np.testing.assert_allclose( - m['Fe_Ka'].centre.value, elements_db['Fe']['Atomic_properties'][ - 'Xray_lines']['Ka']['energy (keV)'], atol=1e-6) + m["Fe_Ka"].centre.value, + elements_db["Fe"]["Atomic_properties"]["Xray_lines"]["Ka"]["energy (keV)"], + atol=1e-6, + ) def test_calibrate_xray_weight(self): s = self.s s1 = utils_eds.xray_lines_model( - elements=['Co'], + elements=["Co"], weight_percents=[50], - energy_axis={'units': 'keV', 'size': 400, - 'scale': 0.01, 'name': 'E', - 'offset': 4.9}) - s = (s + s1 / 50) + energy_axis={ + "units": "keV", + "size": 400, + "scale": 0.01, + "name": "E", + "offset": 4.9, + }, + ) + s = s + s1 / 50 m = s.create_model() m.fit() with pytest.warns( - UserWarning, - match="X-ray line expected to be in the model was not found" + UserWarning, match="X-ray line expected to be in the model was not found" ): - m.calibrate_xray_lines(calibrate='sub_weight', - xray_lines=['Fe_Ka'], bound=100) + m.calibrate_xray_lines( + calibrate="sub_weight", xray_lines=["Fe_Ka"], bound=100 + ) - np.testing.assert_allclose(0.0347, m['Fe_Kb'].A.value, atol=1e-3) + np.testing.assert_allclose(0.0347, m["Fe_Kb"].A.value, atol=1e-3) def test_calibrate_xray_width(self): s = self.s m = s.create_model() m.fit() - sigma = m['Fe_Ka'].sigma.value - m['Fe_Ka'].sigma.value = 0.065 + sigma = m["Fe_Ka"].sigma.value + m["Fe_Ka"].sigma.value = 0.065 - m.calibrate_xray_lines(calibrate='energy', xray_lines=['Fe_Ka'], - bound=10) + m.calibrate_xray_lines(calibrate="energy", xray_lines=["Fe_Ka"], bound=10) - np.testing.assert_allclose(sigma, m['Fe_Ka'].sigma.value, - atol=1e-2) + np.testing.assert_allclose(sigma, m["Fe_Ka"].sigma.value, atol=1e-2) def test_enable_adjust_position(self): m = self.s.create_model() @@ -197,11 +205,12 @@ def test_enable_adjust_position(self): assert len(list(m._position_widgets.values())[0]) == 2 lbls = [p[1].string for p in m._position_widgets.values()] assert sorted(lbls) == [ - '$\\mathrm{Cr}_{\\mathrm{Ka}}$', - '$\\mathrm{Cr}_{\\mathrm{Kb}}$', - '$\\mathrm{Fe}_{\\mathrm{Ka}}$', - '$\\mathrm{Fe}_{\\mathrm{Kb}}$', - '$\\mathrm{Zn}_{\\mathrm{Ka}}$'] + "$\\mathrm{Cr}_{\\mathrm{Ka}}$", + "$\\mathrm{Cr}_{\\mathrm{Kb}}$", + "$\\mathrm{Fe}_{\\mathrm{Ka}}$", + "$\\mathrm{Fe}_{\\mathrm{Kb}}$", + "$\\mathrm{Zn}_{\\mathrm{Ka}}$", + ] def test_quantification(self): s = self.s @@ -211,73 +220,75 @@ def test_quantification(self): m = s.create_model() m.fit() intensities = m.get_lines_intensity() - quant = s.quantification(intensities, method='CL', - factors=[1.0, 1.0, 
1.0], - composition_units='weight') + quant = s.quantification( + intensities, + method="CL", + factors=[1.0, 1.0, 1.0], + composition_units="weight", + ) np.testing.assert_allclose(utils.stack(quant, axis=0), [50, 20, 30]) def test_quantification_2_elements(self): s = self.s m = s.create_model() m.fit() - intensities = m.get_lines_intensity(['Fe_Ka', 'Cr_Ka']) - _ = s.quantification(intensities, method='CL', factors=[1.0, 1.0]) + intensities = m.get_lines_intensity(["Fe_Ka", "Cr_Ka"]) + _ = s.quantification(intensities, method="CL", factors=[1.0, 1.0]) def test_comparison_quantification(): kfactors = [1.450226, 5.075602] # For Fe Ka and Pt La s = exspy.data.EDS_TEM_FePt_nanoparticles() - s.add_elements(['Cu']) # to get good estimation of the background + s.add_elements(["Cu"]) # to get good estimation of the background m = s.create_model(True) m.set_signal_range(5.5, 10.0) # to get good fit m.fit() - intensities_m = m.get_lines_intensity(xray_lines=['Fe_Ka', "Pt_La"]) - quant_model = s.quantification(intensities_m, method='CL', - factors=kfactors) + intensities_m = m.get_lines_intensity(xray_lines=["Fe_Ka", "Pt_La"]) + quant_model = s.quantification(intensities_m, method="CL", factors=kfactors) # Background substracted EDS quantification s2 = exspy.data.EDS_TEM_FePt_nanoparticles() s2.add_lines() bw = s2.estimate_background_windows( - xray_lines=['Fe_Ka', "Pt_La"], line_width=[5.0, 2.0] - ) + xray_lines=["Fe_Ka", "Pt_La"], line_width=[5.0, 2.0] + ) intensities = s2.get_lines_intensity( - xray_lines=['Fe_Ka', "Pt_La"], background_windows=bw - ) - atomic_percent = s2.quantification(intensities, method='CL', - factors=kfactors) + xray_lines=["Fe_Ka", "Pt_La"], background_windows=bw + ) + atomic_percent = s2.quantification(intensities, method="CL", factors=kfactors) - np.testing.assert_allclose([q.data for q in quant_model], - [q.data for q in atomic_percent], - rtol=5E-2) + np.testing.assert_allclose( + [q.data for q in quant_model], [q.data for q in atomic_percent], rtol=5e-2 + ) @lazifyTestClass class TestMaps: - def setup_method(self, method): beam_energy = 200 energy_resolution_MnKa = 130 - energy_axis = {'units': 'keV', 'size': 1200, 'scale': 0.01, - 'name': 'E'} + energy_axis = {"units": "keV", "size": 1200, "scale": 0.01, "name": "E"} s1 = utils_eds.xray_lines_model( - elements=['Fe', 'Cr'], weight_percents=[30, 70], + elements=["Fe", "Cr"], + weight_percents=[30, 70], beam_energy=beam_energy, energy_resolution_MnKa=energy_resolution_MnKa, - energy_axis=energy_axis) + energy_axis=energy_axis, + ) s2 = utils_eds.xray_lines_model( - elements=['Ga', 'As'], weight_percents=[50, 50], + elements=["Ga", "As"], + weight_percents=[50, 50], beam_energy=beam_energy, energy_resolution_MnKa=energy_resolution_MnKa, - energy_axis=energy_axis) + energy_axis=energy_axis, + ) - mix = np.linspace(0., 1., 4).reshape(2, 2) + mix = np.linspace(0.0, 1.0, 4).reshape(2, 2) mix_data = np.tile(s1.data, mix.shape + (1,)) s = s1._deepcopy_with_new_data(mix_data) a = s.axes_manager._axes.pop(0).get_axis_dictionary() - s.axes_manager.create_axes([{'size': mix.shape[0], - 'navigate': True}] * 2 + [a]) + s.axes_manager.create_axes([{"size": mix.shape[0], "navigate": True}] * 2 + [a]) s.add_elements(s2.metadata.Sample.elements) for d, m in zip(s._iterate_signal(), mix.flatten()): @@ -298,7 +309,8 @@ def test_lines_intensity(self): mix = self.mix[x, y] if i in (1, 2) else 1 - self.mix[x, y] w[i, x, y] = ws[i] * mix xray_lines = s._get_lines_from_elements( - s.metadata.Sample.elements, only_lines=('Ka',)) + 
s.metadata.Sample.elements, only_lines=("Ka",) + ) if s._lazy: s.compute() for fitted, expected in zip(m.get_lines_intensity(xray_lines), w): @@ -310,6 +322,7 @@ def test_lines_intensity(self): m_single_fit.inav[0, 0].fit() for fitted, expected in zip( - m.inav[0, 0].get_lines_intensity(xray_lines), - m_single_fit.inav[0, 0].get_lines_intensity(xray_lines)): + m.inav[0, 0].get_lines_intensity(xray_lines), + m_single_fit.inav[0, 0].get_lines_intensity(xray_lines), + ): np.testing.assert_allclose(fitted, expected, atol=1e-7) diff --git a/exspy/test/models/test_eelsmodel.py b/exspy/test/models/test_eelsmodel.py index 562cce70a..8ebbb2681 100644 --- a/exspy/test/models/test_eelsmodel.py +++ b/exspy/test/models/test_eelsmodel.py @@ -37,11 +37,12 @@ # when the test is lazy. When the test is not lazy, # internal use of np.errstate means the warnings never # appear in the first place. -@pytest.mark.filterwarnings("ignore:invalid value encountered in subtract:RuntimeWarning") +@pytest.mark.filterwarnings( + "ignore:invalid value encountered in subtract:RuntimeWarning" +) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log:RuntimeWarning") @lazifyTestClass class TestCreateEELSModel: - def setup_method(self, method): s = EELSSpectrum(np.zeros(200)) s.set_microscope_parameters(100, 10, 10) @@ -51,6 +52,7 @@ def setup_method(self, method): def test_create_eelsmodel(self): from exspy.models.eelsmodel import EELSModel + assert isinstance(self.s.create_model(), EELSModel) def test_create_eelsmodel_no_md(self): @@ -87,12 +89,14 @@ def test_gos_file(self): def test_auto_add_background_true(self): m = self.s.create_model(auto_background=True) from hyperspy.components1d import PowerLaw + is_pl_instance = [isinstance(c, PowerLaw) for c in m] assert True in is_pl_instance def test_auto_add_edges_false(self): m = self.s.create_model(auto_background=False) from hyperspy.components1d import PowerLaw + is_pl_instance = [isinstance(c, PowerLaw) for c in m] assert not True in is_pl_instance @@ -122,7 +126,6 @@ def test_low_loss_bad_shape(self): @lazifyTestClass class TestEELSModel: - def setup_method(self, method): s = EELSSpectrum(np.ones(200)) s.set_microscope_parameters(100, 10, 10) @@ -133,7 +136,7 @@ def setup_method(self, method): def test_suspend_auto_fsw(self): m = self.m - m["B_K"].fine_structure_width = 140. + m["B_K"].fine_structure_width = 140.0 m.suspend_auto_fine_structure_width() m.enable_fine_structure() m.resolve_fine_structure() @@ -141,11 +144,14 @@ def test_suspend_auto_fsw(self): def test_resume_fsw(self): m = self.m - m["B_K"].fine_structure_width = 140. 
+ m["B_K"].fine_structure_width = 140.0 m.suspend_auto_fine_structure_width() m.resume_auto_fine_structure_width() - window = (m["C_K"].onset_energy.value - - m["B_K"].onset_energy.value - m._preedge_safe_window_width) + window = ( + m["C_K"].onset_energy.value + - m["B_K"].onset_energy.value + - m._preedge_safe_window_width + ) m.enable_fine_structure() m.resolve_fine_structure() assert window == m["B_K"].fine_structure_width @@ -158,13 +164,17 @@ def test_disable_fine_structure(self): assert not self.m.components.B_K.fine_structure_active def test_get_first_ionization_edge_energy_C_B(self): - assert (self.m._get_first_ionization_edge_energy() == - self.m["B_K"].onset_energy.value) + assert ( + self.m._get_first_ionization_edge_energy() + == self.m["B_K"].onset_energy.value + ) def test_get_first_ionization_edge_energy_C(self): self.m["B_K"].active = False - assert (self.m._get_first_ionization_edge_energy() == - self.m["C_K"].onset_energy.value) + assert ( + self.m._get_first_ionization_edge_energy() + == self.m["C_K"].onset_energy.value + ) def test_get_first_ionization_edge_energy_None(self): self.m["B_K"].active = False @@ -172,53 +182,50 @@ def test_get_first_ionization_edge_energy_None(self): assert self.m._get_first_ionization_edge_energy() is None def test_two_area_powerlaw_estimation_BC(self): - self.m.signal.data = 2. * self.m.axis.axis ** (-3) # A= 2, r=3 - #self.m.signal.axes_manager[-1].is_binned = False + self.m.signal.data = 2.0 * self.m.axis.axis ** (-3) # A= 2, r=3 + # self.m.signal.axes_manager[-1].is_binned = False self.m.two_area_background_estimation() np.testing.assert_allclose( - self.m._background_components[0].A.value, - 2.1451237089380295) + self.m._background_components[0].A.value, 2.1451237089380295 + ) np.testing.assert_allclose( - self.m._background_components[0].r.value, - 3.0118980767392736) + self.m._background_components[0].r.value, 3.0118980767392736 + ) def test_two_area_powerlaw_estimation_C(self): self.m["B_K"].active = False - self.m.signal.data = 2. * self.m.axis.axis ** (-3) # A= 2, r=3 - #self.m.signal.axes_manager[-1].is_binned = False + self.m.signal.data = 2.0 * self.m.axis.axis ** (-3) # A= 2, r=3 + # self.m.signal.axes_manager[-1].is_binned = False self.m.two_area_background_estimation() np.testing.assert_allclose( - self.m._background_components[0].A.value, - 2.3978438900878087) + self.m._background_components[0].A.value, 2.3978438900878087 + ) np.testing.assert_allclose( - self.m._background_components[0].r.value, - 3.031884021065014) + self.m._background_components[0].r.value, 3.031884021065014 + ) def test_two_area_powerlaw_estimation_no_edge(self): self.m["B_K"].active = False self.m["C_K"].active = False - self.m.signal.data = 2. 
* self.m.axis.axis ** (-3) # A= 2, r=3 + self.m.signal.data = 2.0 * self.m.axis.axis ** (-3) # A= 2, r=3 print(self.m.signal.axes_manager[-1].is_binned) - #self.m.signal.axes_manager[-1].is_binned = False + # self.m.signal.axes_manager[-1].is_binned = False self.m.two_area_background_estimation() np.testing.assert_allclose( - self.m._background_components[0].A.value, - 2.6598803469440986) + self.m._background_components[0].A.value, 2.6598803469440986 + ) np.testing.assert_allclose( - self.m._background_components[0].r.value, - 3.0494030409062058) + self.m._background_components[0].r.value, 3.0494030409062058 + ) def test_get_start_energy_none(self): - assert (self.m._get_start_energy() == - 150) + assert self.m._get_start_energy() == 150 def test_get_start_energy_above(self): - assert (self.m._get_start_energy(170) == - 170) + assert self.m._get_start_energy(170) == 170 def test_get_start_energy_below(self): - assert (self.m._get_start_energy(100) == - 150) + assert self.m._get_start_energy(100) == 150 def test_remove_components(self): comp = self.m[1] @@ -265,7 +272,10 @@ def test_quantify(self): with contextlib.redirect_stdout(f): self.m.quantify() out = f.getvalue() - assert out == '\nAbsolute quantification:\nElem.\tIntensity\nB\t1.000000\nC\t1.000000\n' + assert ( + out + == "\nAbsolute quantification:\nElem.\tIntensity\nB\t1.000000\nC\t1.000000\n" + ) def test_enable_edges(self): m = self.m @@ -384,14 +394,13 @@ def test_free_fine_structure(self): @lazifyTestClass class TestEELSModelFitting: - def setup_method(self, method): data = np.zeros(200) data[25:] = 100 s = EELSSpectrum(data) s.set_microscope_parameters(100, 10, 10) s.axes_manager[-1].offset = 150 - s.add_elements(("B", )) + s.add_elements(("B",)) self.m = s.create_model(auto_background=False) @pytest.mark.parametrize("kind", ["std", "smart"]) @@ -424,7 +433,6 @@ def test_fix_edges(self, kind): @lazifyTestClass class TestFitBackground: - def setup_method(self, method): s = EELSSpectrum(np.ones(200)) s.set_microscope_parameters(100, 10, 10) @@ -439,16 +447,14 @@ def setup_method(self, method): def test_fit_background_B_C(self): self.m.fit_background() - np.testing.assert_allclose(self.m["Offset"].offset.value, - 1) + np.testing.assert_allclose(self.m["Offset"].offset.value, 1) assert self.m["B_K"].active assert self.m["C_K"].active def test_fit_background_C(self): self.m["B_K"].active = False self.m.fit_background() - np.testing.assert_allclose(self.m["Offset"].offset.value, - 1.7142857) + np.testing.assert_allclose(self.m["Offset"].offset.value, 1.7142857) assert not self.m["B_K"].active assert self.m["C_K"].active @@ -456,15 +462,13 @@ def test_fit_background_no_edge(self): self.m["B_K"].active = False self.m["C_K"].active = False self.m.fit_background() - np.testing.assert_allclose(self.m["Offset"].offset.value, - 2.14) + np.testing.assert_allclose(self.m["Offset"].offset.value, 2.14) assert not self.m["B_K"].active assert not self.m["C_K"].active @lazifyTestClass class TestFitBackground2D: - def setup_method(self): pl = hs.model.components1D.PowerLaw() data = np.empty((2, 250)) @@ -482,9 +486,9 @@ def test_only_current_false(self): residual = self.s - self.m.as_signal() assert pytest.approx(residual.data) == 0 + @lazifyTestClass class TestEELSFineStructure: - def setup_method(self, method): s = EELSSpectrum(np.zeros((1024))) s.axes_manager[0].units = "eV" @@ -516,11 +520,10 @@ def test_fs_components_inherit_fs_active(self, fine_structure_active): self.m.components.Fe_L3.fine_structure_components.update((self.g1, self.g2)) 
for component in self.m.components.Fe_L3.fine_structure_components: assert component.active == fine_structure_active - self.m.components.Fe_L3.fine_structure_active = (not fine_structure_active) + self.m.components.Fe_L3.fine_structure_active = not fine_structure_active for component in self.m.components.Fe_L3.fine_structure_components: assert component.active == (not fine_structure_active) - def test_fine_structure_smoothing(self): Fe = self.m.components.Fe_L3 Fe.fine_structure_active = True @@ -558,7 +561,6 @@ def test_free_fix_fine_structure(self): else: assert not parameter.free - def test_fine_structure_active_frees_coeff(self): Fe = self.m.components.Fe_L3 Fe.fine_structure_active = True @@ -590,15 +592,30 @@ def test_fine_structure_spline(self): Fe.fine_structure_spline_active = True Fe.fine_structure_width = 30 onset = Fe.onset_energy.value - axis1 = np.linspace(Fe.onset_energy.value, Fe.onset_energy.value + Fe.fine_structure_width, endpoint=False) + axis1 = np.linspace( + Fe.onset_energy.value, + Fe.onset_energy.value + Fe.fine_structure_width, + endpoint=False, + ) assert np.all(Fe.function(axis1) == 0) - Fe.fine_structure_coeff.value = np.arange(len(Fe.fine_structure_coeff.value)) + 1 + Fe.fine_structure_coeff.value = ( + np.arange(len(Fe.fine_structure_coeff.value)) + 1 + ) assert np.all(Fe.function(axis1) != 0) Fe.fine_structure_spline_onset = 10 - Fe.fine_structure_coeff.value = np.arange(len(Fe.fine_structure_coeff.value)) + 1 - axis2 = np.linspace(Fe.onset_energy.value, Fe.onset_energy.value + Fe.fine_structure_spline_onset, endpoint=False) - axis3 = np.linspace(Fe.onset_energy.value + Fe.fine_structure_spline_onset, - Fe.onset_energy.value + Fe.fine_structure_width, endpoint=False) + Fe.fine_structure_coeff.value = ( + np.arange(len(Fe.fine_structure_coeff.value)) + 1 + ) + axis2 = np.linspace( + Fe.onset_energy.value, + Fe.onset_energy.value + Fe.fine_structure_spline_onset, + endpoint=False, + ) + axis3 = np.linspace( + Fe.onset_energy.value + Fe.fine_structure_spline_onset, + Fe.onset_energy.value + Fe.fine_structure_width, + endpoint=False, + ) assert np.all(Fe.function(axis2) == 0) assert np.all(Fe.function(axis3) != 0) @@ -607,7 +624,9 @@ def test_model_store_restore(self): Fe.fine_structure_active = True Fe.fine_structure_components.update((self.g1, self.g2)) Fe.fine_structure_spline_onset = 20 - Fe.fine_structure_coeff.value = np.arange(len(Fe.fine_structure_coeff.value)) + 1 + Fe.fine_structure_coeff.value = ( + np.arange(len(Fe.fine_structure_coeff.value)) + 1 + ) m = self.m m.store() mc = m.signal.models.a.restore() @@ -660,6 +679,7 @@ def test_jacobian_convolved(self): assert m[1].centre.value == 4 assert m[1].sigma.value == 5 + class TestModelSettingPZero: def setup_method(self, method): s = EELSSpectrum(np.empty(1)) @@ -687,13 +707,11 @@ def test_calculating_convolution_axis(self): np.testing.assert_equal(ll_axis.value2index.call_args[0][0], 0) - @lazifyTestClass class TestConvolveModelSlicing: - def setup_method(self, method): s = EELSSpectrum(np.random.random((10, 10, 600))) - s.axes_manager[-1].offset = -150. 
+ s.axes_manager[-1].offset = -150.0 s.axes_manager[-1].scale = 0.5 m = s.create_model(auto_add_edges=False, auto_background=False) m.low_loss = s + 1 @@ -711,8 +729,8 @@ def test_slicing_low_loss_isig(self): m1 = m.isig[::2] assert m.signal.data.shape == m1.low_loss.data.shape -class TestModelDictionary: +class TestModelDictionary: def setup_method(self, method): s = EELSSpectrum(np.array([1.0, 2, 4, 7, 12, 7, 4, 2, 1])) m = s.create_model(auto_add_edges=False, auto_background=False) @@ -730,7 +748,7 @@ def test_to_dictionary(self): m = self.model d = m.as_dictionary() - np.testing.assert_allclose(m.low_loss.data, d['low_loss']['data']) + np.testing.assert_allclose(m.low_loss.data, d["low_loss"]["data"]) def test_load_dictionary(self): d = self.model.as_dictionary() @@ -739,13 +757,11 @@ def test_load_dictionary(self): mn._load_dictionary(d) mo = self.model - np.testing.assert_allclose( - mn.low_loss.data, mo.low_loss.data - ) + np.testing.assert_allclose(mn.low_loss.data, mo.low_loss.data) for i in range(len(mn)): assert mn[i]._id_name == mo[i]._id_name for po, pn in zip(mo[i].parameters, mn[i].parameters): - np.testing.assert_allclose(po.map['values'], pn.map['values']) - np.testing.assert_allclose(po.map['is_set'], pn.map['is_set']) + np.testing.assert_allclose(po.map["values"], pn.map["values"]) + np.testing.assert_allclose(po.map["is_set"], pn.map["is_set"]) assert mn[0].A.twin is mn[1].A diff --git a/exspy/test/models/test_linear_model.py b/exspy/test/models/test_linear_model.py index 66d2a525f..d510f3b9a 100644 --- a/exspy/test/models/test_linear_model.py +++ b/exspy/test/models/test_linear_model.py @@ -27,48 +27,45 @@ from exspy.signals import EELSSpectrum - @lazifyTestClass class TestLinearEELSFitting: - def setup_method(self, method): ll = exspy.data.EELS_low_loss(navigation_shape=()) cl = exspy.data.EELS_MnFe(add_powerlaw=False, navigation_shape=()) m = cl.create_model(auto_background=False) - m[0].onset_energy.value = 637. + m[0].onset_energy.value = 637.0 m_convolved = cl.create_model(auto_background=False, low_loss=ll) - m_convolved[0].onset_energy.value = 637. 
+ m_convolved[0].onset_energy.value = 637.0 self.ll, self.cl = ll, cl self.m, self.m_convolved = m, m_convolved def test_convolved_and_std_error(self): m = self.m_convolved - m.fit(optimizer='lstsq') + m.fit(optimizer="lstsq") linear = m.as_signal() std_linear = m.p_std - m.fit(optimizer='lm') + m.fit(optimizer="lm") lm = m.as_signal() std_lm = m.p_std diff = linear - lm - np.testing.assert_allclose(diff.data.sum(), 0.0, atol=5E-6) + np.testing.assert_allclose(diff.data.sum(), 0.0, atol=5e-6) np.testing.assert_allclose(std_linear, std_lm) def test_nonconvolved(self): m = self.m - m.fit(optimizer='lstsq') + m.fit(optimizer="lstsq") linear = m.as_signal() - m.fit(optimizer='lm') + m.fit(optimizer="lm") lm = m.as_signal() diff = linear - lm - np.testing.assert_allclose(diff.data.sum(), 0.0, atol=5E-6) + np.testing.assert_allclose(diff.data.sum(), 0.0, atol=5e-6) class TestTwinnedComponents: - def setup_method(self): s = exspy.data.EDS_SEM_TM002() m = s.create_model() - m2 = s.isig[5.:15.].create_model() + m2 = s.isig[5.0:15.0].create_model() self.m = m self.m2 = m2 @@ -80,15 +77,14 @@ def test_fixed_chained_twinned_components(self): m[4].A.free = False m.fit(optimizer="lstsq") B = m.as_signal() - np.testing.assert_allclose(A.data, B.data, rtol=5E-5) + np.testing.assert_allclose(A.data, B.data, rtol=5e-5) def test_fit_fixed_twinned_components_and_std(self): m = self.m2 m[1].A.free = False - m.fit(optimizer='lstsq') + m.fit(optimizer="lstsq") lstsq_fit = m.as_signal() - nonlinear_parameters = [p for c in m for p in c.parameters - if not p._linear] + nonlinear_parameters = [p for c in m for p in c.parameters if not p._linear] linear_std = [para.std for para in nonlinear_parameters if para.std] m.fit() @@ -98,8 +94,8 @@ def test_fit_fixed_twinned_components_and_std(self): np.testing.assert_allclose(nonlinear_fit.data, lstsq_fit.data) np.testing.assert_allclose(nonlinear_std, linear_std) -class TestWarningSlowMultifit: +class TestWarningSlowMultifit: def setup_method(self, method): s = hs.datasets.two_gaussians().inav[0] s.set_signal_type("EELS") @@ -110,9 +106,9 @@ def setup_method(self, method): # make dummy twinning g2.centre.twin = g1.centre - g2.centre.twin_function_expr = '15 + x' + g2.centre.twin_function_expr = "15 + x" g2.A.twin = g1.A - g2.centre.twin_function_expr = '2 * x' + g2.centre.twin_function_expr = "2 * x" m.set_parameters_not_free(only_nonlinear=True) @@ -124,11 +120,11 @@ def test_convolved(self): m.low_loss = s2 m.convolved = True with pytest.warns(UserWarning, match="convolution is not supported"): - m.multifit(optimizer='lstsq') + m.multifit(optimizer="lstsq") -@pytest.mark.parametrize('multiple_free_parameters', (True, False)) -@pytest.mark.parametrize('nav_dim', (0, 1, 2)) +@pytest.mark.parametrize("multiple_free_parameters", (True, False)) +@pytest.mark.parametrize("nav_dim", (0, 1, 2)) def test_expression_convolved(nav_dim, multiple_free_parameters): s_ref = EELSSpectrum(np.ones(100)) @@ -145,11 +141,11 @@ def test_expression_convolved(nav_dim, multiple_free_parameters): s = m_ref.as_signal() if nav_dim >= 1: - s = hs.stack([s]*2) - to_convolve = hs.stack([to_convolve]*2) + s = hs.stack([s] * 2) + to_convolve = hs.stack([to_convolve] * 2) if nav_dim == 2: - s = hs.stack([s]*3) - to_convolve = hs.stack([to_convolve]*3) + s = hs.stack([s] * 3) + to_convolve = hs.stack([to_convolve] * 3) m = s.create_model(auto_add_edges=False, auto_background=False) l = Lorentzian(centre=20, gamma=4) @@ -159,16 +155,16 @@ def test_expression_convolved(nav_dim, multiple_free_parameters): 
assert m.convolved m.set_parameters_not_free(only_nonlinear=True) with pytest.warns(UserWarning): - m.multifit(optimizer='lstsq') + m.multifit(optimizer="lstsq") np.testing.assert_allclose(l_ref.A.value, l.A.value) np.testing.assert_allclose(l_ref.centre.value, l.centre.value) np.testing.assert_allclose(l_ref.gamma.value, l.gamma.value) np.testing.assert_allclose(m.as_signal().data, s.data) if nav_dim in (1, 2): - np.testing.assert_allclose(l.A.map['values'].mean(), l_ref.A.value) - np.testing.assert_allclose(l.centre.map['values'].mean(), l_ref.centre.value) - np.testing.assert_allclose(l.gamma.map['values'].mean(), l_ref.gamma.value) + np.testing.assert_allclose(l.A.map["values"].mean(), l_ref.A.value) + np.testing.assert_allclose(l.centre.map["values"].mean(), l_ref.centre.value) + np.testing.assert_allclose(l.gamma.map["values"].mean(), l_ref.gamma.value) @pytest.mark.parametrize("nav_dim", (0, 1, 2)) @@ -194,13 +190,13 @@ def test_expression_multiple_linear_parameter(nav_dim, convolve): s = m_ref.as_signal() if nav_dim >= 1: - s = hs.stack([s]*2) + s = hs.stack([s] * 2) if convolve: - to_convolve = hs.stack([to_convolve]*2) + to_convolve = hs.stack([to_convolve] * 2) if nav_dim == 2: - s = hs.stack([s]*3) + s = hs.stack([s] * 3) if convolve: - to_convolve = hs.stack([to_convolve]*3) + to_convolve = hs.stack([to_convolve] * 3) m = s.create_model(auto_add_edges=False, auto_background=False) p = hs.model.components1D.Polynomial(order=2) @@ -209,21 +205,21 @@ def test_expression_multiple_linear_parameter(nav_dim, convolve): if convolve: m.low_loss = to_convolve with pytest.warns(UserWarning): - m.multifit(optimizer='lstsq') + m.multifit(optimizer="lstsq") else: - m.multifit(optimizer='lstsq') + m.multifit(optimizer="lstsq") np.testing.assert_allclose(p_ref.a0.value, p.a0.value) np.testing.assert_allclose(p_ref.a1.value, p.a1.value) np.testing.assert_allclose(p_ref.a2.value, p.a2.value) np.testing.assert_allclose(m.as_signal().data, s.data) if nav_dim >= 1: - np.testing.assert_allclose(p.a0.map['values'].mean(), p_ref.a0.value) - np.testing.assert_allclose(p.a1.map['values'].mean(), p_ref.a1.value) - np.testing.assert_allclose(p.a2.map['values'].mean(), p_ref.a2.value) + np.testing.assert_allclose(p.a0.map["values"].mean(), p_ref.a0.value) + np.testing.assert_allclose(p.a1.map["values"].mean(), p_ref.a1.value) + np.testing.assert_allclose(p.a2.map["values"].mean(), p_ref.a2.value) -@pytest.mark.parametrize('nav_dim', (0, 1, 2)) +@pytest.mark.parametrize("nav_dim", (0, 1, 2)) def test_multiple_linear_parameters_convolution(nav_dim): s_ref = EELSSpectrum(np.ones(1000)) @@ -241,11 +237,11 @@ def test_multiple_linear_parameters_convolution(nav_dim): s = m_ref.as_signal() if nav_dim >= 1: - s = hs.stack([s]*2) - to_convolve = hs.stack([to_convolve]*2) + s = hs.stack([s] * 2) + to_convolve = hs.stack([to_convolve] * 2) if nav_dim == 2: - s = hs.stack([s]*3) - to_convolve = hs.stack([to_convolve]*3) + s = hs.stack([s] * 3) + to_convolve = hs.stack([to_convolve] * 3) m = s.create_model(auto_add_edges=False, auto_background=False) l1 = Lorentzian(centre=200, gamma=10) @@ -256,7 +252,7 @@ def test_multiple_linear_parameters_convolution(nav_dim): assert m.convolved m.set_parameters_not_free(only_nonlinear=True) with pytest.warns(UserWarning): - m.multifit(optimizer='lstsq') + m.multifit(optimizer="lstsq") np.testing.assert_allclose(l_ref1.A.value, l1.A.value) np.testing.assert_allclose(l_ref1.centre.value, l1.centre.value) @@ -266,9 +262,9 @@ def test_multiple_linear_parameters_convolution(nav_dim): 
np.testing.assert_allclose(l_ref2.gamma.value, l2.gamma.value) np.testing.assert_allclose(m.as_signal().data, s.data) if nav_dim >= 1: - np.testing.assert_allclose(l1.A.map['values'].mean(), l_ref1.A.value) - np.testing.assert_allclose(l1.centre.map['values'].mean(), l_ref1.centre.value) - np.testing.assert_allclose(l1.gamma.map['values'].mean(), l_ref1.gamma.value) - np.testing.assert_allclose(l2.A.map['values'].mean(), l_ref2.A.value) - np.testing.assert_allclose(l2.centre.map['values'].mean(), l_ref2.centre.value) - np.testing.assert_allclose(l2.gamma.map['values'].mean(), l_ref2.gamma.value) + np.testing.assert_allclose(l1.A.map["values"].mean(), l_ref1.A.value) + np.testing.assert_allclose(l1.centre.map["values"].mean(), l_ref1.centre.value) + np.testing.assert_allclose(l1.gamma.map["values"].mean(), l_ref1.gamma.value) + np.testing.assert_allclose(l2.A.map["values"].mean(), l_ref2.A.value) + np.testing.assert_allclose(l2.centre.map["values"].mean(), l_ref2.centre.value) + np.testing.assert_allclose(l2.gamma.map["values"].mean(), l_ref2.gamma.value) diff --git a/exspy/test/signals/test_assign_subclass.py b/exspy/test/signals/test_assign_subclass.py index 4182fd06e..2d48d2b31 100644 --- a/exspy/test/signals/test_assign_subclass.py +++ b/exspy/test/signals/test_assign_subclass.py @@ -1,4 +1,3 @@ - import numpy as np import pytest @@ -10,7 +9,6 @@ class TestConvertSigna: - def setup_method(self, method): self.s = hs.signals.Signal1D([0, 1]) @@ -45,7 +43,6 @@ def test_error_None(self): class TestConvertComplexSignal1D: - def setup_method(self, method): self.s = hs.signals.ComplexSignal1D([0, 1]) @@ -75,7 +72,6 @@ def test_complex_to_dielectric_function(): @lazifyTestClass class Test1d: - def setup_method(self, method): self.s = hs.signals.BaseSignal([0, 1, 2]) @@ -92,7 +88,6 @@ def test_set_EELS(self): @lazifyTestClass class Test2d: - def setup_method(self, method): self.s = hs.signals.BaseSignal(np.random.random((2, 3))) # (|3, 2) @@ -106,8 +101,10 @@ def test_s2EELS2im2s(self): assert s.metadata.Signal.signal_type == "EELS" if s._lazy: from exspy.signals import LazyEELSSpectrum + _class = LazyEELSSpectrum else: from exspy.signals import EELSSpectrum + _class = EELSSpectrum assert isinstance(s, _class) diff --git a/exspy/test/signals/test_binned.py b/exspy/test/signals/test_binned.py index adaa10d95..6d597c8b5 100644 --- a/exspy/test/signals/test_binned.py +++ b/exspy/test/signals/test_binned.py @@ -45,4 +45,3 @@ def test_eds_tem_binned_default(): def test_eds_sem_binned_default(): s = exspy.signals.EDSSEMSpectrum([0]) assert s.axes_manager[-1].is_binned - diff --git a/exspy/test/signals/test_edges_range.py b/exspy/test/signals/test_edges_range.py index 05dd8d315..6c89032f1 100644 --- a/exspy/test/signals/test_edges_range.py +++ b/exspy/test/signals/test_edges_range.py @@ -38,68 +38,324 @@ def setup_method(self, method): self.er = er def test_init(self): - edges_all = np.array([ - 'Ag_M2', 'Ra_N5', 'Fr_N4', 'Cr_L2', 'Cd_M3', 'Te_M4', 'I_M5', - 'Fr_N5', 'Cr_L3', 'Te_M5', 'V_L1', 'Ag_M3', 'I_M4', 'Rn_N4', - 'Ti_L1', 'Ra_N4', 'Mn_L3', 'Pd_M2', 'Cd_M2', 'Mn_L2', 'Tc_M1', - 'Sb_M4', 'In_M3', 'At_N5', 'O_K', 'Pd_M3', 'Sb_M5', 'Xe_M5', - 'Ac_N4', 'Rh_M2', 'V_L2', 'F_K', 'Xe_M4', 'V_L3', 'Cr_L1', 'Sc_L1', - 'In_M2', 'Rh_M3', 'Sn_M4', 'Pa_N5', 'Fe_L3', 'Sn_M3', 'Sn_M5', - 'Ru_M2', 'Fe_L2', 'Cs_M5', 'Ti_L2', 'Ru_M3', 'Cs_M4', 'At_N3', - 'Pa_N4', 'Ti_L3', 'In_M4', 'Pu_N6', 'Tc_M2', 'Sn_M2', 'In_M5', - 'Ca_L1', 'Sb_M3', 'Pu_N7', 'Rn_N3', 'Mn_L1', 'Np_N5', 'Tc_M3', - 'Co_L3', 'Ba_M5', 'Np_N6', 
'Cd_M4', 'Mo_M2', 'Sc_L2', 'Co_L2', - 'Cd_M5', 'Np_N7', 'Ba_M4', 'Sc_L3', 'N_K' + edges_all = np.array( + [ + "Ag_M2", + "Ra_N5", + "Fr_N4", + "Cr_L2", + "Cd_M3", + "Te_M4", + "I_M5", + "Fr_N5", + "Cr_L3", + "Te_M5", + "V_L1", + "Ag_M3", + "I_M4", + "Rn_N4", + "Ti_L1", + "Ra_N4", + "Mn_L3", + "Pd_M2", + "Cd_M2", + "Mn_L2", + "Tc_M1", + "Sb_M4", + "In_M3", + "At_N5", + "O_K", + "Pd_M3", + "Sb_M5", + "Xe_M5", + "Ac_N4", + "Rh_M2", + "V_L2", + "F_K", + "Xe_M4", + "V_L3", + "Cr_L1", + "Sc_L1", + "In_M2", + "Rh_M3", + "Sn_M4", + "Pa_N5", + "Fe_L3", + "Sn_M3", + "Sn_M5", + "Ru_M2", + "Fe_L2", + "Cs_M5", + "Ti_L2", + "Ru_M3", + "Cs_M4", + "At_N3", + "Pa_N4", + "Ti_L3", + "In_M4", + "Pu_N6", + "Tc_M2", + "Sn_M2", + "In_M5", + "Ca_L1", + "Sb_M3", + "Pu_N7", + "Rn_N3", + "Mn_L1", + "Np_N5", + "Tc_M3", + "Co_L3", + "Ba_M5", + "Np_N6", + "Cd_M4", + "Mo_M2", + "Sc_L2", + "Co_L2", + "Cd_M5", + "Np_N7", + "Ba_M4", + "Sc_L3", + "N_K", ] ) - energy_all = np.array([ - 602., 603., 603., 584., 616., 582., 620., 577., 575., 572., 628., - 571., 631., 567., 564., 636., 640., 559., 651., 651., 544., 537., - 664., 533., 532., 531., 528., 672., 675., 521., 521., 685., 685., - 513., 695., 500., 702., 496., 494., 708., 708., 714., 485., 483., - 721., 726., 462., 461., 740., 740., 743., 456., 451., 446., 445., - 756., 443., 438., 766., 432., 768., 769., 770., 425., 779., 781., - 415., 411., 410., 407., 794., 404., 404., 796., 402., 401. + energy_all = np.array( + [ + 602.0, + 603.0, + 603.0, + 584.0, + 616.0, + 582.0, + 620.0, + 577.0, + 575.0, + 572.0, + 628.0, + 571.0, + 631.0, + 567.0, + 564.0, + 636.0, + 640.0, + 559.0, + 651.0, + 651.0, + 544.0, + 537.0, + 664.0, + 533.0, + 532.0, + 531.0, + 528.0, + 672.0, + 675.0, + 521.0, + 521.0, + 685.0, + 685.0, + 513.0, + 695.0, + 500.0, + 702.0, + 496.0, + 494.0, + 708.0, + 708.0, + 714.0, + 485.0, + 483.0, + 721.0, + 726.0, + 462.0, + 461.0, + 740.0, + 740.0, + 743.0, + 456.0, + 451.0, + 446.0, + 445.0, + 756.0, + 443.0, + 438.0, + 766.0, + 432.0, + 768.0, + 769.0, + 770.0, + 425.0, + 779.0, + 781.0, + 415.0, + 411.0, + 410.0, + 407.0, + 794.0, + 404.0, + 404.0, + 796.0, + 402.0, + 401.0, ] ) - relevance_all = np.array([ - 'Minor', 'Minor', 'Minor', 'Major', 'Minor', 'Major', 'Major', - 'Minor', 'Major', 'Major', 'Minor', 'Minor', 'Major', 'Minor', - 'Minor', 'Minor', 'Major', 'Minor', 'Minor', 'Major', 'Minor', - 'Major', 'Minor', 'Minor', 'Major', 'Minor', 'Major', 'Major', - 'Minor', 'Minor', 'Major', 'Major', 'Major', 'Major', 'Minor', - 'Minor', 'Minor', 'Minor', 'Major', 'Minor', 'Major', 'Minor', - 'Major', 'Minor', 'Major', 'Major', 'Major', 'Minor', 'Major', - 'Minor', 'Minor', 'Major', 'Major', 'Major', 'Minor', 'Minor', - 'Major', 'Minor', 'Minor', 'Major', 'Minor', 'Minor', 'Minor', - 'Minor', 'Major', 'Major', 'Major', 'Major', 'Minor', 'Major', - 'Major', 'Major', 'Major', 'Major', 'Major', 'Major' + relevance_all = np.array( + [ + "Minor", + "Minor", + "Minor", + "Major", + "Minor", + "Major", + "Major", + "Minor", + "Major", + "Major", + "Minor", + "Minor", + "Major", + "Minor", + "Minor", + "Minor", + "Major", + "Minor", + "Minor", + "Major", + "Minor", + "Major", + "Minor", + "Minor", + "Major", + "Minor", + "Major", + "Major", + "Minor", + "Minor", + "Major", + "Major", + "Major", + "Major", + "Minor", + "Minor", + "Minor", + "Minor", + "Major", + "Minor", + "Major", + "Minor", + "Major", + "Minor", + "Major", + "Major", + "Major", + "Minor", + "Major", + "Minor", + "Minor", + "Major", + "Major", + "Major", + "Minor", + "Minor", + "Major", + "Minor", + 
"Minor", + "Major", + "Minor", + "Minor", + "Minor", + "Minor", + "Major", + "Major", + "Major", + "Major", + "Minor", + "Major", + "Major", + "Major", + "Major", + "Major", + "Major", + "Major", ] ) - description_all = np.array([ - 'Delayed maximum', '', '', 'Sharp peak. Delayed maximum', - 'Delayed maximum', 'Delayed maximum', 'Delayed maximum', '', - 'Sharp peak. Delayed maximum', 'Delayed maximum', 'Abrupt onset', - 'Delayed maximum', 'Delayed maximum', '', 'Abrupt onset', '', - 'Sharp peak. Delayed maximum', '', '', - 'Sharp peak. Delayed maximum', 'Abrupt onset', 'Delayed maximum', - 'Delayed maximum', '', 'Abrupt onset', '', 'Delayed maximum', - 'Delayed maximum', '', 'Sharp peak', 'Sharp peak. Delayed maximum', - 'Abrupt onset', 'Delayed maximum', 'Sharp peak. Delayed maximum', - 'Abrupt onset', 'Abrupt onset', '', 'Sharp peak', - 'Delayed maximum', '', 'Sharp peak. Delayed maximum', - 'Delayed maximum', 'Delayed maximum', 'Sharp peak', - 'Sharp peak. Delayed maximum', 'Sharp peak. Delayed maximum', - 'Sharp peak. Delayed maximum', 'Sharp peak', - 'Sharp peak. Delayed maximum', '', '', - 'Sharp peak. Delayed maximum', 'Delayed maximum', '', - 'Sharp peak. Delayed maximum', '', 'Delayed maximum', - 'Abrupt onset', 'Delayed maximum', '', '', 'Abrupt onset', '', - 'Sharp peak. Delayed maximum', 'Sharp peak. Delayed maximum', - 'Sharp peak. Delayed maximum', '', 'Delayed maximum', 'Sharp peak', - 'Sharp peak. Delayed maximum', 'Sharp peak. Delayed maximum', - 'Delayed maximum', '', 'Sharp peak. Delayed maximum', - 'Sharp peak. Delayed maximum', 'Abrupt onset' + description_all = np.array( + [ + "Delayed maximum", + "", + "", + "Sharp peak. Delayed maximum", + "Delayed maximum", + "Delayed maximum", + "Delayed maximum", + "", + "Sharp peak. Delayed maximum", + "Delayed maximum", + "Abrupt onset", + "Delayed maximum", + "Delayed maximum", + "", + "Abrupt onset", + "", + "Sharp peak. Delayed maximum", + "", + "", + "Sharp peak. Delayed maximum", + "Abrupt onset", + "Delayed maximum", + "Delayed maximum", + "", + "Abrupt onset", + "", + "Delayed maximum", + "Delayed maximum", + "", + "Sharp peak", + "Sharp peak. Delayed maximum", + "Abrupt onset", + "Delayed maximum", + "Sharp peak. Delayed maximum", + "Abrupt onset", + "Abrupt onset", + "", + "Sharp peak", + "Delayed maximum", + "", + "Sharp peak. Delayed maximum", + "Delayed maximum", + "Delayed maximum", + "Sharp peak", + "Sharp peak. Delayed maximum", + "Sharp peak. Delayed maximum", + "Sharp peak. Delayed maximum", + "Sharp peak", + "Sharp peak. Delayed maximum", + "", + "", + "Sharp peak. Delayed maximum", + "Delayed maximum", + "", + "Sharp peak. Delayed maximum", + "", + "Delayed maximum", + "Abrupt onset", + "Delayed maximum", + "", + "", + "Abrupt onset", + "", + "Sharp peak. Delayed maximum", + "Sharp peak. Delayed maximum", + "Sharp peak. Delayed maximum", + "", + "Delayed maximum", + "Sharp peak", + "Sharp peak. Delayed maximum", + "Sharp peak. Delayed maximum", + "Delayed maximum", + "", + "Sharp peak. Delayed maximum", + "Sharp peak. 
Delayed maximum", + "Abrupt onset", ] ) @@ -115,28 +371,48 @@ def test_selected_span_selector(self): edges, energy, relevance, description = self.er.update_table() assert set(edges) == set( - ('Tc_M1', 'Sb_M4', 'At_N5', 'O_K', 'Pd_M3', 'Sb_M5', 'Rh_M2', - 'V_L2', 'V_L3', 'Sc_L1') + ( + "Tc_M1", + "Sb_M4", + "At_N5", + "O_K", + "Pd_M3", + "Sb_M5", + "Rh_M2", + "V_L2", + "V_L3", + "Sc_L1", + ) ) assert set(energy) == set( (544.0, 537.0, 533.0, 532.0, 531.0, 528.0, 521.0, 521.0, 513.0, 500.0) ) assert set(relevance) == set( - ('Minor', 'Major', 'Minor', 'Major', 'Minor', 'Major', 'Minor', - 'Major', 'Major', 'Minor') + ( + "Minor", + "Major", + "Minor", + "Major", + "Minor", + "Major", + "Minor", + "Major", + "Major", + "Minor", + ) ) assert set(description) == set( ( - 'Abrupt onset', - 'Delayed maximum', - '', - 'Abrupt onset', - '', - 'Delayed maximum', - 'Sharp peak', - 'Sharp peak. Delayed maximum', - 'Sharp peak. Delayed maximum', - 'Abrupt onset' + "Abrupt onset", + "Delayed maximum", + "", + "Abrupt onset", + "", + "Delayed maximum", + "Sharp peak", + "Sharp peak. Delayed maximum", + "Sharp peak. Delayed maximum", + "Abrupt onset", ) ) diff --git a/exspy/test/signals/test_eds_sem.py b/exspy/test/signals/test_eds_sem.py index af792b897..c177c72e8 100644 --- a/exspy/test/signals/test_eds_sem.py +++ b/exspy/test/signals/test_eds_sem.py @@ -32,7 +32,6 @@ @lazifyTestClass class Test_metadata: - def setup_method(self, method): # Create an empty spectrum s = EDSSEMSpectrum(np.ones((4, 2, 1024))) @@ -51,25 +50,28 @@ def test_sum_live_time(self): old_metadata = s.metadata.deepcopy() sSum = s.sum(0) assert ( - sSum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time == - s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time * 2) + sSum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time + == s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time * 2 + ) # Check that metadata is unchanged - print(old_metadata, s.metadata) # Capture for comparison on error - assert (old_metadata.as_dictionary() == - s.metadata.as_dictionary()), "Source metadata changed" + print(old_metadata, s.metadata) # Capture for comparison on error + assert ( + old_metadata.as_dictionary() == s.metadata.as_dictionary() + ), "Source metadata changed" def test_sum_live_time2(self): s = self.signal old_metadata = s.metadata.deepcopy() sSum = s.sum((0, 1)) assert ( - sSum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time == - s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time * - 2 * 4) + sSum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time + == s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time * 2 * 4 + ) # Check that metadata is unchanged - print(old_metadata, s.metadata) # Capture for comparison on error - assert (old_metadata.as_dictionary() == - s.metadata.as_dictionary()), "Source metadata changed" + print(old_metadata, s.metadata) # Capture for comparison on error + assert ( + old_metadata.as_dictionary() == s.metadata.as_dictionary() + ), "Source metadata changed" def test_sum_live_time_out_arg(self): s = self.signal @@ -79,8 +81,9 @@ def test_sum_live_time_out_arg(self): r = s.sum(0, out=sSum) assert r is None assert ( - s_resum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time == - s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time * 2) + s_resum.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time + == s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time * 2 + ) np.testing.assert_allclose(s_resum.data, sSum.data) def 
test_rebin_live_time(self): @@ -88,25 +91,28 @@ def test_rebin_live_time(self): old_metadata = s.metadata.deepcopy() s = s.rebin(scale=[2, 2, 1]) assert ( - s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time == - 3.1 * - 2 * - 2) + s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time == 3.1 * 2 * 2 + ) # Check that metadata is unchanged - print(old_metadata, self.signal.metadata) # Captured on error - assert (old_metadata.as_dictionary() == - self.signal.metadata.as_dictionary()), "Source metadata changed" + print(old_metadata, self.signal.metadata) # Captured on error + assert ( + old_metadata.as_dictionary() == self.signal.metadata.as_dictionary() + ), "Source metadata changed" def test_add_elements(self): s = self.signal - s.add_elements(['Al', 'Ni']) - assert s.metadata.Sample.elements == ['Al', 'Ni'] - s.add_elements(['Al', 'Ni']) - assert s.metadata.Sample.elements == ['Al', 'Ni'] - s.add_elements(["Fe", ]) - assert s.metadata.Sample.elements == ['Al', "Fe", 'Ni'] - s.set_elements(['Al', 'Ni']) - assert s.metadata.Sample.elements == ['Al', 'Ni'] + s.add_elements(["Al", "Ni"]) + assert s.metadata.Sample.elements == ["Al", "Ni"] + s.add_elements(["Al", "Ni"]) + assert s.metadata.Sample.elements == ["Al", "Ni"] + s.add_elements( + [ + "Fe", + ] + ) + assert s.metadata.Sample.elements == ["Al", "Fe", "Ni"] + s.set_elements(["Al", "Ni"]) + assert s.metadata.Sample.elements == ["Al", "Ni"] def test_add_lines(self): s = self.signal @@ -116,69 +122,76 @@ def test_add_lines(self): assert s.metadata.Sample.xray_lines == ["Fe_Ln"] s.add_lines(("Fe_Ln",)) assert s.metadata.Sample.xray_lines == ["Fe_Ln"] - s.add_elements(["Ti", ]) + s.add_elements( + [ + "Ti", + ] + ) s.add_lines(()) - assert ( - s.metadata.Sample.xray_lines == ['Fe_Ln', 'Ti_La']) + assert s.metadata.Sample.xray_lines == ["Fe_Ln", "Ti_La"] s.set_lines((), only_one=False, only_lines=False) - assert (s.metadata.Sample.xray_lines == - ['Fe_La', 'Fe_Lb3', 'Fe_Ll', 'Fe_Ln', 'Ti_La', - 'Ti_Lb3', 'Ti_Ll', 'Ti_Ln']) + assert s.metadata.Sample.xray_lines == [ + "Fe_La", + "Fe_Lb3", + "Fe_Ll", + "Fe_Ln", + "Ti_La", + "Ti_Lb3", + "Ti_Ll", + "Ti_Ln", + ] s.metadata.Acquisition_instrument.SEM.beam_energy = 0.4 s.set_lines((), only_one=False, only_lines=False) - assert s.metadata.Sample.xray_lines == ['Ti_Ll'] + assert s.metadata.Sample.xray_lines == ["Ti_Ll"] def test_add_lines_warning(self): s = self.signal with pytest.warns(UserWarning): - s.add_lines(('Fe_Ka',)) + s.add_lines(("Fe_Ka",)) def test_add_lines_auto(self): s = self.signal s.axes_manager.signal_axes[0].scale = 1e-2 s.set_elements(["Ti", "Al"]) - s.set_lines(['Al_Ka']) - assert ( - s.metadata.Sample.xray_lines == ['Al_Ka', 'Ti_Ka']) + s.set_lines(["Al_Ka"]) + assert s.metadata.Sample.xray_lines == ["Al_Ka", "Ti_Ka"] del s.metadata.Sample.xray_lines - s.set_elements(['Al', 'Ni']) + s.set_elements(["Al", "Ni"]) s.add_lines() - assert ( - s.metadata.Sample.xray_lines == ['Al_Ka', 'Ni_Ka']) + assert s.metadata.Sample.xray_lines == ["Al_Ka", "Ni_Ka"] s.metadata.Acquisition_instrument.SEM.beam_energy = 10.0 s.set_lines([]) - assert ( - s.metadata.Sample.xray_lines == ['Al_Ka', 'Ni_La']) + assert s.metadata.Sample.xray_lines == ["Al_Ka", "Ni_La"] s.metadata.Acquisition_instrument.SEM.beam_energy = 200 - s.set_elements(['Au', 'Ni']) + s.set_elements(["Au", "Ni"]) s.set_lines([]) - assert (s.metadata.Sample.xray_lines == - ['Au_La', 'Ni_Ka']) + assert s.metadata.Sample.xray_lines == ["Au_La", "Ni_Ka"] def test_default_param(self): s = self.signal mp = s.metadata assert ( 
- mp.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa == - preferences.EDS.eds_mn_ka) + mp.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa + == preferences.EDS.eds_mn_ka + ) def test_SEM_to_TEM(self): s = self.signal.inav[0, 0] - signal_type = 'EDS_TEM' + signal_type = "EDS_TEM" mp = s.metadata - mp.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa = \ - 125.3 + mp.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa = 125.3 sTEM = s.deepcopy() sTEM.set_signal_type(signal_type) mpTEM = sTEM.metadata results = [ mp.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa, - signal_type] + signal_type, + ] resultsTEM = [ - (mpTEM.Acquisition_instrument.TEM.Detector.EDS. - energy_resolution_MnKa), - mpTEM.Signal.signal_type] + (mpTEM.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa), + mpTEM.Signal.signal_type, + ] assert results == resultsTEM def test_get_calibration_from(self): @@ -218,16 +231,14 @@ def test_take_off_angle_elevation(self): s.get_take_off_angle() - @lazifyTestClass class Test_get_lines_intensity: - def setup_method(self, method): # Create an empty spectrum s = EDSSEMSpectrum(np.zeros((2, 2, 3, 100))) energy_axis = s.axes_manager.signal_axes[0] energy_axis.scale = 0.04 - energy_axis.units = 'keV' + energy_axis.units = "keV" energy_axis.name = "Energy" g = Gaussian() g.sigma.value = 0.05 @@ -237,7 +248,7 @@ def setup_method(self, method): s.metadata.Acquisition_instrument.SEM.beam_energy = 15.0 self.signal = s - @pytest.mark.parametrize("bad_iter", ["Al_Kb", {"A" : "Al_Kb", "B" : "Ca_Ka"}]) + @pytest.mark.parametrize("bad_iter", ["Al_Kb", {"A": "Al_Kb", "B": "Ca_Ka"}]) def test_bad_iter(self, bad_iter): # get_lines_intensity() should raise TypeError when # xray_lines is a string or a dictionary @@ -246,46 +257,44 @@ def test_bad_iter(self, bad_iter): with pytest.raises(TypeError): s.get_lines_intensity(xray_lines=bad_iter, plot_result=False) - @pytest.mark.parametrize("good_iter", [("Al_Kb", "Ca_Ka"), - ["Al_Kb", "Ca_Ka"], - set(["Al_Kb", "Ca_Ka"]) - ]) + @pytest.mark.parametrize( + "good_iter", [("Al_Kb", "Ca_Ka"), ["Al_Kb", "Ca_Ka"], set(["Al_Kb", "Ca_Ka"])] + ) def test_good_iter(self, good_iter): s = self.signal # get_lines_intensity() should succeed and return a list # when xray_lines is an iterable (other than a str or dict) assert isinstance( - s.get_lines_intensity( - xray_lines=good_iter, - plot_result=False - ), - list + s.get_lines_intensity(xray_lines=good_iter, plot_result=False), list ) def test(self): s = self.signal - sAl = s.get_lines_intensity(["Al_Ka"], - plot_result=False, - integration_windows=5)[0] + sAl = s.get_lines_intensity( + ["Al_Ka"], plot_result=False, integration_windows=5 + )[0] assert sAl.axes_manager.signal_dimension == 0 np.testing.assert_allclose( sAl.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time, - s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time) + s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time, + ) np.testing.assert_allclose(24.99516, sAl.data[0, 0, 0], atol=1e-3) sAl = s.inav[0].get_lines_intensity( - ["Al_Ka"], plot_result=False, integration_windows=5)[0] + ["Al_Ka"], plot_result=False, integration_windows=5 + )[0] np.testing.assert_allclose(24.99516, sAl.data[0, 0], atol=1e-3) sAl = s.inav[0, 0].get_lines_intensity( - ["Al_Ka"], plot_result=False, integration_windows=5)[0] + ["Al_Ka"], plot_result=False, integration_windows=5 + )[0] np.testing.assert_allclose(24.99516, sAl.data[0], atol=1e-3) sAl = s.inav[0, 0, 
0].get_lines_intensity( - ["Al_Ka"], plot_result=False, integration_windows=5)[0] + ["Al_Ka"], plot_result=False, integration_windows=5 + )[0] np.testing.assert_allclose(24.99516, sAl.data, atol=1e-3) s.axes_manager[-1].offset = 1.0 - with pytest.warns(UserWarning, - match="C_Ka is not in the data energy range."): + with pytest.warns(UserWarning, match="C_Ka is not in the data energy range."): sC = s.get_lines_intensity(["C_Ka"], plot_result=False) assert len(sC) == 0 assert sAl.metadata.Sample.elements == ["Al"] @@ -295,11 +304,11 @@ def test_eV(self): s = self.signal energy_axis = s.axes_manager.signal_axes[0] energy_axis.scale = 40 - energy_axis.units = 'eV' + energy_axis.units = "eV" - sAl = s.get_lines_intensity(["Al_Ka"], - plot_result=False, - integration_windows=5)[0] + sAl = s.get_lines_intensity( + ["Al_Ka"], plot_result=False, integration_windows=5 + )[0] np.testing.assert_allclose(24.99516, sAl.data[0, 0, 0], atol=1e-3) def test_plot_result_single_spectrum(self): @@ -310,40 +319,47 @@ def test_plot_result_single_spectrum(self): def test_background_substraction(self): s = self.signal intens = s.get_lines_intensity(["Al_Ka"], plot_result=False)[0].data - s = s + 1. - np.testing.assert_allclose(s.estimate_background_windows( - xray_lines=["Al_Ka"])[0, 0], 1.25666201, atol=1e-3) + s = s + 1.0 + np.testing.assert_allclose( + s.estimate_background_windows(xray_lines=["Al_Ka"])[0, 0], + 1.25666201, + atol=1e-3, + ) np.testing.assert_allclose( s.get_lines_intensity( ["Al_Ka"], background_windows=s.estimate_background_windows( - [4, 4], xray_lines=["Al_Ka"]), - plot_result=False)[0].data, - intens, atol=1e-3) + [4, 4], xray_lines=["Al_Ka"] + ), + plot_result=False, + )[0].data, + intens, + atol=1e-3, + ) def test_estimate_integration_windows(self): s = self.signal np.testing.assert_allclose( - s.estimate_integration_windows(3.0, ["Al_Ka"]), - [[1.371, 1.601]], atol=1e-2) + s.estimate_integration_windows(3.0, ["Al_Ka"]), [[1.371, 1.601]], atol=1e-2 + ) def test_with_signals_examples(self): s = exspy.data.EDS_SEM_TM002() np.testing.assert_allclose( utils.stack(s.get_lines_intensity()).data.squeeze(), - np.array([84163, 89063, 96117, 96700, 99075])) + np.array([84163, 89063, 96117, 96700, 99075]), + ) @lazifyTestClass class Test_tools_bulk: - def setup_method(self, method): s = EDSSEMSpectrum(np.ones(1024)) s.metadata.Acquisition_instrument.SEM.beam_energy = 5.0 energy_axis = s.axes_manager.signal_axes[0] energy_axis.scale = 0.01 - energy_axis.units = 'keV' - s.set_elements(['Al', 'Zn']) + energy_axis.units = "keV" + s.set_elements(["Al", "Zn"]) s.add_lines() self.signal = s @@ -353,8 +369,9 @@ def test_electron_range(self): elec_range = eds_utils.electron_range( mp.Sample.elements[0], mp.Acquisition_instrument.SEM.beam_energy, - density='auto', - tilt=mp.Acquisition_instrument.SEM.Stage.tilt_alpha) + density="auto", + tilt=mp.Acquisition_instrument.SEM.Stage.tilt_alpha, + ) np.testing.assert_allclose(elec_range, 0.41350651162374225) def test_xray_range(self): @@ -363,35 +380,37 @@ def test_xray_range(self): xr_range = eds_utils.xray_range( mp.Sample.xray_lines[0], mp.Acquisition_instrument.SEM.beam_energy, - density=4.37499648818) + density=4.37499648818, + ) np.testing.assert_allclose(xr_range, 0.1900368800933955) @lazifyTestClass class Test_energy_units: - def setup_method(self, method): s = EDSSEMSpectrum(np.ones(1024)) s.metadata.Acquisition_instrument.SEM.beam_energy = 5.0 - s.axes_manager.signal_axes[0].units = 'keV' + s.axes_manager.signal_axes[0].units = "keV" 
s.set_microscope_parameters(energy_resolution_MnKa=130) self.signal = s def test_beam_energy(self): s = self.signal assert s._get_beam_energy() == 5.0 - s.axes_manager.signal_axes[0].units = 'eV' + s.axes_manager.signal_axes[0].units = "eV" assert s._get_beam_energy() == 5000.0 - s.axes_manager.signal_axes[0].units = 'keV' + s.axes_manager.signal_axes[0].units = "keV" def test_line_energy(self): s = self.signal - assert s._get_line_energy('Al_Ka') == 1.4865 - s.axes_manager.signal_axes[0].units = 'eV' - assert s._get_line_energy('Al_Ka') == 1486.5 - s.axes_manager.signal_axes[0].units = 'keV' - - np.testing.assert_allclose(s._get_line_energy('Al_Ka', FWHM_MnKa='auto'), - (1.4865, 0.07661266213883969)) - np.testing.assert_allclose(s._get_line_energy('Al_Ka', FWHM_MnKa=128), - (1.4865, 0.073167615787314)) + assert s._get_line_energy("Al_Ka") == 1.4865 + s.axes_manager.signal_axes[0].units = "eV" + assert s._get_line_energy("Al_Ka") == 1486.5 + s.axes_manager.signal_axes[0].units = "keV" + + np.testing.assert_allclose( + s._get_line_energy("Al_Ka", FWHM_MnKa="auto"), (1.4865, 0.07661266213883969) + ) + np.testing.assert_allclose( + s._get_line_energy("Al_Ka", FWHM_MnKa=128), (1.4865, 0.073167615787314) + ) diff --git a/exspy/test/signals/test_eds_tem.py b/exspy/test/signals/test_eds_tem.py index 61021dccf..50f92348f 100644 --- a/exspy/test/signals/test_eds_tem.py +++ b/exspy/test/signals/test_eds_tem.py @@ -32,7 +32,6 @@ @lazifyTestClass class Test_metadata: - def setup_method(self, method): # Create an empty spectrum s = EDSTEMSpectrum(np.ones((4, 2, 1024))) @@ -49,25 +48,28 @@ def test_sum_live_time1(self): old_metadata = s.metadata.deepcopy() sSum = s.sum(0) assert ( - sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time == - s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time * 2) + sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time + == s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time * 2 + ) # Check that metadata is unchanged - print(old_metadata, s.metadata) # Capture for comparison on error - assert (old_metadata.as_dictionary() == - s.metadata.as_dictionary()), "Source metadata changed" + print(old_metadata, s.metadata) # Capture for comparison on error + assert ( + old_metadata.as_dictionary() == s.metadata.as_dictionary() + ), "Source metadata changed" def test_sum_live_time2(self): s = self.signal old_metadata = s.metadata.deepcopy() sSum = s.sum((0, 1)) assert ( - sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time == - s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time - * 2 * 4) + sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time + == s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time * 2 * 4 + ) # Check that metadata is unchanged - print(old_metadata, s.metadata) # Capture for comparison on error - assert (old_metadata.as_dictionary() == - s.metadata.as_dictionary()), "Source metadata changed" + print(old_metadata, s.metadata) # Capture for comparison on error + assert ( + old_metadata.as_dictionary() == s.metadata.as_dictionary() + ), "Source metadata changed" def test_sum_live_time_out_arg(self): s = self.signal @@ -77,8 +79,9 @@ def test_sum_live_time_out_arg(self): r = s.sum(0, out=sSum) assert r is None assert ( - s_resum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time == - s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time * 2) + s_resum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time + == s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time 
* 2 + ) np.testing.assert_allclose(s_resum.data, sSum.data) def test_rebin_live_time(self): @@ -87,12 +90,13 @@ def test_rebin_live_time(self): dim = s.axes_manager.shape s = s.rebin(new_shape=[dim[0] / 2, dim[1] / 2, dim[2]]) assert ( - s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time == - 3.1 * 2 * 2) + s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time == 3.1 * 2 * 2 + ) # Check that metadata is unchanged - print(old_metadata, self.signal.metadata) # Captured on error - assert (old_metadata.as_dictionary() == - self.signal.metadata.as_dictionary()), "Source metadata changed" + print(old_metadata, self.signal.metadata) # Captured on error + assert ( + old_metadata.as_dictionary() == self.signal.metadata.as_dictionary() + ), "Source metadata changed" def test_offset_after_rebin(self): s = self.signal @@ -106,36 +110,37 @@ def test_offset_after_rebin(self): def test_add_elements(self): s = self.signal - s.add_elements(['Al', 'Ni']) - assert s.metadata.Sample.elements == ['Al', 'Ni'] - s.add_elements(['Al', 'Ni']) - assert s.metadata.Sample.elements == ['Al', 'Ni'] - s.add_elements(["Fe", ]) - assert s.metadata.Sample.elements == ['Al', "Fe", 'Ni'] - s.set_elements(['Al', 'Ni']) - assert s.metadata.Sample.elements == ['Al', 'Ni'] + s.add_elements(["Al", "Ni"]) + assert s.metadata.Sample.elements == ["Al", "Ni"] + s.add_elements(["Al", "Ni"]) + assert s.metadata.Sample.elements == ["Al", "Ni"] + s.add_elements( + [ + "Fe", + ] + ) + assert s.metadata.Sample.elements == ["Al", "Fe", "Ni"] + s.set_elements(["Al", "Ni"]) + assert s.metadata.Sample.elements == ["Al", "Ni"] def test_default_param(self): s = self.signal mp = s.metadata assert ( - mp.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa == - preferences.EDS.eds_mn_ka) + mp.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa + == preferences.EDS.eds_mn_ka + ) def test_TEM_to_SEM(self): s = self.signal.inav[0, 0] - signal_type = 'EDS_SEM' + signal_type = "EDS_SEM" mp = s.metadata.Acquisition_instrument.TEM.Detector.EDS mp.energy_resolution_MnKa = 125.3 sSEM = s.deepcopy() sSEM.set_signal_type(signal_type) mpSEM = sSEM.metadata.Acquisition_instrument.SEM.Detector.EDS - results = [ - mp.energy_resolution_MnKa, - signal_type] - resultsSEM = [ - mpSEM.energy_resolution_MnKa, - sSEM.metadata.Signal.signal_type] + results = [mp.energy_resolution_MnKa, signal_type] + resultsSEM = [mpSEM.energy_resolution_MnKa, sSEM.metadata.Signal.signal_type] assert results == resultsSEM def test_get_calibration_from(self): @@ -145,8 +150,7 @@ def test_get_calibration_from(self): energy_axis.scale = 0.01 energy_axis.offset = -0.10 s.get_calibration_from(scalib) - assert (s.axes_manager.signal_axes[0].scale == - energy_axis.scale) + assert s.axes_manager.signal_axes[0].scale == energy_axis.scale def test_are_microscope_parameters_missing(self): assert not self.signal._are_microscope_parameters_missing() @@ -158,24 +162,27 @@ def test_are_microscope_parameters_missing(self): @lazifyTestClass class Test_quantification: - def setup_method(self, method): s = EDSTEMSpectrum(np.ones([2, 2, 1024])) energy_axis = s.axes_manager.signal_axes[0] energy_axis.scale = 1e-2 - energy_axis.units = 'keV' + energy_axis.units = "keV" energy_axis.name = "Energy" - s.set_microscope_parameters(beam_energy=200, - live_time=2.5, tilt_stage=0.0, - azimuth_angle=0, elevation_angle=35, - energy_resolution_MnKa=130, - beam_current=0.05) - elements = ['Al', 'Zn'] - xray_lines = ['Al_Ka', 'Zn_Ka'] + s.set_microscope_parameters( + beam_energy=200, 
+ live_time=2.5, + tilt_stage=0.0, + azimuth_angle=0, + elevation_angle=35, + energy_resolution_MnKa=130, + beam_current=0.05, + ) + elements = ["Al", "Zn"] + xray_lines = ["Al_Ka", "Zn_Ka"] intensities = [300, 500] for i, xray_line in enumerate(xray_lines): gauss = Gaussian() - line_energy, FWHM = s._get_line_energy(xray_line, FWHM_MnKa='auto') + line_energy, FWHM = s._get_line_energy(xray_line, FWHM_MnKa="auto") gauss.centre.value = line_energy gauss.A.value = intensities[i] gauss.sigma.value = FWHM @@ -185,8 +192,8 @@ def setup_method(self, method): s.add_lines(xray_lines) s.axes_manager[0].scale = 0.5 s.axes_manager[1].scale = 0.5 - s.axes_manager[0].units = 'nm' - s.axes_manager[1].units = 'nm' + s.axes_manager[0].units = "nm" + s.axes_manager[1].units = "nm" self.signal = s def test_metadata(self): @@ -196,18 +203,16 @@ def test_metadata(self): np.testing.assert_approx_equal(TEM_md.Stage.tilt_alpha, 0.0) np.testing.assert_approx_equal(TEM_md.Detector.EDS.live_time, 2.5) np.testing.assert_approx_equal(TEM_md.Detector.EDS.elevation_angle, 35) - np.testing.assert_approx_equal( - TEM_md.Detector.EDS.energy_resolution_MnKa, 130) + np.testing.assert_approx_equal(TEM_md.Detector.EDS.energy_resolution_MnKa, 130) self.signal.set_microscope_parameters(real_time=3.1) self.signal.set_microscope_parameters(probe_area=1.2) - np.testing.assert_approx_equal( - TEM_md.probe_area, 1.2) + np.testing.assert_approx_equal(TEM_md.probe_area, 1.2) np.testing.assert_approx_equal(TEM_md.Detector.EDS.real_time, 3.1) def test_quant_one_intensity_error(self): s = self.signal - method = 'CL' + method = "CL" kfactors = [1, 2.0009344042484134] intensities = s.get_lines_intensity()[:1] assert len(intensities) == 1 @@ -220,158 +225,188 @@ def test_quant_one_intensity_error(self): def test_quant_lorimer(self): s = self.signal - method = 'CL' + method = "CL" kfactors = [1, 2.0009344042484134] - composition_units = 'weight' + composition_units = "weight" intensities = s.get_lines_intensity() - res = s.quantification(intensities, method, kfactors, - composition_units) - s2 = s.rebin(new_shape=(1,1,1024)).squeeze() - s2.quantification(intensities, method, kfactors, - composition_units, plot_result=True) - np.testing.assert_allclose(res[0].data, np.ones((2, 2)) * 22.70779, - atol=1e-3) + res = s.quantification(intensities, method, kfactors, composition_units) + s2 = s.rebin(new_shape=(1, 1, 1024)).squeeze() + s2.quantification( + intensities, method, kfactors, composition_units, plot_result=True + ) + np.testing.assert_allclose(res[0].data, np.ones((2, 2)) * 22.70779, atol=1e-3) # Test plot_results s2 = s.inav[0, 0] - s2.quantification(intensities, method, kfactors, - composition_units, plot_result=True) - np.testing.assert_allclose(res[0].data, 22.70779) + s2.quantification( + intensities, method, kfactors, composition_units, plot_result=True + ) + np.testing.assert_allclose(res[0].data, 22.70779) def test_quant_lorimer_mask(self): s = self.signal - method = 'CL' + method = "CL" kfactors = [1, 2.0009344042484134] - composition_units = 'weight' + composition_units = "weight" intensities = s.get_lines_intensity() mask = np.array([[1, 1], [0, 0]]) - res = s.quantification(intensities, method, kfactors, - composition_units, - navigation_mask=mask) - np.testing.assert_allclose(res[0].data, np.array([ - [0, 0], - [22.70779, 22.70779]]), atol=1e-3) + res = s.quantification( + intensities, method, kfactors, composition_units, navigation_mask=mask + ) + np.testing.assert_allclose( + res[0].data, np.array([[0, 0], [22.70779, 
22.70779]]), atol=1e-3 + ) def test_quant_lorimer_warning(self): s = self.signal - method = 'CL' + method = "CL" kfactors = [1, 2.0009344042484134] - composition_units = 'weight' + composition_units = "weight" intensities = s.get_lines_intensity() with pytest.raises(ValueError, match="Thickness is required for absorption"): - _ = s.quantification(intensities, method, kfactors, - composition_units, - absorption_correction=True, - thickness=None) + _ = s.quantification( + intensities, + method, + kfactors, + composition_units, + absorption_correction=True, + thickness=None, + ) def test_quant_lorimer_ac(self): s = self.signal - method = 'CL' + method = "CL" kfactors = [1, 2.0009344042484134] - composition_units = 'weight' + composition_units = "weight" intensities = s.get_lines_intensity() - res = s.quantification(intensities, method, kfactors, - composition_units) - np.testing.assert_allclose(res[0].data, np.ones((2, 2)) * 22.70779, - atol=1e-3) - res2 = s.quantification(intensities, method, kfactors, - composition_units, - absorption_correction=True, - thickness=1.) - res3 = s.quantification(intensities, method, kfactors, - composition_units, - absorption_correction=True, - thickness=300.) - res4 = s.quantification(intensities, method, kfactors, - composition_units, - absorption_correction=True, - thickness=0.0001) + res = s.quantification(intensities, method, kfactors, composition_units) + np.testing.assert_allclose(res[0].data, np.ones((2, 2)) * 22.70779, atol=1e-3) + res2 = s.quantification( + intensities, + method, + kfactors, + composition_units, + absorption_correction=True, + thickness=1.0, + ) + res3 = s.quantification( + intensities, + method, + kfactors, + composition_units, + absorption_correction=True, + thickness=300.0, + ) + res4 = s.quantification( + intensities, + method, + kfactors, + composition_units, + absorption_correction=True, + thickness=0.0001, + ) list.reverse(intensities) list.reverse(kfactors) - res5 = s.quantification(intensities, method, kfactors, - composition_units, - absorption_correction=True, - thickness=300.) + res5 = s.quantification( + intensities, + method, + kfactors, + composition_units, + absorption_correction=True, + thickness=300.0, + ) np.testing.assert_allclose(res5[0][0].data, res3[0][1].data, atol=1e-5) - np.testing.assert_allclose(res2[0][0].data, np.ones((2, 2)) * 22.743013, - atol=1e-3) - np.testing.assert_allclose(res3[0][0].data, np.ones((2, 2)) * 31.816908, - atol=1e-3) + np.testing.assert_allclose( + res2[0][0].data, np.ones((2, 2)) * 22.743013, atol=1e-3 + ) + np.testing.assert_allclose( + res3[0][0].data, np.ones((2, 2)) * 31.816908, atol=1e-3 + ) np.testing.assert_allclose(res[0].data, res4[0][0].data, atol=1e-5) def test_quant_zeta(self): s = self.signal - method = 'zeta' - composition_units = 'weight' + method = "zeta" + composition_units = "weight" factors = [20, 50] intensities = s.get_lines_intensity() - res = s.quantification(intensities, method, factors, - composition_units) - np.testing.assert_allclose(res[1].data, np.ones((2, 2)) * 2.7125736e-03, - atol=1e-3) - np.testing.assert_allclose(res[0][1].data, - np.ones((2, 2)) * 80.962287987, atol=1e-3) - res2 = s.quantification(intensities, method, factors, - composition_units, - absorption_correction=True, - thickness=1.) - res3 = s.quantification(intensities, method, factors, - composition_units, - absorption_correction=True, - thickness=100.) 
+ res = s.quantification(intensities, method, factors, composition_units) + np.testing.assert_allclose( + res[1].data, np.ones((2, 2)) * 2.7125736e-03, atol=1e-3 + ) + np.testing.assert_allclose( + res[0][1].data, np.ones((2, 2)) * 80.962287987, atol=1e-3 + ) + res2 = s.quantification( + intensities, + method, + factors, + composition_units, + absorption_correction=True, + thickness=1.0, + ) + res3 = s.quantification( + intensities, + method, + factors, + composition_units, + absorption_correction=True, + thickness=100.0, + ) assert res2 == res3 - np.testing.assert_allclose(res2[0][1].data, np.ones((2, 2)) * 65.5867, - atol=1e-3) + np.testing.assert_allclose( + res2[0][1].data, np.ones((2, 2)) * 65.5867, atol=1e-3 + ) def test_quant_cross_section_units(self): s = self.signal.deepcopy() s2 = self.signal.deepcopy() - s.axes_manager[0].units = 'µm' - s.axes_manager[1].units = 'µm' - s.axes_manager[0].scale = 0.5/1000 - s.axes_manager[1].scale = 0.5/1000 + s.axes_manager[0].units = "µm" + s.axes_manager[1].units = "µm" + s.axes_manager[0].scale = 0.5 / 1000 + s.axes_manager[1].scale = 0.5 / 1000 - method = 'cross_section' + method = "cross_section" factors = [3, 5] intensities = s.get_lines_intensity() res = s.quantification(intensities, method, factors) res2 = s2.quantification(intensities, method, factors) np.testing.assert_allclose(res[0][0].data, res2[0][0].data) # Check that the quantification doesn't change the units of the signal - assert s.axes_manager[0].units == 'µm' - assert s.axes_manager[1].units == 'µm' + assert s.axes_manager[0].units == "µm" + assert s.axes_manager[1].units == "µm" - @pytest.mark.parametrize("axes", (None, "nav_axes", [0, 1], ['x', 'y'])) + @pytest.mark.parametrize("axes", (None, "nav_axes", [0, 1], ["x", "y"])) def test_get_probe_area(self, axes): s = self.signal - s.axes_manager[0].name = 'x' - s.axes_manager[1].name = 'y' - s.axes_manager[0].units = 'µm' - s.axes_manager[1].units = 'µm' - s.axes_manager[0].scale = 0.5/1000 - s.axes_manager[1].scale = 0.5/1000 + s.axes_manager[0].name = "x" + s.axes_manager[1].name = "y" + s.axes_manager[0].units = "µm" + s.axes_manager[1].units = "µm" + s.axes_manager[0].scale = 0.5 / 1000 + s.axes_manager[1].scale = 0.5 / 1000 if axes == "nav_axes": axes = s.axes_manager.navigation_axes np.testing.assert_allclose(s.get_probe_area(axes), 0.25, atol=1e-3) - @pytest.mark.parametrize("axes", (None, "nav_axes", [0], ['x'])) + @pytest.mark.parametrize("axes", (None, "nav_axes", [0], ["x"])) def test_get_probe_area_line_scan(self, axes): s = self.signal.inav[0] - s.axes_manager[0].name = 'x' - s.axes_manager[0].units = 'µm' - s.axes_manager[0].scale = 0.5/1000 + s.axes_manager[0].name = "x" + s.axes_manager[0].units = "µm" + s.axes_manager[0].scale = 0.5 / 1000 if axes == "nav_axes": axes = s.axes_manager.navigation_axes np.testing.assert_allclose(s.get_probe_area(axes), 0.25, atol=1e-3) - @pytest.mark.parametrize("axes", (None, "nav_axes", [0], ['x'])) + @pytest.mark.parametrize("axes", (None, "nav_axes", [0], ["x"])) def test_get_probe_area_line_scan_other_nav_axes(self, axes): s = self.signal - s.axes_manager[0].name = 'x' - s.axes_manager[1].name = 'time' - s.axes_manager[0].units = 'µm' - s.axes_manager[1].units = 's' - s.axes_manager[0].scale = 0.5/1000 + s.axes_manager[0].name = "x" + s.axes_manager[1].name = "time" + s.axes_manager[0].units = "µm" + s.axes_manager[1].units = "s" + s.axes_manager[0].scale = 0.5 / 1000 s.axes_manager[1].scale = 10 if axes == "nav_axes" or axes is None: axes = s.axes_manager.navigation_axes 
@@ -383,93 +418,101 @@ def test_get_probe_area_line_scan_other_nav_axes(self, axes): def test_zeta_vs_cross_section(self): s = self.signal factors = [3, 5] - method = 'zeta' + method = "zeta" intensities = s.get_lines_intensity() - zfactors = utils_eds.edx_cross_section_to_zeta([3, 5], ['Al', 'Zn']) - factors2 = utils_eds.zeta_to_edx_cross_section(zfactors, ['Al', 'Zn']) + zfactors = utils_eds.edx_cross_section_to_zeta([3, 5], ["Al", "Zn"]) + factors2 = utils_eds.zeta_to_edx_cross_section(zfactors, ["Al", "Zn"]) np.testing.assert_allclose(factors, factors2, atol=1e-3) res = s.quantification( intensities, method, - factors = utils_eds.edx_cross_section_to_zeta([22.402, 21.7132], - ['Al','Zn'])) - res2 = s.quantification(intensities, - method='cross_section', - factors=[22.402, 21.7132]) + factors=utils_eds.edx_cross_section_to_zeta( + [22.402, 21.7132], ["Al", "Zn"] + ), + ) + res2 = s.quantification( + intensities, method="cross_section", factors=[22.402, 21.7132] + ) np.testing.assert_allclose(res[0][0].data, res2[0][0].data, atol=1e-3) - np.testing.assert_allclose(res[0][0].data, np.ones((2, 2)) * 36.2969, - atol=1e-3) - + np.testing.assert_allclose(res[0][0].data, np.ones((2, 2)) * 36.2969, atol=1e-3) def test_quant_cross_section(self): s = self.signal - method = 'cross_section' + method = "cross_section" factors = [3, 5] intensities = s.get_lines_intensity() res = s.quantification(intensities, method, factors) - np.testing.assert_allclose(res[1][0].data, np.ones((2, 2)) * 21517.1647, - atol=1e-3) - np.testing.assert_allclose(res[1][1].data, np.ones((2, 2)) * 21961.6166, - atol=1e-3) - np.testing.assert_allclose(res[0][0].data, np.ones((2, 2)) * 49.4889, - atol=1e-3) - + np.testing.assert_allclose( + res[1][0].data, np.ones((2, 2)) * 21517.1647, atol=1e-3 + ) + np.testing.assert_allclose( + res[1][1].data, np.ones((2, 2)) * 21961.6166, atol=1e-3 + ) + np.testing.assert_allclose(res[0][0].data, np.ones((2, 2)) * 49.4889, atol=1e-3) def test_method_error(self): s = self.signal - method = 'random_method' + method = "random_method" factors = [3, 5] intensities = s.get_lines_intensity() - with pytest.raises(ValueError, match="Please specify method for quantification"): + with pytest.raises( + ValueError, match="Please specify method for quantification" + ): _ = s.quantification(intensities, method, factors) def test_quant_cross_section_ac(self): s = self.signal - method = 'cross_section' + method = "cross_section" zfactors = [20, 50] - factors = utils_eds.zeta_to_edx_cross_section(zfactors, ['Al', 'Zn']) + factors = utils_eds.zeta_to_edx_cross_section(zfactors, ["Al", "Zn"]) intensities = s.get_lines_intensity() - res = s.quantification(intensities, method, factors, - absorption_correction=True) - res2 = s.quantification(intensities, method='zeta', - factors=zfactors, - absorption_correction=True) - np.testing.assert_allclose(res[0][1].data, np.ones((2, 2)) * 44.02534, - atol=1e-3) + res = s.quantification(intensities, method, factors, absorption_correction=True) + res2 = s.quantification( + intensities, method="zeta", factors=zfactors, absorption_correction=True + ) + np.testing.assert_allclose( + res[0][1].data, np.ones((2, 2)) * 44.02534, atol=1e-3 + ) np.testing.assert_allclose(res2[0][0].data, res[0][0].data, atol=1e-3) def test_quant_zeros(self): - intens = np.array([[0.5, 0.5, 0.5], - [0.0, 0.5, 0.5], - [0.5, 0.0, 0.5], - [0.5, 0.5, 0.0], - [0.5, 0.0, 0.0]]).T + intens = np.array( + [ + [0.5, 0.5, 0.5], + [0.0, 0.5, 0.5], + [0.5, 0.0, 0.5], + [0.5, 0.5, 0.0], + [0.5, 0.0, 0.0], + ] + 
).T with warnings.catch_warnings(): warnings.filterwarnings( - "ignore", - message="divide by zero encountered", - category=RuntimeWarning - ) - quant = utils_eds.quantification_cliff_lorimer( - intens, [1, 1, 3]).T + "ignore", message="divide by zero encountered", category=RuntimeWarning + ) + quant = utils_eds.quantification_cliff_lorimer(intens, [1, 1, 3]).T np.testing.assert_allclose( quant, - np.array([[0.2, 0.2, 0.6], - [0.0, 0.25, 0.75], - [0.25, 0.0, 0.75], - [0.5, 0.5, 0.0], - [1.0, 0.0, 0.0]])) + np.array( + [ + [0.2, 0.2, 0.6], + [0.0, 0.25, 0.75], + [0.25, 0.0, 0.75], + [0.5, 0.5, 0.0], + [1.0, 0.0, 0.0], + ] + ), + ) def test_edx_cross_section_to_zeta(self): cs = [3, 6] - elements = ['Pt', 'Ni'] + elements = ["Pt", "Ni"] res = utils_eds.edx_cross_section_to_zeta(cs, elements) np.testing.assert_allclose(res, [1079.815272, 162.4378035], atol=1e-3) def test_zeta_to_edx_cross_section(self): factors = [1079.815272, 162.4378035] - elements = ['Pt', 'Ni'] + elements = ["Pt", "Ni"] res = utils_eds.zeta_to_edx_cross_section(factors, elements) np.testing.assert_allclose(res, [3, 6], atol=1e-3) @@ -477,38 +520,34 @@ def test_quant_element_order(self): s = self.signal s.set_elements([]) s.set_lines([]) - lines = ['Zn_Ka', 'Al_Ka'] + lines = ["Zn_Ka", "Al_Ka"] kfactors = [2.0009344042484134, 1] intensities = s.get_lines_intensity(xray_lines=lines) - res = s.quantification(intensities, method='CL', factors=kfactors, - composition_units='weight') - assert res[0].metadata.Sample.xray_lines[0] == 'Zn_Ka' - assert res[1].metadata.Sample.xray_lines[0] == 'Al_Ka' - np.testing.assert_allclose(res[1].data, np.ones((2, 2)) * 22.70779, - atol=1e-3) + res = s.quantification( + intensities, method="CL", factors=kfactors, composition_units="weight" + ) + assert res[0].metadata.Sample.xray_lines[0] == "Zn_Ka" + assert res[1].metadata.Sample.xray_lines[0] == "Al_Ka" + np.testing.assert_allclose(res[1].data, np.ones((2, 2)) * 22.70779, atol=1e-3) def test_CL_get_mass_thickness(self): s = self.signal - method = 'CL' + method = "CL" kfactors = [1, 2.0009344042484134] - composition_units = 'weight' + composition_units = "weight" intensities = s.get_lines_intensity() - res = s.quantification(intensities, method, kfactors, - composition_units) + res = s.quantification(intensities, method, kfactors, composition_units) - mass_thickness = s.CL_get_mass_thickness(res, 100.)[0, 0] - np.testing.assert_allclose(mass_thickness, 6.1317741E-4) + mass_thickness = s.CL_get_mass_thickness(res, 100.0)[0, 0] + np.testing.assert_allclose(mass_thickness, 6.1317741e-4) - thickness = np.array([[100., 90.0], - [85, 80.]]) + thickness = np.array([[100.0, 90.0], [85, 80.0]]) mass_thickness2 = s.CL_get_mass_thickness(res, thickness) - np.testing.assert_allclose(mass_thickness2, - mass_thickness * thickness / 100) + np.testing.assert_allclose(mass_thickness2, mass_thickness * thickness / 100) @lazifyTestClass class Test_vacuum_mask: - def setup_method(self, method): s = EDSTEMSpectrum(np.array([np.linspace(0.001, 0.5, 20)] * 100).T) s.add_poissonian_noise() @@ -526,7 +565,7 @@ def test_vacuum_mask_navigation_dimension_0(self): s2.vacuum_mask() -@pytest.mark.parametrize('normalise_poissonian_noise', [True, False]) +@pytest.mark.parametrize("normalise_poissonian_noise", [True, False]) def test_decomposition(normalise_poissonian_noise): s = EDSTEMSpectrum(np.ones(shape=(32, 32, 1024))) s.add_poissonian_noise() @@ -540,29 +579,28 @@ def test_decomposition(normalise_poissonian_noise): @lazifyTestClass class Test_simple_model: - def 
setup_method(self, method): - s = utils_eds.xray_lines_model(elements=['Al', 'Zn'], - weight_percents=[50, 50]) + s = utils_eds.xray_lines_model(elements=["Al", "Zn"], weight_percents=[50, 50]) self.signal = s def test_intensity(self): s = self.signal np.testing.assert_allclose( - [i.data[0] for i in s.get_lines_intensity( - integration_window_factor=5.0)], + [i.data[0] for i in s.get_lines_intensity(integration_window_factor=5.0)], [0.5, 0.5], - atol=1e-1) + atol=1e-1, + ) def test_intensity_dtype_uint(self): s = self.signal - s.data *= 1E5 + s.data *= 1e5 s.change_dtype("uint") bw = s.estimate_background_windows() np.testing.assert_allclose( [i.data[0] for i in s.get_lines_intensity(background_windows=bw)], - [5E4, 5E4], - rtol=0.03) + [5e4, 5e4], + rtol=0.03, + ) def test_with_signals_examples(): @@ -570,48 +608,45 @@ def test_with_signals_examples(): for s in (sig, sig.as_lazy()): np.testing.assert_allclose( np.array([res.data[0] for res in s.get_lines_intensity()]), - np.array([3710, 15872])) + np.array([3710, 15872]), + ) class Test_eds_markers: - def setup_method(self, method): - s = utils_eds.xray_lines_model(elements=['Al', 'Zn'], - weight_percents=[50, 50]) + s = utils_eds.xray_lines_model(elements=["Al", "Zn"], weight_percents=[50, 50]) self.signal = s def test_plot_auto_add(self): s = self.signal s.plot(xray_lines=True) # Should contain 6 lines - assert ( - sorted(s._xray_markers["names"] == - ['Al_Ka', 'Al_Kb', 'Zn_Ka', 'Zn_Kb', 'Zn_La', 'Zn_Lb1'])) + assert sorted( + s._xray_markers["names"] + == ["Al_Ka", "Al_Kb", "Zn_Ka", "Zn_Kb", "Zn_La", "Zn_Lb1"] + ) def test_manual_add_line(self): s = self.signal s.plot() - s.add_xray_lines_markers(['Zn_La']) - assert ( - list(s._xray_markers["names"]) == - ['Zn_La']) + s.add_xray_lines_markers(["Zn_La"]) + assert list(s._xray_markers["names"]) == ["Zn_La"] assert len(s._xray_markers["names"]) == 1 - def test_manual_add_line_error(self): s = self.signal with pytest.raises(RuntimeError): - s.add_xray_lines_markers(['Zn_La']) + s.add_xray_lines_markers(["Zn_La"]) def test_manual_remove_element(self): s = self.signal s.plot() - s.add_xray_lines_markers(['Zn_Ka', 'Zn_Kb', 'Zn_La']) - s.remove_xray_lines_markers(['Zn_Kb']) - assert sorted(s._xray_markers["names"]) == ['Zn_Ka', 'Zn_La'] - s.remove_xray_lines_markers(['Zn_Ka'], render_figure=False) - assert sorted(s._xray_markers["names"]) == ['Zn_La'] - s.remove_xray_lines_markers(['Zn_La'], render_figure=True) + s.add_xray_lines_markers(["Zn_Ka", "Zn_Kb", "Zn_La"]) + s.remove_xray_lines_markers(["Zn_Kb"]) + assert sorted(s._xray_markers["names"]) == ["Zn_Ka", "Zn_La"] + s.remove_xray_lines_markers(["Zn_Ka"], render_figure=False) + assert sorted(s._xray_markers["names"]) == ["Zn_La"] + s.remove_xray_lines_markers(["Zn_La"], render_figure=True) assert sorted(s._xray_markers["names"]) == [] assert len(s._xray_markers["texts"].get_current_kwargs()["offsets"]) == 0 assert len(s._xray_markers["texts"].get_current_kwargs()["texts"]) == 0 diff --git a/exspy/test/signals/test_eels.py b/exspy/test/signals/test_eels.py index 511ef8590..e0c80add0 100644 --- a/exspy/test/signals/test_eels.py +++ b/exspy/test/signals/test_eels.py @@ -32,7 +32,6 @@ @lazifyTestClass class Test_Estimate_Elastic_Scattering_Threshold: - def setup_method(self, method): # Create an empty spectrum s = exspy.signals.EELSSpectrum(np.zeros((3, 2, 1024))) @@ -49,8 +48,7 @@ def setup_method(self, method): # Inflexion point 1.5 gauss2.A.value = 5000 gauss2.centre.value = 5 - s.data[:] = (gauss.function(energy_axis.axis) + - 
gauss2.function(energy_axis.axis)) + s.data[:] = gauss.function(energy_axis.axis) + gauss2.function(energy_axis.axis) self.signal = s def test_min_in_window_with_smoothing(self): @@ -86,14 +84,15 @@ def test_min_not_in_window(self): # If I use a much lower window, this is the value that has to be # returned as threshold. s = self.signal - data = s.estimate_elastic_scattering_threshold(window=1.5, - tol=0.001, - ).data + data = s.estimate_elastic_scattering_threshold( + window=1.5, + tol=0.001, + ).data assert np.all(np.isnan(data)) def test_estimate_elastic_scattering_intensity(self): s = self.signal - threshold = s.estimate_elastic_scattering_threshold(window=4.) + threshold = s.estimate_elastic_scattering_threshold(window=4.0) # Threshold is nd signal t = s.estimate_elastic_scattering_intensity(threshold=threshold) assert t.metadata.Signal.signal_type == "" @@ -101,7 +100,7 @@ def test_estimate_elastic_scattering_intensity(self): np.testing.assert_array_almost_equal(t.data, 249999.985133) # Threshold is signal, 1 spectrum s0 = s.inav[0] - t0 = s0.estimate_elastic_scattering_threshold(window=4.) + t0 = s0.estimate_elastic_scattering_threshold(window=4.0) t = s0.estimate_elastic_scattering_intensity(threshold=t0) np.testing.assert_array_almost_equal(t.data, 249999.985133) # Threshold is value @@ -111,7 +110,6 @@ def test_estimate_elastic_scattering_intensity(self): @lazifyTestClass class TestEstimateZLPCentre: - def setup_method(self, method): s = exspy.signals.EELSSpectrum(np.diag(np.arange(1, 11))) s.axes_manager[-1].scale = 0.1 @@ -121,17 +119,13 @@ def setup_method(self, method): def test_estimate_zero_loss_peak_centre(self): s = self.signal zlpc = s.estimate_zero_loss_peak_centre() - np.testing.assert_allclose(zlpc.data, - np.arange(100, - 101, - 0.1)) + np.testing.assert_allclose(zlpc.data, np.arange(100, 101, 0.1)) assert zlpc.metadata.Signal.signal_type == "" assert zlpc.axes_manager.signal_dimension == 0 @lazifyTestClass class TestAlignZLP: - def setup_method(self, method): s = exspy.signals.EELSSpectrum(np.zeros((10, 100))) self.scale = 0.1 @@ -150,9 +144,7 @@ def setup_method(self, method): def test_align_zero_loss_peak_calibrate_true(self): s = self.signal - s.align_zero_loss_peak( - calibrate=True, - print_stats=False) + s.align_zero_loss_peak(calibrate=True, print_stats=False) zlpc = s.estimate_zero_loss_peak_centre() np.testing.assert_allclose(zlpc.data.mean(), 0) np.testing.assert_allclose(zlpc.data.std(), 0) @@ -161,30 +153,21 @@ def test_align_zero_loss_peak_calibrate_true_with_mask(self): s = self.signal mask = s._get_navigation_signal(dtype="bool").T mask.data[[3, 5]] = (True, True) - s.align_zero_loss_peak( - calibrate=True, - print_stats=False, - mask=mask) + s.align_zero_loss_peak(calibrate=True, print_stats=False, mask=mask) zlpc = s.estimate_zero_loss_peak_centre(mask=mask) - np.testing.assert_allclose(np.nanmean(zlpc.data), 0, - atol=np.finfo(float).eps) - np.testing.assert_allclose(np.nanstd(zlpc.data), 0, - atol=np.finfo(float).eps) + np.testing.assert_allclose(np.nanmean(zlpc.data), 0, atol=np.finfo(float).eps) + np.testing.assert_allclose(np.nanstd(zlpc.data), 0, atol=np.finfo(float).eps) def test_align_zero_loss_peak_calibrate_false(self): s = self.signal - s.align_zero_loss_peak( - calibrate=False, - print_stats=False) + s.align_zero_loss_peak(calibrate=False, print_stats=False) zlpc = s.estimate_zero_loss_peak_centre() np.testing.assert_allclose(zlpc.data.std(), 0, atol=10e-3) def test_also_aligns(self): s = self.signal s2 = s.deepcopy() - 
s.align_zero_loss_peak(calibrate=True, - print_stats=False, - also_align=[s2]) + s.align_zero_loss_peak(calibrate=True, print_stats=False, also_align=[s2]) zlpc = s2.estimate_zero_loss_peak_centre() assert zlpc.data.mean() == 0 assert zlpc.data.std() == 0 @@ -196,7 +179,8 @@ def test_align_zero_loss_peak_with_spike_signal_range(self): spike[:, 75] = spike_amplitude s.data += spike s.align_zero_loss_peak( - print_stats=False, subpixel=False, signal_range=(98., 102.)) + print_stats=False, subpixel=False, signal_range=(98.0, 102.0) + ) zlp_max = s.isig[-0.5:0.5].max(-1).data # Max value in the original spectrum is 12, but due to the aligning # the peak is split between two different channels. So 8 is the @@ -206,15 +190,12 @@ def test_align_zero_loss_peak_with_spike_signal_range(self): def test_align_zero_loss_peak_crop_false(self): s = self.signal original_size = s.axes_manager.signal_axes[0].size - s.align_zero_loss_peak( - crop=False, - print_stats=False) + s.align_zero_loss_peak(crop=False, print_stats=False) assert original_size == s.axes_manager.signal_axes[0].size @lazifyTestClass class TestSpikesRemovalToolZLP: - def setup_method(self, method): # Create an empty spectrum s = exspy.signals.EELSSpectrum(np.zeros((2, 3, 64))) @@ -238,21 +219,17 @@ def _add_spikes(self): def test_get_zero_loss_peak_mask(self): mask = self.signal.get_zero_loss_peak_mask() - expected_mask = np.zeros(self.signal.axes_manager.signal_size, - dtype=bool) + expected_mask = np.zeros(self.signal.axes_manager.signal_size, dtype=bool) expected_mask[13:38] = True np.testing.assert_allclose(mask, expected_mask) def test_get_zero_loss_peak_mask_signal_mask(self): - signal_mask = np.zeros(self.signal.axes_manager.signal_size, - dtype=bool) + signal_mask = np.zeros(self.signal.axes_manager.signal_size, dtype=bool) signal_mask[40:50] = True mask = self.signal.get_zero_loss_peak_mask(signal_mask=signal_mask) - expected_mask = np.zeros(self.signal.axes_manager.signal_size, - dtype=bool) + expected_mask = np.zeros(self.signal.axes_manager.signal_size, dtype=bool) expected_mask[13:38] = True - np.testing.assert_allclose(mask, np.logical_or(expected_mask, - signal_mask)) + np.testing.assert_allclose(mask, np.logical_or(expected_mask, signal_mask)) def test_spikes_diagnosis(self): if self.signal._lazy: @@ -269,11 +246,11 @@ def test_spikes_diagnosis(self): np.testing.assert_allclose(hist_data.data, expected_data) hist_data2 = self.signal._spikes_diagnosis(bins=25) - expected_data2 = np.array([286, 10, 13, 0, 0, 1, 12, 0]) + expected_data2 = np.array([286, 10, 13, 0, 0, 1, 12, 0]) np.testing.assert_allclose(hist_data2.data[:8], expected_data2) # mask all to check that it raises an error when there is no data - signal_mask = self.signal.inav[0,1].data.astype(bool) + signal_mask = self.signal.inav[0, 1].data.astype(bool) with pytest.raises(ValueError): self.signal.spikes_diagnosis(signal_mask=signal_mask) @@ -297,6 +274,7 @@ def test_spikes_diagnosis_constant_derivative(): hs.preferences.GUIs.enable_traitsui_gui = True try: import hyperspy_gui_traitsui + s._spikes_diagnosis(use_gui=True) except ImportError: pass @@ -304,9 +282,8 @@ def test_spikes_diagnosis_constant_derivative(): @lazifyTestClass class TestPowerLawExtrapolation: - def setup_method(self, method): - s = exspy.signals.EELSSpectrum(0.1 * np.arange(50, 250, 0.5) ** -3.) 
+ s = exspy.signals.EELSSpectrum(0.1 * np.arange(50, 250, 0.5) ** -3.0) s.axes_manager[-1].is_binned = False s.axes_manager[-1].offset = 50 s.axes_manager[-1].scale = 0.5 @@ -327,8 +304,7 @@ def test_binned(self): @lazifyTestClass class TestFourierRatioDeconvolution: - - @pytest.mark.parametrize(('extrapolate_lowloss'), [True, False]) + @pytest.mark.parametrize(("extrapolate_lowloss"), [True, False]) def test_running(self, extrapolate_lowloss): s = exspy.signals.EELSSpectrum(np.arange(200)) gaussian = hs.model.components1D.Gaussian() @@ -337,8 +313,7 @@ def test_running(self, extrapolate_lowloss): gaussian.centre.value = 20 s_ll = exspy.signals.EELSSpectrum(gaussian.function(np.arange(0, 200, 1))) s_ll.axes_manager[0].offset = -50 - s.fourier_ratio_deconvolution(s_ll, - extrapolate_lowloss=extrapolate_lowloss) + s.fourier_ratio_deconvolution(s_ll, extrapolate_lowloss=extrapolate_lowloss) @lazifyTestClass @@ -382,7 +357,6 @@ def test_offset_after_rebin(self): @lazifyTestClass class Test_Estimate_Thickness: - def setup_method(self, method): # Create an empty spectrum self.s = hs.load( @@ -394,34 +368,36 @@ def setup_method(self, method): def test_relative_thickness(self): t = self.s.estimate_thickness(zlp=self.zlp) - np.testing.assert_allclose(t.data, np.arange(0.3,2,0.1), atol=4e-3) + np.testing.assert_allclose(t.data, np.arange(0.3, 2, 0.1), atol=4e-3) assert t.metadata.Signal.quantity == "$\\frac{t}{\\lambda}$" def test_thickness_mfp(self): t = self.s.estimate_thickness(zlp=self.zlp, mean_free_path=120) - np.testing.assert_allclose(t.data, 120 * np.arange(0.3,2,0.1), rtol=3e-3) + np.testing.assert_allclose(t.data, 120 * np.arange(0.3, 2, 0.1), rtol=3e-3) assert t.metadata.Signal.quantity == "thickness (nm)" def test_thickness_density(self): t = self.s.estimate_thickness(zlp=self.zlp, density=3.6) - np.testing.assert_allclose(t.data, 142 * np.arange(0.3,2,0.1), rtol=3e-3) + np.testing.assert_allclose(t.data, 142 * np.arange(0.3, 2, 0.1), rtol=3e-3) assert t.metadata.Signal.quantity == "thickness (nm)" def test_thickness_density_and_mfp(self): t = self.s.estimate_thickness(zlp=self.zlp, density=3.6, mean_free_path=120) - np.testing.assert_allclose(t.data, 127.5 * np.arange(0.3,2,0.1), rtol=3e-3) + np.testing.assert_allclose(t.data, 127.5 * np.arange(0.3, 2, 0.1), rtol=3e-3) assert t.metadata.Signal.quantity == "thickness (nm)" def test_threshold(self): t = self.s.estimate_thickness(threshold=4.5, density=3.6, mean_free_path=120) - np.testing.assert_allclose(t.data, 127.5 * np.arange(0.3,2,0.1), rtol=3e-3) + np.testing.assert_allclose(t.data, 127.5 * np.arange(0.3, 2, 0.1), rtol=3e-3) assert t.metadata.Signal.quantity == "thickness (nm)" def test_threshold_nd(self): threshold = self.s._get_navigation_signal() threshold.data[:] = 4.5 - t = self.s.estimate_thickness(threshold=threshold, density=3.6, mean_free_path=120) - np.testing.assert_allclose(t.data, 127.5 * np.arange(0.3,2,0.1), rtol=3e-3) + t = self.s.estimate_thickness( + threshold=threshold, density=3.6, mean_free_path=120 + ) + np.testing.assert_allclose(t.data, 127.5 * np.arange(0.3, 2, 0.1), rtol=3e-3) assert t.metadata.Signal.quantity == "thickness (nm)" def test_no_zlp_or_threshold(self): @@ -445,31 +421,31 @@ def test_at_532eV(self, capsys): s.print_edges_near_energy(532) captured = capsys.readouterr() expected_out = ( - '+-------+-------------------+-----------+-----------------+\n' - '| edge | onset energy (eV) | relevance | description |\n' - '+-------+-------------------+-----------+-----------------+\n' - '| O_K | 532.0 | 
Major | Abrupt onset |\n' - '| Pd_M3 | 531.0 | Minor | |\n' - '| At_N5 | 533.0 | Minor | |\n' - '| Sb_M5 | 528.0 | Major | Delayed maximum |\n' - '| Sb_M4 | 537.0 | Major | Delayed maximum |\n' - '+-------+-------------------+-----------+-----------------+\n' + "+-------+-------------------+-----------+-----------------+\n" + "| edge | onset energy (eV) | relevance | description |\n" + "+-------+-------------------+-----------+-----------------+\n" + "| O_K | 532.0 | Major | Abrupt onset |\n" + "| Pd_M3 | 531.0 | Minor | |\n" + "| At_N5 | 533.0 | Minor | |\n" + "| Sb_M5 | 528.0 | Major | Delayed maximum |\n" + "| Sb_M4 | 537.0 | Major | Delayed maximum |\n" + "+-------+-------------------+-----------+-----------------+\n" ) assert captured.out == expected_out def test_sequence_edges(self, capsys): s = self.signal - s.print_edges_near_energy(123, edges=['Mn_L2', 'O_K', 'Fe_L2']) + s.print_edges_near_energy(123, edges=["Mn_L2", "O_K", "Fe_L2"]) captured = capsys.readouterr() expected_out = ( - '+-------+-------------------+-----------+-----------------------------+\n' - '| edge | onset energy (eV) | relevance | description |\n' - '+-------+-------------------+-----------+-----------------------------+\n' - '| Mn_L2 | 651.0 | Major | Sharp peak. Delayed maximum |\n' - '| O_K | 532.0 | Major | Abrupt onset |\n' - '| Fe_L2 | 721.0 | Major | Sharp peak. Delayed maximum |\n' - '+-------+-------------------+-----------+-----------------------------+\n' - ) + "+-------+-------------------+-----------+-----------------------------+\n" + "| edge | onset energy (eV) | relevance | description |\n" + "+-------+-------------------+-----------+-----------------------------+\n" + "| Mn_L2 | 651.0 | Major | Sharp peak. Delayed maximum |\n" + "| O_K | 532.0 | Major | Abrupt onset |\n" + "| Fe_L2 | 721.0 | Major | Sharp peak. 
Delayed maximum |\n"
+            "+-------+-------------------+-----------+-----------------------------+\n"
+        )

         assert captured.out == expected_out

     def test_no_energy_and_edges(self):
@@ -486,18 +462,18 @@ def setup_method(self, method):

     def test_at_532eV(self, capsys):
         s = self.signal
-        s.edges_at_energy(532, width=20, only_major=True, order='ascending')
+        s.edges_at_energy(532, width=20, only_major=True, order="ascending")
         captured = capsys.readouterr()

         expected_out = (
-            '+-------+-------------------+-----------+-----------------+\n'
-            '| edge  | onset energy (eV) | relevance | description     |\n'
-            '+-------+-------------------+-----------+-----------------+\n'
-            '| Sb_M5 | 528.0             | Major     | Delayed maximum |\n'
-            '| O_K   | 532.0             | Major     | Abrupt onset    |\n'
-            '| Sb_M4 | 537.0             | Major     | Delayed maximum |\n'
-            '+-------+-------------------+-----------+-----------------+\n'
-            )
+            "+-------+-------------------+-----------+-----------------+\n"
+            "| edge  | onset energy (eV) | relevance | description     |\n"
+            "+-------+-------------------+-----------+-----------------+\n"
+            "| Sb_M5 | 528.0             | Major     | Delayed maximum |\n"
+            "| O_K   | 532.0             | Major     | Abrupt onset    |\n"
+            "| Sb_M4 | 537.0             | Major     | Delayed maximum |\n"
+            "+-------+-------------------+-----------+-----------------+\n"
+        )

         assert captured.out == expected_out

@@ -509,16 +485,15 @@ def setup_method(self, method):

     def test_Fe_O(self):
         s = self.signal
-        complementary = s._get_complementary_edges(['Fe_L2', 'O_K'])
+        complementary = s._get_complementary_edges(["Fe_L2", "O_K"])

-        assert complementary == ['Fe_L1', 'Fe_L3', 'Fe_M3', 'Fe_M2']
+        assert complementary == ["Fe_L1", "Fe_L3", "Fe_M3", "Fe_M2"]

     def test_Fe_O_only_major(self):
         s = self.signal
-        complementary = s._get_complementary_edges(['Fe_L2', 'O_K'],
-                                                   only_major=True)
+        complementary = s._get_complementary_edges(["Fe_L2", "O_K"], only_major=True)

-        assert complementary == ['Fe_L3', 'Fe_M3', 'Fe_M2']
+        assert complementary == ["Fe_L3", "Fe_M3", "Fe_M2"]


 class Test_Plot_EELS:
@@ -528,22 +503,29 @@ def setup_method(self, method):

     def test_plot_no_markers(self):
         s = self.signal
-        s.add_elements(('Mn','Cr'))
+        s.add_elements(("Mn", "Cr"))
         s.plot()
         assert len(s._edge_markers["names"]) == 0

     def test_plot_edges_True(self):
         s = self.signal
-        s.add_elements(('Mn','Cr'))
+        s.add_elements(("Mn", "Cr"))
         s.plot(plot_edges=True)
         print(s._edge_markers["names"])
         assert len(s._edge_markers["names"]) == 8
         assert set(s._edge_markers["names"]) == {
-            'Cr_L2', 'Cr_L3', 'Cr_L1', 'Fe_L2', 'Fe_L3', 'Mn_L2', 'Mn_L3', 'Mn_L1'
-            }
+            "Cr_L2",
+            "Cr_L3",
+            "Cr_L1",
+            "Fe_L2",
+            "Fe_L3",
+            "Mn_L2",
+            "Mn_L3",
+            "Mn_L1",
+        }

     def test_plot_edges_True_without_elements(self):
         s = self.signal
@@ -554,58 +536,61 @@ def test_plot_edges_True_without_elements(self):

     def test_plot_edges_from_element_family_specific(self):
         s = self.signal
-        s.plot(plot_edges=['Mn', 'Ti_L', 'Cr_L3'], only_edges=('Major'))
+        s.plot(plot_edges=["Mn", "Ti_L", "Cr_L3"], only_edges=("Major"))
         print(s._edge_markers["names"])
         assert len(s._edge_markers["names"]) == 7
         assert set(s._edge_markers["names"]) == {
-            'Fe_L2', 'Fe_L3', 'Mn_L2', 'Mn_L3', 'Ti_L2', 'Ti_L3', 'Cr_L3'
-            }
+            "Fe_L2",
+            "Fe_L3",
+            "Mn_L2",
+            "Mn_L3",
+            "Ti_L2",
+            "Ti_L3",
+            "Cr_L3",
+        }

     def test_unsupported_edge_family(self):
         s = self.signal
         with pytest.raises(AttributeError):
-            s.plot(plot_edges=['Cr_P'])
+            s.plot(plot_edges=["Cr_P"])

     def test_unsupported_edge(self):
         s = self.signal
         with pytest.raises(AttributeError):
-            s.plot(plot_edges=['Xe_P4'])
+            s.plot(plot_edges=["Xe_P4"])

     def test_unsupported_element(self):
         s = self.signal
         with pytest.raises(ValueError):
-            s.plot(plot_edges=['ABC_L1'])
+            s.plot(plot_edges=["ABC_L1"])

     def test_remove_edge_labels(self):
         s = self.signal
         del s.metadata.Sample.elements
-        s.plot(plot_edges=['Cr_L', 'Fe_L2'])
-        s._remove_edge_labels(['Cr_L1', 'Fe_L2'])
+        s.plot(plot_edges=["Cr_L", "Fe_L2"])
+        s._remove_edge_labels(["Cr_L1", "Fe_L2"])
         assert len(s._edge_markers["names"]) == 2
-        assert set(s._edge_markers["names"]) == set(['Cr_L2', 'Cr_L3'])
+        assert set(s._edge_markers["names"]) == set(["Cr_L2", "Cr_L3"])

     def test_plot_edges_without_markers_provided(self):
         s = self.signal
         s.plot()
-        s._plot_edge_labels({'Fe_L2': 721.0, 'O_K': 532.0})
+        s._plot_edge_labels({"Fe_L2": 721.0, "O_K": 532.0})
         assert len(s._edge_markers["names"]) == 2
-        assert set(s._edge_markers["names"]) == set(['Fe_L2', 'O_K'])
+        assert set(s._edge_markers["names"]) == set(["Fe_L2", "O_K"])


 @lazifyTestClass
 class TestVacuumMask:
-
     def setup_method(self, method):
-        s = exspy.signals.EELSSpectrum(
-            np.array([np.linspace(0.001, 0.5, 20)] * 100).T
-        )
+        s = exspy.signals.EELSSpectrum(np.array([np.linspace(0.001, 0.5, 20)] * 100).T)
         s.add_poissonian_noise(random_state=1)
         s.axes_manager[-1].scale = 0.25
-        s.axes_manager[-1].units = 'eV'
+        s.axes_manager[-1].units = "eV"
         s.inav[:10] += 20
         self.signal = s
diff --git a/exspy/test/signals/test_kramers_kronig_transform.py b/exspy/test/signals/test_kramers_kronig_transform.py
index 5bd668b39..07c13523e 100644
--- a/exspy/test/signals/test_kramers_kronig_transform.py
+++ b/exspy/test/signals/test_kramers_kronig_transform.py
@@ -29,7 +29,6 @@


 class Test2D:
-
     def setup_method(self, method):
         """To test the kramers_kronig_analysis we will generate 3
         EELSSpectrum instances. First a model energy loss function(ELF),
@@ -41,7 +40,7 @@ def setup_method(self, method):
         """

         # Parameters
-        i0 = 1.
+        i0 = 1.0
         t = hs.signals.BaseSignal(np.arange(10, 70, 10).reshape((2, 3)))
         t = t.transpose(signal_axes=0)
         scale = 0.02
@@ -49,23 +48,22 @@ def setup_method(self, method):
         # Create an 3x2x2048 spectrum with Drude plasmon
         s = EELSSpectrum(np.zeros((2, 3, 2 * 2048)))
         s.set_microscope_parameters(
-            beam_energy=300.0,
-            convergence_angle=5,
-            collection_angle=10.0)
+            beam_energy=300.0, convergence_angle=5, collection_angle=10.0
+        )
         s.axes_manager.signal_axes[0].scale = scale
         k = eels_constant(s, i0, t)

         vpm = VolumePlasmonDrude()
         m = s.create_model(auto_background=False)
         m.append(vpm)
-        vpm.intensity.map['values'][:] = 1
-        vpm.plasmon_energy.map['values'] = np.array([[8., 18.4, 15.8],
-                                                     [16.6, 4.3, 3.7]])
-        vpm.fwhm.map['values'] = np.array([[2.3, 4.8, 0.53],
-                                           [3.7, 0.3, 0.3]])
-        vpm.intensity.map['is_set'][:] = True
-        vpm.plasmon_energy.map['is_set'][:] = True
-        vpm.fwhm.map['is_set'][:] = True
+        vpm.intensity.map["values"][:] = 1
+        vpm.plasmon_energy.map["values"] = np.array(
+            [[8.0, 18.4, 15.8], [16.6, 4.3, 3.7]]
+        )
+        vpm.fwhm.map["values"] = np.array([[2.3, 4.8, 0.53], [3.7, 0.3, 0.3]])
+        vpm.intensity.map["is_set"][:] = True
+        vpm.plasmon_energy.map["is_set"][:] = True
+        vpm.fwhm.map["is_set"][:] = True
         s.data = (m.as_signal() * k).data

         # Create ZLP
@@ -90,13 +88,9 @@ def test_df_given_n(self):

         """
         # i use n=1000 to simulate a metal (enormous n)
-        cdf = self.s.kramers_kronig_analysis(zlp=self.zlp,
-                                             iterations=1,
-                                             n=1000.)
+        cdf = self.s.kramers_kronig_analysis(zlp=self.zlp, iterations=1, n=1000.0)
         s = cdf.get_electron_energy_loss_spectrum(self.zlp, self.thickness)
-        np.testing.assert_allclose(s.data,
-                                   self.s.data[..., 1:],
-                                   rtol=0.01)
+        np.testing.assert_allclose(s.data, self.s.data[..., 1:], rtol=0.01)

     def test_df_given_thickness(self):
         """The kramers kronig analysis method applied to the signal we
@@ -104,28 +98,35 @@ def test_df_given_thickness(self):
         plasmon. Hopefully, we recover the signal by inverting the CDF.

         """
-        cdf = self.s.kramers_kronig_analysis(zlp=self.zlp,
-                                             iterations=1,
-                                             t=self.thickness)
+        cdf = self.s.kramers_kronig_analysis(
+            zlp=self.zlp, iterations=1, t=self.thickness
+        )
         s = cdf.get_electron_energy_loss_spectrum(self.zlp, self.thickness)
-        np.testing.assert_allclose(s.data,
-                                   self.s.data[..., 1:],
-                                   rtol=0.01)
+        np.testing.assert_allclose(s.data, self.s.data[..., 1:], rtol=0.01)

     def test_bethe_sum_rule(self):
-        df = self.s.kramers_kronig_analysis(zlp=self.zlp,
-                                            iterations=1,
-                                            n=1000.)
-        neff1, neff2 = df.get_number_of_effective_electrons(nat=50e27,
-                                                            cumulative=False)
-        np.testing.assert_allclose(neff1.data,
-                                   np.array([[0.91187657, 4.72490711, 3.60594653],
-                                             [3.88077047, 0.26759741, 0.19813647]]),
-                                   rtol=1e-6)
-        np.testing.assert_allclose(neff2.data,
-                                   np.array([[0.91299039, 4.37469112, 3.41580094],
-                                             [3.64866394, 0.15693674, 0.11146413]]),
-                                   rtol=1e-6)
+        df = self.s.kramers_kronig_analysis(zlp=self.zlp, iterations=1, n=1000.0)
+        neff1, neff2 = df.get_number_of_effective_electrons(nat=50e27, cumulative=False)
+        np.testing.assert_allclose(
+            neff1.data,
+            np.array(
+                [
+                    [0.91187657, 4.72490711, 3.60594653],
+                    [3.88077047, 0.26759741, 0.19813647],
+                ]
+            ),
+            rtol=1e-6,
+        )
+        np.testing.assert_allclose(
+            neff2.data,
+            np.array(
+                [
+                    [0.91299039, 4.37469112, 3.41580094],
+                    [3.64866394, 0.15693674, 0.11146413],
+                ]
+            ),
+            rtol=1e-6,
+        )

     def test_thickness_estimation(self):
         """Kramers kronig analysis gives a rough estimation of sample
@@ -133,25 +134,23 @@ def test_thickness_estimation(self):
         scattering distribution, we can use it for testing putposes.

         """
-        cdf, output = self.s.kramers_kronig_analysis(zlp=self.zlp,
-                                                     iterations=1,
-                                                     n=1000.,
-                                                     full_output=True)
+        cdf, output = self.s.kramers_kronig_analysis(
+            zlp=self.zlp, iterations=1, n=1000.0, full_output=True
+        )
         np.testing.assert_allclose(
-            self.thickness.data,
-            output['thickness'].data,
-            rtol=0.01)
+            self.thickness.data, output["thickness"].data, rtol=0.01
+        )

     def test_thicness_input_array(self):
         with pytest.raises(ValueError):
-            self.s.kramers_kronig_analysis(zlp=self.zlp,
-                                           iterations=1,
-                                           t=self.thickness.data)
+            self.s.kramers_kronig_analysis(
+                zlp=self.zlp, iterations=1, t=self.thickness.data
+            )

     def test_single_spectrum_dielectric(self):
         s_in = self.s.inav[0, 0]
         z = self.zlp.inav[0, 0]
         t = self.thickness.data[0, 0]
-        cdf = s_in.kramers_kronig_analysis(zlp=z, iterations=1, n=1000.)
+        cdf = s_in.kramers_kronig_analysis(zlp=z, iterations=1, n=1000.0)
         s_out = cdf.get_electron_energy_loss_spectrum(z, t)
         np.testing.assert_allclose(s_out.data, s_in.data[1:], rtol=0.01)
diff --git a/exspy/test/test_non-uniform_not-implemented.py b/exspy/test/test_non-uniform_not-implemented.py
index 7f0e582f8..e6b7f6a22 100644
--- a/exspy/test/test_non-uniform_not-implemented.py
+++ b/exspy/test/test_non-uniform_not-implemented.py
@@ -20,6 +20,7 @@

 import exspy

+
 def test_eels():
     s = exspy.signals.EELSSpectrum(([0, 1]))
     s0 = s.deepcopy()
@@ -41,7 +42,7 @@ def test_eels():
     with pytest.raises(NotImplementedError):
         s.kramers_kronig_analysis()
     m = s.create_model()
-    g = exspy.components.EELSCLEdge('N_K')
+    g = exspy.components.EELSCLEdge("N_K")
     with pytest.raises(NotImplementedError):
         m.append(g)

@@ -58,8 +59,8 @@ def test_eds():
     s2.get_calibration_from(s2)
     m = s.create_model()
     with pytest.raises(NotImplementedError):
-        m.add_family_lines('Al_Ka')
+        m.add_family_lines("Al_Ka")
     with pytest.raises(NotImplementedError):
-        m._set_energy_scale('Al_Ka', [1.0])
+        m._set_energy_scale("Al_Ka", [1.0])
     with pytest.raises(NotImplementedError):
-        m._set_energy_offset('Al_Ka', [1.0])
+        m._set_energy_offset("Al_Ka", [1.0])
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..fb58f9eb1
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,5 @@
+[tool.black]
+force-exclude = '''
+    exspy/misc/eds/ffast_mac.py
+    | exspy/misc/elements.py
+'''
\ No newline at end of file
diff --git a/setup.py b/setup.py
index ce1119f79..c01b4b373 100644
--- a/setup.py
+++ b/setup.py
@@ -49,7 +49,6 @@
 }


-
 setup(
     name=name,
     version=version,
@@ -78,7 +77,6 @@
         "Topic :: Scientific/Engineering :: Physics",
     ],
     entry_points={"hyperspy.extensions": "exspy = exspy"},
-
     packages=find_packages(),
     package_dir={"exspy": "exspy"},
     extras_require=extra_feature_requirements,
@@ -94,7 +92,7 @@
         "requests",
         "scipy",
         "traits",
-        ],
+    ],
     python_requires="~=3.8",
     package_data={
         "": ["LICENSE", "README.rst"],
@@ -102,5 +100,7 @@
             "data/*hspy",
             "test/drawing/data/*hspy",
             "test/signals/data/*hspy",
-            "hyperspy_extension.yaml"],
-    },)
+            "hyperspy_extension.yaml",
+        ],
+    },
+)