diff --git a/README.md b/README.md index d62845d4c..a7d3a7810 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ - > :warning: **py4DSTEM version 0.14 update** :warning: Warning: this is a major update and we expect some workflows to break. You can still install previous versions of py4DSTEM [as discussed here](#legacyinstall) > :warning: **Phase retrieval refactor version 0.14.9** :warning: Warning: The phase-retrieval modules in py4DSTEM (DPC, parallax, and ptychography) underwent a major refactor in version 0.14.9 and as such older tutorial notebooks will not work as expected. Notably, class names have been pruned to remove the trailing "Reconstruction" (`DPCReconstruction` -> `DPC` etc.), and regularization functions have dropped the `_iter` suffix (and are instead specified as boolean flags). See the [updated tutorials](https://github.com/py4dstem/py4DSTEM_tutorials) for more information. diff --git a/py4DSTEM/process/__init__.py b/py4DSTEM/process/__init__.py index 6d7d36b28..a1d43401a 100644 --- a/py4DSTEM/process/__init__.py +++ b/py4DSTEM/process/__init__.py @@ -19,3 +19,5 @@ except (ImportError, ModuleNotFoundError) as exc: if not is_package_lite: raise exc + +from py4DSTEM.process.utils import Cluster diff --git a/py4DSTEM/process/diffraction/crystal.py b/py4DSTEM/process/diffraction/crystal.py index 33e6b07b8..09c700125 100644 --- a/py4DSTEM/process/diffraction/crystal.py +++ b/py4DSTEM/process/diffraction/crystal.py @@ -97,7 +97,7 @@ def __init__( # [a a a 90 90 90] # [a b c 90 90 90] # [a b c alpha beta gamma] - cell = np.asarray(cell, dtype="float_") + cell = np.array(cell, dtype="float") if np.size(cell) == 1: self.cell = np.hstack([cell, cell, cell, 90, 90, 90]) elif np.size(cell) == 3: @@ -653,7 +653,7 @@ def calculate_structure_factors( # Calculate single atom scattering factors # Note this can be sped up a lot, but we may want to generalize to allow non-1.0 occupancy in the future. 
f_all = np.zeros( - (np.size(self.g_vec_leng, 0), self.positions.shape[0]), dtype="float_" + (np.size(self.g_vec_leng, 0), self.positions.shape[0]), dtype="float" ) for a0 in range(self.positions.shape[0]): atom_sf = single_atom_scatter([self.numbers[a0]], [1], self.g_vec_leng, "A") diff --git a/py4DSTEM/process/diffraction/crystal_phase.py b/py4DSTEM/process/diffraction/crystal_phase.py index c161304f9..9edfa24e3 100644 --- a/py4DSTEM/process/diffraction/crystal_phase.py +++ b/py4DSTEM/process/diffraction/crystal_phase.py @@ -102,6 +102,7 @@ def quantify_single_pattern( plot_correlation_radius=False, scale_markers_experiment=40, scale_markers_calculated=200, + max_marker_size=400, crystal_inds_plot=None, phase_colors=None, figsize=(10, 7), @@ -615,8 +616,11 @@ def quantify_single_pattern( qy0, qx0, # s = scale_markers_experiment * intensity0, - s=scale_markers_experiment - * bragg_peaks.data["intensity"][np.logical_not(keep)], + s=np.minimum( + scale_markers_experiment + * bragg_peaks.data["intensity"][np.logical_not(keep)], + max_marker_size, + ), marker="o", facecolor=[0.7, 0.7, 0.7], ) @@ -624,7 +628,10 @@ def quantify_single_pattern( qy, qx, # s = scale_markers_experiment * intensity, - s=scale_markers_experiment * bragg_peaks.data["intensity"][keep], + s=np.minimum( + scale_markers_experiment * bragg_peaks.data["intensity"][keep], + max_marker_size, + ), marker="o", facecolor=[0.7, 0.7, 0.7], ) @@ -799,6 +806,7 @@ def quantify_phase( strain_max=0.02, include_false_positives=True, weight_false_positives=1.0, + weight_unmatched_peaks=1.0, progress_bar=True, ): """ @@ -899,6 +907,7 @@ def quantify_phase( strain_max=strain_max, include_false_positives=include_false_positives, weight_false_positives=weight_false_positives, + weight_unmatched_peaks=weight_unmatched_peaks, plot_result=False, verbose=False, returnfig=False, @@ -1220,6 +1229,7 @@ def plot_dominant_phase( self, use_correlation_scores=False, reliability_range=(0.0, 1.0), + normalize_exp_intensity=True, sigma=0.0, phase_colors=None, ticks=True, @@ -1302,6 +1312,11 @@ def plot_dominant_phase( sigma=sigma, mode="nearest", ) + self.phase_sig = phase_sig + + # # normalize the signal by the intensity of each experimental pattern + # if normalize_exp_intensity: + # phase_sig /= self.int_total[None,:,:] # find highest correlation score for each crystal and match index for a0 in range(self.num_crystals): @@ -1327,6 +1342,11 @@ def plot_dominant_phase( # Estimate the reliability phase_rel = phase_corr - phase_corr_2nd + + # normalize the reliability by the intensity of each experimental pattern + if normalize_exp_intensity: + phase_rel /= self.int_total + phase_scale = np.clip( (phase_rel - reliability_range[0]) / (reliability_range[1] - reliability_range[0]), @@ -1351,6 +1371,8 @@ def plot_dominant_phase( sub = phase_map == a0 for a1 in range(3): self.phase_rgb[:, :, a1][sub] = phase_colors[a0, a1] * phase_scale[sub] + + self.phase_scale = phase_scale # normalize # self.phase_rgb = np.clip( # (self.phase_rgb - rel_range[0]) / (rel_range[1] - rel_range[0]), diff --git a/py4DSTEM/process/diffraction/crystal_viz.py b/py4DSTEM/process/diffraction/crystal_viz.py index 008954443..45de2461b 100644 --- a/py4DSTEM/process/diffraction/crystal_viz.py +++ b/py4DSTEM/process/diffraction/crystal_viz.py @@ -8,6 +8,8 @@ from scipy.signal import medfilt from scipy.ndimage import gaussian_filter from scipy.ndimage import distance_transform_edt +from skimage.morphology import dilation, erosion +from scipy.optimize import nnls import warnings import numpy 
as np @@ -1113,6 +1115,7 @@ def plot_orientation_maps( self, orientation_map=None, orientation_ind: int = 0, + mask=None, dir_in_plane_degrees: float = 0.0, corr_range: np.ndarray = np.array([0, 5]), corr_normalize: bool = True, @@ -1135,6 +1138,7 @@ def plot_orientation_maps( orientation_map (OrientationMap): Class containing orientation matrices, correlation values, etc. Optional - can reference internally stored OrientationMap. orientation_ind (int): Which orientation match to plot if num_matches > 1 + mask (np.array): Manually apply a mask dir_in_plane_degrees (float): In-plane angle to plot in degrees. Default is 0 / x-axis / vertical down. corr_range (np.ndarray): Correlation intensity range for the plot corr_normalize (bool): If true, set mean correlation to 1. @@ -1213,21 +1217,39 @@ def plot_orientation_maps( dir_in_plane = np.deg2rad(dir_in_plane_degrees) ct = np.cos(dir_in_plane) st = np.sin(dir_in_plane) - basis_x = np.zeros((orientation_map.num_x, orientation_map.num_y, 3)) - basis_y = np.zeros((orientation_map.num_x, orientation_map.num_y, 3)) - basis_z = np.zeros((orientation_map.num_x, orientation_map.num_y, 3)) + rgb_x = np.zeros((orientation_map.num_x, orientation_map.num_y, 3)) rgb_z = np.zeros((orientation_map.num_x, orientation_map.num_y, 3)) - # Basis for fitting orientation projections - A = np.linalg.inv(self.orientation_zone_axis_range).T + if self.pointgroup.get_crystal_system() == "monoclinic": + basis_x = np.zeros((orientation_map.num_x, orientation_map.num_y, 6)) + # basis_y = np.zeros((orientation_map.num_x, orientation_map.num_y, 4)) + basis_z = np.zeros((orientation_map.num_x, orientation_map.num_y, 6)) + # Basis for fitting orientation projections + A = np.linalg.inv(self.orientation_zone_axis_range).T + A = np.hstack((A, -A)) + + # Testing + # print(np.round(self.orientation_zone_axis_range)) + # print() + # d = np.array((-1,0,0)) + # s = nnls(A,d)[0] + # print(np.round(s,3)) - # Correlation masking - corr = orientation_map.corr[:, :, orientation_ind] - if corr_normalize: - corr = corr / np.mean(corr) - mask = (corr - corr_range[0]) / (corr_range[1] - corr_range[0]) - mask = np.clip(mask, 0, 1) + else: + basis_x = np.zeros((orientation_map.num_x, orientation_map.num_y, 3)) + basis_y = np.zeros((orientation_map.num_x, orientation_map.num_y, 3)) + basis_z = np.zeros((orientation_map.num_x, orientation_map.num_y, 3)) + # Basis for fitting orientation projections + A = np.linalg.inv(self.orientation_zone_axis_range).T + + # Correlation or manual masking + if mask is None: + corr = orientation_map.corr[:, :, orientation_ind] + if corr_normalize: + corr = corr / np.mean(corr) + mask = (corr - corr_range[0]) / (corr_range[1] - corr_range[0]) + mask = np.clip(mask, 0, 1) # Generate images for rx, ry in tqdmnd( @@ -1237,22 +1259,34 @@ def plot_orientation_maps( unit=" PointList", disable=not progress_bar, ): - if self.pymatgen_available: - basis_x[rx, ry, :] = ( - A @ orientation_map.family[rx, ry, orientation_ind, :, 0] - ) - basis_y[rx, ry, :] = ( - A @ orientation_map.family[rx, ry, orientation_ind, :, 1] - ) - basis_x[rx, ry, :] = basis_x[rx, ry, :] * ct + basis_y[rx, ry, :] * st - basis_z[rx, ry, :] = ( - A @ orientation_map.family[rx, ry, orientation_ind, :, 2] + if self.pointgroup.get_crystal_system() == "monoclinic": + dir_x = ( + orientation_map.matrix[rx, ry, orientation_ind, :, 0] * ct + + orientation_map.matrix[rx, ry, orientation_ind, :, 1] * st ) + dir_z = orientation_map.matrix[rx, ry, orientation_ind, :, 2] + basis_x[rx, ry, :] = nnls(A, dir_x)[0] + 
basis_z[rx, ry, :] = nnls(A, dir_z)[0] + else: - basis_z[rx, ry, :] = ( - A @ orientation_map.matrix[rx, ry, orientation_ind, :, 2] - ) + if self.pymatgen_available: + + basis_x[rx, ry, :] = ( + A @ orientation_map.family[rx, ry, orientation_ind, :, 0] + ) + basis_y[rx, ry, :] = ( + A @ orientation_map.family[rx, ry, orientation_ind, :, 1] + ) + basis_x[rx, ry, :] = basis_x[rx, ry, :] * ct + basis_y[rx, ry, :] * st + + basis_z[rx, ry, :] = ( + A @ orientation_map.family[rx, ry, orientation_ind, :, 2] + ) + else: + basis_z[rx, ry, :] = ( + A @ orientation_map.matrix[rx, ry, orientation_ind, :, 2] + ) basis_x = np.clip(basis_x, 0, 1) basis_z = np.clip(basis_z, 0, 1) @@ -1260,101 +1294,267 @@ def plot_orientation_maps( basis_x_max = np.max(basis_x, axis=2) sub = basis_x_max > 0 basis_x_scale = basis_x * mask[:, :, None] - for a0 in range(3): - basis_x_scale[:, :, a0][sub] /= basis_x_max[sub] - basis_x_scale[:, :, a0][np.logical_not(sub)] = 0 - rgb_x = ( - basis_x_scale[:, :, 0][:, :, None] * color_basis[0, :][None, None, :] - + basis_x_scale[:, :, 1][:, :, None] * color_basis[1, :][None, None, :] - + basis_x_scale[:, :, 2][:, :, None] * color_basis[2, :][None, None, :] - ) + if self.pointgroup.get_crystal_system() == "monoclinic": + rgb_x = ( + basis_x_scale[:, :, 0][:, :, None] * np.array((1, 1, 1))[None, None, :] + + basis_x_scale[:, :, 1][:, :, None] * color_basis[2, :][None, None, :] + + basis_x_scale[:, :, 2][:, :, None] * color_basis[1, :][None, None, :] + + basis_x_scale[:, :, 3][:, :, None] * np.array((1, 1, 1))[None, None, :] + + basis_x_scale[:, :, 4][:, :, None] * color_basis[2, :][None, None, :] + + basis_x_scale[:, :, 5][:, :, None] * color_basis[0, :][None, None, :] + ) + else: + for a0 in range(3): + basis_x_scale[:, :, a0][sub] /= basis_x_max[sub] + basis_x_scale[:, :, a0][np.logical_not(sub)] = 0 + rgb_x = ( + basis_x_scale[:, :, 0][:, :, None] * color_basis[0, :][None, None, :] + + basis_x_scale[:, :, 1][:, :, None] * color_basis[1, :][None, None, :] + + basis_x_scale[:, :, 2][:, :, None] * color_basis[2, :][None, None, :] + ) basis_z_max = np.max(basis_z, axis=2) sub = basis_z_max > 0 basis_z_scale = basis_z * mask[:, :, None] - for a0 in range(3): - basis_z_scale[:, :, a0][sub] /= basis_z_max[sub] - basis_z_scale[:, :, a0][np.logical_not(sub)] = 0 - rgb_z = ( - basis_z_scale[:, :, 0][:, :, None] * color_basis[0, :][None, None, :] - + basis_z_scale[:, :, 1][:, :, None] * color_basis[1, :][None, None, :] - + basis_z_scale[:, :, 2][:, :, None] * color_basis[2, :][None, None, :] - ) + if self.pointgroup.get_crystal_system() == "monoclinic": + rgb_z = ( + basis_z_scale[:, :, 0][:, :, None] * np.array((1, 1, 1))[None, None, :] + + basis_z_scale[:, :, 1][:, :, None] * color_basis[2, :][None, None, :] + + basis_z_scale[:, :, 2][:, :, None] * color_basis[1, :][None, None, :] + + basis_z_scale[:, :, 3][:, :, None] * np.array((1, 1, 1))[None, None, :] + + basis_z_scale[:, :, 4][:, :, None] * color_basis[2, :][None, None, :] + + basis_z_scale[:, :, 5][:, :, None] * color_basis[0, :][None, None, :] + ) + else: + for a0 in range(3): + basis_z_scale[:, :, a0][sub] /= basis_z_max[sub] + basis_z_scale[:, :, a0][np.logical_not(sub)] = 0 + rgb_z = ( + basis_z_scale[:, :, 0][:, :, None] * color_basis[0, :][None, None, :] + + basis_z_scale[:, :, 1][:, :, None] * color_basis[1, :][None, None, :] + + basis_z_scale[:, :, 2][:, :, None] * color_basis[2, :][None, None, :] + ) - # Legend init - # projection vector - cam_dir = np.mean(self.orientation_zone_axis_range, axis=0) - cam_dir = cam_dir / 
np.linalg.norm(cam_dir) - az = np.rad2deg(np.arctan2(cam_dir[0], cam_dir[1])) - # el = np.rad2deg(np.arccos(cam_dir[2])) - el = np.rad2deg(np.arcsin(cam_dir[2])) - # coloring - wx = self.orientation_inds[:, 0] / self.orientation_zone_axis_steps - wy = self.orientation_inds[:, 1] / self.orientation_zone_axis_steps - w0 = 1 - wx - 0.5 * wy - w1 = wx - wy - w2 = wy - # w0 = 1 - w1/2 - w2/2 - w_scale = np.maximum(np.maximum(w0, w1), w2) - w_scale = 1 - np.exp(-w_scale) - w0 = w0 / w_scale - w1 = w1 / w_scale - w2 = w2 / w_scale - rgb_legend = np.clip( - w0[:, None] * color_basis[0, :] - + w1[:, None] * color_basis[1, :] - + w2[:, None] * color_basis[2, :], + rgb_x = np.clip(rgb_x, 0, 1) + rgb_z = np.clip(rgb_z, 0, 1) + + # if np.abs(self.cell[4] - 120.0) < 1e-6 or np.abs(self.cell[5] - 120.0) or np.abs(self.cell[6] - 120.0): + # label_0 = self.rational_ind( + # self.lattice_to_hexagonal( + # self.cartesian_to_lattice(self.orientation_zone_axis_range[0, :]) + # ) + # ) + # label_1 = self.rational_ind( + # self.lattice_to_hexagonal( + # self.cartesian_to_lattice(self.orientation_zone_axis_range[1, :]) + # ) + # ) + # label_2 = self.rational_ind( + # self.lattice_to_hexagonal( + # self.cartesian_to_lattice(self.orientation_zone_axis_range[2, :]) + # ) + # ) + # else: + # label_0 = self.rational_ind( + # self.cartesian_to_lattice(self.orientation_zone_axis_range[0, :]) + # ) + # label_1 = self.rational_ind( + # self.cartesian_to_lattice(self.orientation_zone_axis_range[1, :]) + # ) + # label_2 = self.rational_ind( + # self.cartesian_to_lattice(self.orientation_zone_axis_range[2, :]) + # ) + # inds_legend = np.array( + # [ + # 0, + # self.orientation_num_zones - self.orientation_zone_axis_steps - 1, + # self.orientation_num_zones - 1, + # ] + # ) + # print(label_0) + # print(label_1) + # print(label_2) + + # Legend coordinates + # TODO - potentially replace approx. 
coordinates with stereographic projection + power_radial = 0.9 + num_points = 90 + 1 + r = np.linspace(0, 1, num_points) + xa, ya = np.meshgrid(np.flip(r), r, indexing="ij") + if self.pointgroup.get_crystal_system() == "monoclinic": + xa = np.hstack((xa, xa[:, 1:])) + ya = np.hstack((ya - 1, ya[:, 1:])) + ra = np.sqrt(xa**2 + ya**2) + rmask = np.clip( + (1 - ra) / (r[1] - r[0]) + 0.5, 0, 1, ) - - if np.abs(self.cell[5] - 120.0) < 1e-6: - label_0 = self.rational_ind( - self.lattice_to_hexagonal( - self.cartesian_to_lattice(self.orientation_zone_axis_range[0, :]) + # angular range to span - assume zone axis range vectors are normalized + ta = np.arctan2(xa, ya) # Note theta direction is opposite from standard + if self.pointgroup.get_crystal_system() == "monoclinic": + tspan = np.pi / 2 + tmask = np.ones_like(rmask) + else: + tspan = np.arccos( + np.clip( + self.orientation_zone_axis_range[1] + @ self.orientation_zone_axis_range[2], + 0, + 1, ) ) - label_1 = self.rational_ind( - self.lattice_to_hexagonal( - self.cartesian_to_lattice(self.orientation_zone_axis_range[1, :]) - ) + tmask = np.clip( + (tspan - ta) / (r[1] - r[0]) + 0.5, + 0, + 1, ) - label_2 = self.rational_ind( - self.lattice_to_hexagonal( - self.cartesian_to_lattice(self.orientation_zone_axis_range[2, :]) - ) + # rscale = 1 - np.clip(1-ra,0,1)**power_radial + rscale = ra**power_radial + mask_leg = tmask * rmask + + if self.pointgroup.get_crystal_system() == "monoclinic": + scale = 1.5 + + # Right side coloring + weight_0 = np.clip(1 - rscale, 0, 1) + weight_1 = np.clip(ya, 0, 1) + weight_2 = np.clip(xa, 0, 1) + weight_total = np.maximum(weight_0 + weight_1 + weight_2, 0) + 1e-8 + weight_0 /= weight_total + weight_1 /= weight_total + weight_2 /= weight_total + weight_0 = np.minimum(weight_0, 1) + weight_1 = np.minimum(weight_1, 1) + weight_2 = np.minimum(weight_2, 1) + weight_max = (weight_0**scale + weight_1**scale + weight_2**scale) ** ( + 1 / scale + ) + 1e-8 + weight_0 /= weight_max + weight_1 /= weight_max + weight_2 /= weight_max + rgb_leg_right = ( + weight_0[:, :, None] * np.array((1.0, 1.0, 1.0))[None, None, :] + + weight_1[:, :, None] * color_basis[1, :][None, None, :] + + weight_2[:, :, None] * color_basis[2, :][None, None, :] ) - else: - label_0 = self.rational_ind( - self.cartesian_to_lattice(self.orientation_zone_axis_range[0, :]) + + # left side coloring + weight_0 = np.clip(1 - rscale, 0, 1) + weight_1 = np.clip(rscale * (ta - tspan) / tspan, 0, 1) + weight_2 = np.clip(xa, 0, 1) + weight_total = np.maximum(weight_0 + weight_1 + weight_2, 0) + 1e-8 + weight_0 /= weight_total + weight_1 /= weight_total + weight_2 /= weight_total + weight_0 = np.minimum(weight_0, 1) + weight_1 = np.minimum(weight_1, 1) + weight_2 = np.minimum(weight_2, 1) + weight_max = (weight_0**scale + weight_1**scale + weight_2**scale) ** ( + 1 / scale + ) + 1e-8 + weight_0 /= weight_max + weight_1 /= weight_max + weight_2 /= weight_max + rgb_leg_left = ( + weight_0[:, :, None] * np.array((1.0, 1.0, 1.0))[None, None, :] + + weight_1[:, :, None] * color_basis[0, :][None, None, :] + + weight_2[:, :, None] * color_basis[2, :][None, None, :] ) - label_1 = self.rational_ind( - self.cartesian_to_lattice(self.orientation_zone_axis_range[1, :]) + + # combined legend + rgb_leg = np.zeros((num_points, 2 * num_points - 1, 3)) + sub_right = ya >= 0 + sub_left = np.logical_not(sub_right) + for a0 in range(3): + rgb_leg[:, :, a0][sub_right] = rgb_leg_right[:, :, a0][sub_right] + rgb_leg[:, :, a0][sub_left] = rgb_leg_left[:, :, a0][sub_left] + + else: + 
weight_0 = np.maximum(1 - rscale, 0) + weight_1 = np.maximum(rscale * (tspan - ta) / tspan, 0) + weight_2 = np.maximum(rscale * ta / tspan, 0) + weight_total = np.maximum(weight_0 + weight_1 + weight_2, 0) + 1e-8 + weight_0 /= weight_total + weight_1 /= weight_total + weight_2 /= weight_total + weight_0 = np.minimum(weight_0, 1) + weight_1 = np.minimum(weight_1, 1) + weight_2 = np.minimum(weight_2, 1) + + # Generate rgb legend image + hkl = ( + weight_0[:, :, None] * self.orientation_zone_axis_range[0][None, None] + + weight_1[:, :, None] * self.orientation_zone_axis_range[1][None, None] + + weight_2[:, :, None] * self.orientation_zone_axis_range[2][None, None] ) - label_2 = self.rational_ind( - self.cartesian_to_lattice(self.orientation_zone_axis_range[2, :]) + hkl /= np.linalg.norm(hkl, axis=2)[:, :, None] + basis_leg = np.zeros((num_points, num_points, 3)) + for rx in range(num_points): + for ry in range(num_points): + basis_leg[rx, ry, :] = A @ hkl[rx, ry, :] + basis_leg_max = np.max(basis_leg, axis=2) + basis_leg_scale = basis_leg + for a0 in range(3): + basis_leg_scale[:, :, a0] /= basis_leg_max + rgb_leg = ( + basis_leg_scale[:, :, 0][:, :, None] * color_basis[0, :][None, None, :] + + basis_leg_scale[:, :, 1][:, :, None] * color_basis[1, :][None, None, :] + + basis_leg_scale[:, :, 2][:, :, None] * color_basis[2, :][None, None, :] ) - inds_legend = np.array( - [ - 0, - self.orientation_num_zones - self.orientation_zone_axis_steps - 1, - self.orientation_num_zones - 1, - ] + rgb_leg = np.clip( + rgb_leg * mask_leg[:, :, None] + (1 - mask_leg[:, :, None]), + 0, + 1, ) - # Determine if lattice direction labels should be left-right - # or right-left aligned. - v0 = self.orientation_vecs[inds_legend[0], :] - v1 = self.orientation_vecs[inds_legend[1], :] - v2 = self.orientation_vecs[inds_legend[2], :] - n = np.cross(v0, cam_dir) - if np.sum(v1 * n) < np.sum(v2 * n): - ha_1 = "left" - ha_2 = "right" - else: - ha_1 = "right" - ha_2 = "left" + # rgb_leg = weight_0 + # w_scale = np.maximum(np.maximum(weight_0, w1), w2) + # w_scale = 1 - np.exp(-w_scale) + # w0 = w0 / w_scale + # w1 = w1 / w_scale + # w2 = w2 / w_scale + # im_leg = weight_0[:,:,None] * color_basis[0, :][None,None,:] \ + # + weight_1[:,:,None] * color_basis[1, :][None,None,:] \ + # + weight_2[:,:,None] * color_basis[2, :][None,None,:] + + # fig,ax = plt.subplots(1,4,figsize=(12,4)) + # ax[0].imshow(basis_leg[:,:,0]) + # ax[1].imshow(basis_leg[:,:,1]) + # ax[2].imshow(basis_leg[:,:,2]) + # ax[3].imshow(rgb_leg) + + # ax[0].imshow(weight_0) + # ax[1].imshow(weight_1) + # ax[2].imshow(weight_2) + # ax[3].imshow(im_leg) + + # # projection vector + # cam_dir = np.mean(self.orientation_zone_axis_range, axis=0) + # cam_dir = cam_dir / np.linalg.norm(cam_dir) + # az = np.rad2deg(np.arctan2(cam_dir[0], cam_dir[1])) + # # el = np.rad2deg(np.arccos(cam_dir[2])) + # el = np.rad2deg(np.arcsin(cam_dir[2])) + # coloring + # wx = self.orientation_inds[:, 0] / self.orientation_zone_axis_steps + # wy = self.orientation_inds[:, 1] / self.orientation_zone_axis_steps + # w0 = 1 - wx - 0.5 * wy + # w1 = wx - wy + # w2 = wy + # # w0 = 1 - w1/2 - w2/2 + # w_scale = np.maximum(np.maximum(w0, w1), w2) + # w_scale = 1 - np.exp(-w_scale) + # w0 = w0 / w_scale + # w1 = w1 / w_scale + # w2 = w2 / w_scale + # rgb_legend = np.clip( + # w0[:, None] * color_basis[0, :] + # + w1[:, None] * color_basis[1, :] + # + w2[:, None] * color_basis[2, :], + # 0, + # 1, + # ) # plotting frame # fig, ax = plt.subplots(1, 3, figsize=figsize) @@ -1364,18 +1564,18 @@ def 
plot_orientation_maps( ax_z = fig.add_axes([0.4 + figbound[0], 0.0, 0.4 - 2 * +figbound[0], 1.0]) ax_l = fig.add_axes( [0.8 + figbound[0], 0.0, 0.2 - 2 * +figbound[0], 1.0], - projection="3d", - elev=el, - azim=az, + # projection="3d", + # elev=el, + # azim=az, ) elif plot_layout == 1: ax_x = fig.add_axes([0.0, 0.0 + figbound[0], 1.0, 0.4 - 2 * +figbound[0]]) ax_z = fig.add_axes([0.0, 0.4 + figbound[0], 1.0, 0.4 - 2 * +figbound[0]]) ax_l = fig.add_axes( [0.0, 0.8 + figbound[0], 1.0, 0.2 - 2 * +figbound[0]], - projection="3d", - elev=el, - azim=az, + # projection="3d", + # elev=el, + # azim=az, ) # orientation images @@ -1413,63 +1613,9 @@ def plot_orientation_maps( ax_x.axis("off") ax_z.axis("off") + # Legend if show_legend: - # Triangulate faces - p = self.orientation_vecs[:, (1, 0, 2)] - tri = mtri.Triangulation( - self.orientation_inds[:, 1] - self.orientation_inds[:, 0] * 1e-3, - self.orientation_inds[:, 0] - self.orientation_inds[:, 1] * 1e-3, - ) - # convert rgb values of pixels to faces - rgb_faces = ( - rgb_legend[tri.triangles[:, 0], :] - + rgb_legend[tri.triangles[:, 1], :] - + rgb_legend[tri.triangles[:, 2], :] - ) / 3 - # Add triangulated surface plot to axes - pc = art3d.Poly3DCollection( - p[tri.triangles], - facecolors=rgb_faces, - alpha=1, - ) - pc.set_antialiased(False) - ax_l.add_collection(pc) - - if plot_limit is None: - plot_limit = np.array( - [ - [np.min(p[:, 0]), np.min(p[:, 1]), np.min(p[:, 2])], - [np.max(p[:, 0]), np.max(p[:, 1]), np.max(p[:, 2])], - ] - ) - # plot_limit = (plot_limit - np.mean(plot_limit, axis=0)) * 1.5 + np.mean( - # plot_limit, axis=0 - # ) - plot_limit[:, 0] = ( - plot_limit[:, 0] - np.mean(plot_limit[:, 0]) - ) * 1.5 + np.mean(plot_limit[:, 0]) - plot_limit[:, 1] = ( - plot_limit[:, 2] - np.mean(plot_limit[:, 1]) - ) * 1.5 + np.mean(plot_limit[:, 1]) - plot_limit[:, 2] = ( - plot_limit[:, 1] - np.mean(plot_limit[:, 2]) - ) * 1.1 + np.mean(plot_limit[:, 2]) - - # ax_l.view_init(elev=el, azim=az) - # Appearance - ax_l.invert_yaxis() - if swap_axes_xy_limits: - ax_l.axes.set_xlim3d(left=plot_limit[0, 0], right=plot_limit[1, 0]) - ax_l.axes.set_ylim3d(bottom=plot_limit[0, 1], top=plot_limit[1, 1]) - ax_l.axes.set_zlim3d(bottom=plot_limit[0, 2], top=plot_limit[1, 2]) - else: - ax_l.axes.set_xlim3d(left=plot_limit[0, 1], right=plot_limit[1, 1]) - ax_l.axes.set_ylim3d(bottom=plot_limit[0, 0], top=plot_limit[1, 0]) - ax_l.axes.set_zlim3d(bottom=plot_limit[0, 2], top=plot_limit[1, 2]) - axisEqual3D(ax_l) - if camera_dist is not None: - ax_l.dist = camera_dist - ax_l.axis("off") + ax_l.imshow(rgb_leg) # Add text labels text_scale_pos = 0.1 @@ -1478,127 +1624,380 @@ def plot_orientation_maps( "family": "sans-serif", "fontweight": "normal", "color": "k", - "size": 14, + "size": 12, } format_labels = "{0:.2g}" - vec = self.orientation_vecs[inds_legend[0], :] - cam_dir - vec = vec / np.linalg.norm(vec) - if np.abs(self.cell[5] - 120.0) > 1e-6: + + bound = num_points * 0.25 + shift = num_points * 0.10 + if self.pointgroup.get_crystal_system() == "monoclinic": + p0 = np.array((1, 1)) * num_points + p1 = np.array((0, 1)) * num_points + p2 = np.array((1, 2)) * num_points + p3 = np.array((1, 0)) * num_points + else: + p0 = np.array((1, 0)) * num_points + p1 = np.array((1, 1)) * num_points + p2 = ( + np.array((1 - np.cos(np.pi / 2 - tspan), np.sin(np.pi / 2 - tspan))) + * num_points + ) + + if self.pointgroup.get_crystal_system() == "monoclinic": + v = np.double(self.orientation_zone_axis_range[0, :]).copy() + v /= np.max(np.abs(v)) + v = np.round(v, 2) 
ax_l.text( - self.orientation_vecs[inds_legend[0], 1] + vec[1] * text_scale_pos, - self.orientation_vecs[inds_legend[0], 0] + vec[0] * text_scale_pos, - self.orientation_vecs[inds_legend[0], 2] + vec[2] * text_scale_pos, + p0[1], + p0[0] + shift, "[" - + format_labels.format(label_0[0]) + + format_labels.format(v[0]) + " " - + format_labels.format(label_0[1]) + + format_labels.format(v[1]) + " " - + format_labels.format(label_0[2]) + + format_labels.format(v[2]) + "]", None, zorder=11, ha="center", **text_params, ) - else: + v = np.double(self.orientation_zone_axis_range[1, :]).copy() + v /= np.max(np.abs(v)) + v = np.round(v, 2) ax_l.text( - self.orientation_vecs[inds_legend[0], 1] + vec[1] * text_scale_pos, - self.orientation_vecs[inds_legend[0], 0] + vec[0] * text_scale_pos, - self.orientation_vecs[inds_legend[0], 2] + vec[2] * text_scale_pos, + p1[1], + p1[0] - shift, "[" - + format_labels.format(label_0[0]) + + format_labels.format(v[0]) + " " - + format_labels.format(label_0[1]) + + format_labels.format(v[1]) + " " - + format_labels.format(label_0[2]) + + format_labels.format(v[2]) + + "]", + None, + zorder=11, + ha="center", + **text_params, + ) + v = np.double(self.orientation_zone_axis_range[2, :]).copy() + v /= np.max(np.abs(v)) + v = np.round(v, 2) + ax_l.text( + p2[1], + p2[0] + shift, + "[" + + format_labels.format(v[0]) + " " - + format_labels.format(label_0[3]) + + format_labels.format(v[1]) + + " " + + format_labels.format(v[2]) + "]", None, zorder=11, ha="center", **text_params, ) - vec = self.orientation_vecs[inds_legend[1], :] - cam_dir - vec = vec / np.linalg.norm(vec) - if np.abs(self.cell[5] - 120.0) > 1e-6: + v = -1 * np.double(self.orientation_zone_axis_range[2, :].copy()) + 0 + v /= np.max(np.abs(v)) + v = np.round(v, 2) ax_l.text( - self.orientation_vecs[inds_legend[1], 1] + vec[1] * text_scale_pos, - self.orientation_vecs[inds_legend[1], 0] + vec[0] * text_scale_pos, - self.orientation_vecs[inds_legend[1], 2] + vec[2] * text_scale_pos, + p3[1], + p3[0] + shift, "[" - + format_labels.format(label_1[0]) + + format_labels.format(v[0]) + " " - + format_labels.format(label_1[1]) + + format_labels.format(v[1]) + " " - + format_labels.format(label_1[2]) + + format_labels.format(v[2]) + "]", None, - zorder=12, - ha=ha_1, + zorder=11, + ha="center", **text_params, ) + else: + v = self.orientation_zone_axis_range[0, :].copy() + v /= np.max(np.abs(v)) + v = np.round(v, 2) ax_l.text( - self.orientation_vecs[inds_legend[1], 1] + vec[1] * text_scale_pos, - self.orientation_vecs[inds_legend[1], 0] + vec[0] * text_scale_pos, - self.orientation_vecs[inds_legend[1], 2] + vec[2] * text_scale_pos, + p0[1], + p0[0] + shift, "[" - + format_labels.format(label_1[0]) + + format_labels.format(v[0]) + " " - + format_labels.format(label_1[1]) + + format_labels.format(v[1]) + " " - + format_labels.format(label_1[2]) - + " " - + format_labels.format(label_1[3]) + + format_labels.format(v[2]) + "]", None, - zorder=12, - ha=ha_1, + zorder=11, + ha="center", **text_params, ) - vec = self.orientation_vecs[inds_legend[2], :] - cam_dir - vec = vec / np.linalg.norm(vec) - if np.abs(self.cell[5] - 120.0) > 1e-6: + v = self.orientation_zone_axis_range[1, :].copy() + v /= np.max(np.abs(v)) + v = np.round(v, 2) ax_l.text( - self.orientation_vecs[inds_legend[2], 1] + vec[1] * text_scale_pos, - self.orientation_vecs[inds_legend[2], 0] + vec[0] * text_scale_pos, - self.orientation_vecs[inds_legend[2], 2] + vec[2] * text_scale_pos, + p1[1], + p1[0] + shift, "[" - + format_labels.format(label_2[0]) + + 
format_labels.format(v[0]) + " " - + format_labels.format(label_2[1]) + + format_labels.format(v[1]) + " " - + format_labels.format(label_2[2]) + + format_labels.format(v[2]) + "]", None, - zorder=13, - ha=ha_2, + zorder=11, + ha="center", **text_params, ) - else: + v = self.orientation_zone_axis_range[2, :].copy() + v /= np.max(np.abs(v)) + v = np.round(v, 2) ax_l.text( - self.orientation_vecs[inds_legend[2], 1] + vec[1] * text_scale_pos, - self.orientation_vecs[inds_legend[2], 0] + vec[0] * text_scale_pos, - self.orientation_vecs[inds_legend[2], 2] + vec[2] * text_scale_pos, + p2[1], + p2[0] - shift, "[" - + format_labels.format(label_2[0]) + + format_labels.format(v[0]) + " " - + format_labels.format(label_2[1]) + + format_labels.format(v[1]) + " " - + format_labels.format(label_2[2]) - + " " - + format_labels.format(label_2[3]) + + format_labels.format(v[2]) + "]", None, - zorder=13, - ha=ha_2, + zorder=11, + ha="center", **text_params, ) - plt.show() - else: - ax_l.set_axis_off() + if self.pointgroup.get_crystal_system() == "monoclinic": + ax_l.set_xlim((-bound, num_points * 2 - 1 + bound)) + else: + ax_l.set_xlim((-bound, num_points + bound)) + ax_l.set_ylim((num_points + bound, -bound)) + ax_l.axis("off") + + # if np.abs(self.cell[5] - 120.0) > 1e-6: + # ax_l.text( + # self.orientation_vecs[inds_legend[0], 1] + vec[1] * text_scale_pos, + # self.orientation_vecs[inds_legend[0], 0] + vec[0] * text_scale_pos, + # self.orientation_vecs[inds_legend[0], 2] + vec[2] * text_scale_pos, + # "[" + # + format_labels.format(label_0[0]) + # + " " + # + format_labels.format(label_0[1]) + # + " " + # + format_labels.format(label_0[2]) + # + "]", + # None, + # zorder=11, + # ha="center", + # **text_params, + # ) + # else: + # ax_l.text( + # self.orientation_vecs[inds_legend[0], 1] + vec[1] * text_scale_pos, + # self.orientation_vecs[inds_legend[0], 0] + vec[0] * text_scale_pos, + # self.orientation_vecs[inds_legend[0], 2] + vec[2] * text_scale_pos, + # "[" + # + format_labels.format(label_0[0]) + # + " " + # + format_labels.format(label_0[1]) + # + " " + # + format_labels.format(label_0[2]) + # + " " + # + format_labels.format(label_0[3]) + # + "]", + # None, + # zorder=11, + # ha="center", + # **text_params, + # ) + + # # Triangulate faces + # p = self.orientation_vecs[:, (1, 0, 2)] + # tri = mtri.Triangulation( + # self.orientation_inds[:, 1] - self.orientation_inds[:, 0] * 1e-3, + # self.orientation_inds[:, 0] - self.orientation_inds[:, 1] * 1e-3, + # ) + # convert rgb values of pixels to faces + # rgb_faces = ( + # rgb_legend[tri.triangles[:, 0], :] + # + rgb_legend[tri.triangles[:, 1], :] + # + rgb_legend[tri.triangles[:, 2], :] + # ) / 3 + # Add triangulated surface plot to axes + # pc = art3d.Poly3DCollection( + # p[tri.triangles], + # facecolors=rgb_faces, + # alpha=1, + # ) + # pc.set_antialiased(False) + # ax_l.add_collection(pc) + + # if plot_limit is None: + # plot_limit = np.array( + # [ + # [np.min(p[:, 0]), np.min(p[:, 1]), np.min(p[:, 2])], + # [np.max(p[:, 0]), np.max(p[:, 1]), np.max(p[:, 2])], + # ] + # ) + # # plot_limit = (plot_limit - np.mean(plot_limit, axis=0)) * 1.5 + np.mean( + # # plot_limit, axis=0 + # # ) + # plot_limit[:, 0] = ( + # plot_limit[:, 0] - np.mean(plot_limit[:, 0]) + # ) * 1.5 + np.mean(plot_limit[:, 0]) + # plot_limit[:, 1] = ( + # plot_limit[:, 2] - np.mean(plot_limit[:, 1]) + # ) * 1.5 + np.mean(plot_limit[:, 1]) + # plot_limit[:, 2] = ( + # plot_limit[:, 1] - np.mean(plot_limit[:, 2]) + # ) * 1.1 + np.mean(plot_limit[:, 2]) + + # # 
ax_l.view_init(elev=el, azim=az) + # # Appearance + # ax_l.invert_yaxis() + # if swap_axes_xy_limits: + # ax_l.axes.set_xlim3d(left=plot_limit[0, 0], right=plot_limit[1, 0]) + # ax_l.axes.set_ylim3d(bottom=plot_limit[0, 1], top=plot_limit[1, 1]) + # ax_l.axes.set_zlim3d(bottom=plot_limit[0, 2], top=plot_limit[1, 2]) + # else: + # ax_l.axes.set_xlim3d(left=plot_limit[0, 1], right=plot_limit[1, 1]) + # ax_l.axes.set_ylim3d(bottom=plot_limit[0, 0], top=plot_limit[1, 0]) + # ax_l.axes.set_zlim3d(bottom=plot_limit[0, 2], top=plot_limit[1, 2]) + # axisEqual3D(ax_l) + # if camera_dist is not None: + # ax_l.dist = camera_dist + # ax_l.axis("off") + + # # Add text labels + # text_scale_pos = 0.1 + # text_params = { + # "va": "center", + # "family": "sans-serif", + # "fontweight": "normal", + # "color": "k", + # "size": 14, + # } + # format_labels = "{0:.2g}" + # vec = self.orientation_vecs[inds_legend[0], :] - cam_dir + # vec = vec / np.linalg.norm(vec) + # if np.abs(self.cell[5] - 120.0) > 1e-6: + # ax_l.text( + # self.orientation_vecs[inds_legend[0], 1] + vec[1] * text_scale_pos, + # self.orientation_vecs[inds_legend[0], 0] + vec[0] * text_scale_pos, + # self.orientation_vecs[inds_legend[0], 2] + vec[2] * text_scale_pos, + # "[" + # + format_labels.format(label_0[0]) + # + " " + # + format_labels.format(label_0[1]) + # + " " + # + format_labels.format(label_0[2]) + # + "]", + # None, + # zorder=11, + # ha="center", + # **text_params, + # ) + # else: + # ax_l.text( + # self.orientation_vecs[inds_legend[0], 1] + vec[1] * text_scale_pos, + # self.orientation_vecs[inds_legend[0], 0] + vec[0] * text_scale_pos, + # self.orientation_vecs[inds_legend[0], 2] + vec[2] * text_scale_pos, + # "[" + # + format_labels.format(label_0[0]) + # + " " + # + format_labels.format(label_0[1]) + # + " " + # + format_labels.format(label_0[2]) + # + " " + # + format_labels.format(label_0[3]) + # + "]", + # None, + # zorder=11, + # ha="center", + # **text_params, + # ) + # vec = self.orientation_vecs[inds_legend[1], :] - cam_dir + # vec = vec / np.linalg.norm(vec) + # if np.abs(self.cell[5] - 120.0) > 1e-6: + # ax_l.text( + # self.orientation_vecs[inds_legend[1], 1] + vec[1] * text_scale_pos, + # self.orientation_vecs[inds_legend[1], 0] + vec[0] * text_scale_pos, + # self.orientation_vecs[inds_legend[1], 2] + vec[2] * text_scale_pos, + # "[" + # + format_labels.format(label_1[0]) + # + " " + # + format_labels.format(label_1[1]) + # + " " + # + format_labels.format(label_1[2]) + # + "]", + # None, + # zorder=12, + # ha=ha_1, + # **text_params, + # ) + # else: + # ax_l.text( + # self.orientation_vecs[inds_legend[1], 1] + vec[1] * text_scale_pos, + # self.orientation_vecs[inds_legend[1], 0] + vec[0] * text_scale_pos, + # self.orientation_vecs[inds_legend[1], 2] + vec[2] * text_scale_pos, + # "[" + # + format_labels.format(label_1[0]) + # + " " + # + format_labels.format(label_1[1]) + # + " " + # + format_labels.format(label_1[2]) + # + " " + # + format_labels.format(label_1[3]) + # + "]", + # None, + # zorder=12, + # ha=ha_1, + # **text_params, + # ) + # vec = self.orientation_vecs[inds_legend[2], :] - cam_dir + # vec = vec / np.linalg.norm(vec) + # if np.abs(self.cell[5] - 120.0) > 1e-6: + # ax_l.text( + # self.orientation_vecs[inds_legend[2], 1] + vec[1] * text_scale_pos, + # self.orientation_vecs[inds_legend[2], 0] + vec[0] * text_scale_pos, + # self.orientation_vecs[inds_legend[2], 2] + vec[2] * text_scale_pos, + # "[" + # + format_labels.format(label_2[0]) + # + " " + # + format_labels.format(label_2[1]) + # + " " + # + 
format_labels.format(label_2[2]) + # + "]", + # None, + # zorder=13, + # ha=ha_2, + # **text_params, + # ) + # else: + # ax_l.text( + # self.orientation_vecs[inds_legend[2], 1] + vec[1] * text_scale_pos, + # self.orientation_vecs[inds_legend[2], 0] + vec[0] * text_scale_pos, + # self.orientation_vecs[inds_legend[2], 2] + vec[2] * text_scale_pos, + # "[" + # + format_labels.format(label_2[0]) + # + " " + # + format_labels.format(label_2[1]) + # + " " + # + format_labels.format(label_2[2]) + # + " " + # + format_labels.format(label_2[3]) + # + "]", + # None, + # zorder=13, + # ha=ha_2, + # **text_params, + # ) images_orientation = np.zeros((orientation_map.num_x, orientation_map.num_y, 3, 2)) if self.pymatgen_available: diff --git a/py4DSTEM/process/polar/polar_analysis.py b/py4DSTEM/process/polar/polar_analysis.py index 67edff2cf..02b6afeda 100644 --- a/py4DSTEM/process/polar/polar_analysis.py +++ b/py4DSTEM/process/polar/polar_analysis.py @@ -5,6 +5,8 @@ import matplotlib.pyplot as plt from scipy.optimize import curve_fit from scipy.ndimage import gaussian_filter +from scipy.signal import argrelextrema +from sklearn.decomposition import PCA from emdfile import tqdmnd @@ -327,6 +329,10 @@ def calculate_pair_dist_function( which `plot_*` is True are returned. """ + k_width = np.array(k_width) + if k_width.size == 1: + k_width = k_width * np.ones(2) + # set up coordinates and scaling k = self.qq dk = k[1] - k[0] @@ -340,7 +346,59 @@ def calculate_pair_dist_function( RxRy[0] : RxRy[0] + hxwy[0], RxRy[1] : RxRy[1] + hxwy[1] ].mean(axis=(0, 1)) int_mean = np.mean(Ik) - sub_fit = k >= k_min + # sub_fit = k >= k_min + # sub_fit = np.logical_and( + # k >= k_min + + # Calculate structure factor mask + if k_max is None: + k_max = np.max(k) + mask_low = ( + np.sin( + np.clip( + (k - k_min) / k_width[0], + 0, + 1, + ) + * np.pi + / 2.0, + ) + ** 2 + ) + mask_high = ( + np.sin( + np.clip( + (k_max - k) / k_width[1], + 0, + 1, + ) + * np.pi + / 2.0, + ) + ** 2 + ) + mask = mask_low * mask_high + + # weighting function for fitting atomic scattering factors + weights_fit = np.divide( + 1, + mask_low, + where=mask_low > 1e-4, + ) + weights_fit[mask_low <= 1e-4] = np.inf + # Scale weighting to favour high k values + weights_fit *= k[-1] - 0.9 * k + dk + + # fig,ax = plt.subplots() + # ax.plot( + # k, + # weights_fit, + # ) + # ax.set_ylim([0,4]) + # ax.plot( + # k, + # mask_high, + # ) # initial guesses for background coefs const_bg = np.min(Ik) / int_mean @@ -350,15 +408,18 @@ def calculate_pair_dist_function( lb = [0, 0, 0, 0, 0] ub = [np.inf, np.inf, np.inf, np.inf, np.inf] # Weight the fit towards high k values - noise_est = k[-1] - k + dk + # noise_est = k[-1] - k + dk # Estimate the mean atomic form factor + background if maxfev is None: coefs = curve_fit( scattering_model, - k2[sub_fit], - Ik[sub_fit] / int_mean, - sigma=noise_est[sub_fit], + # k2[sub_fit], + # Ik[sub_fit] / int_mean, + # sigma=noise_est[sub_fit], + k2, + Ik / int_mean, + sigma=weights_fit, p0=coefs, xtol=1e-8, bounds=(lb, ub), @@ -366,9 +427,12 @@ def calculate_pair_dist_function( else: coefs = curve_fit( scattering_model, - k2[sub_fit], - Ik[sub_fit] / int_mean, - sigma=noise_est[sub_fit], + # k2[sub_fit], + # Ik[sub_fit] / int_mean, + # sigma=noise_est[sub_fit], + k2, + Ik / int_mean, + sigma=weights_fit, p0=coefs, xtol=1e-8, bounds=(lb, ub), @@ -385,19 +449,6 @@ def calculate_pair_dist_function( bg = scattering_model(k2, coefs) fk = bg - coefs[0] - # mask for structure factor estimate - if k_max is None: - k_max = np.max(k) - mask 
= np.clip( - np.minimum( - (k - 0.0) / k_width, - (k_max - k) / k_width, - ), - 0, - 1, - ) - mask = np.sin(mask * (np.pi / 2)) - # Estimate the reduced structure factor S(k) Sk = (Ik - bg) * k / fk @@ -423,14 +474,39 @@ def calculate_pair_dist_function( axis=0, ) ) - - # Damp the unphysical fluctuations at the PDF origin - if damp_origin_fluctuations: - ind_max = np.argmax(pdf_reduced) - r_ind_max = r[ind_max] - r_mask = np.minimum(r / r_ind_max, 1.0) - r_mask = np.sin(r_mask * np.pi / 2) ** 2 - pdf_reduced *= r_mask + pdf_reduced[0] = 0.0 + + # # Damp the unphysical fluctuations at the PDF origin + # if damp_origin_fluctuations: + # # Find radial value of primary peak + # ind_max = np.argmax(pdf_reduced) + # r_max = r[ind_max] + + # # find adjacent local minimum + # inds = argrelextrema(pdf_reduced, np.less) + # r_local_min = r[inds] + # r_thresh = np.max(r_local_min[r_local_min < r_max]) + # ind_thresh = np.argmin(np.abs(r_thresh - r)) + # pdf_thresh = pdf_reduced[ind_thresh] + # # r_thresh = np.max(r_local_min[r_local_min < r_max]) + + # # replace low r values with linear fit + # m = pdf_thresh / r_thresh + # pdf_reduced[r < r_thresh] = r[r < r_thresh] * m + + # pdf_reduced[r < r[ind_thresh]] = m * r[ind_thresh] + + # r_ind_max = r[ind_max] + # r_mask = np.minimum(r / r_ind_max, 1.0) + # r_mask = np.sin(r_mask * np.pi / 2) ** 2 + # pdf_reduced *= r_mask + + # original version + # ind_max = np.argmax(pdf_reduced) + # r_ind_max = r[ind_max] + # r_mask = np.minimum(r / r_ind_max, 1.0) + # r_mask = np.sin(r_mask * np.pi / 2) ** 2 + # pdf_reduced *= r_mask # Store results self.pdf_r = r @@ -448,9 +524,27 @@ def calculate_pair_dist_function( pdf[1:] /= 4 * np.pi * density * r[1:] * (r[1] - r[0]) pdf += 1 - # damp and clip values below zero + # Damp the unphysical fluctuations at the PDF origin if damp_origin_fluctuations: - pdf *= r_mask + # Find radial value of primary peak + ind_max = np.argmax(pdf) + r_max = r[ind_max] + + # find adjacent local minimum + inds = argrelextrema(pdf, np.less) + r_local_min = r[inds] + r_thresh = np.max(r_local_min[r_local_min < r_max]) + ind_thresh = np.argmin(np.abs(r_thresh - r)) + pdf_thresh = pdf[ind_thresh] + # r_thresh = np.max(r_local_min[r_local_min < r_max]) + + # replace low r values with linear fit + m = pdf_thresh / r_thresh + pdf[r < r_thresh] = r[r < r_thresh] * m + + # damp and clip values below zero + # if damp_origin_fluctuations: + # pdf *= r_mask if enforce_positivity: pdf = np.maximum(pdf, 0.0) diff --git a/py4DSTEM/process/utils/__init__.py b/py4DSTEM/process/utils/__init__.py index 643de1bf5..b79fbdbfa 100644 --- a/py4DSTEM/process/utils/__init__.py +++ b/py4DSTEM/process/utils/__init__.py @@ -4,6 +4,7 @@ from py4DSTEM.process.utils.elliptical_coords import * from py4DSTEM.process.utils.masks import * from py4DSTEM.process.utils.single_atom_scatter import * +from py4DSTEM.process.utils.cluster import Cluster # from preprocessing from py4DSTEM.preprocess.utils import ( diff --git a/py4DSTEM/process/utils/cluster.py b/py4DSTEM/process/utils/cluster.py new file mode 100644 index 000000000..aed0d213c --- /dev/null +++ b/py4DSTEM/process/utils/cluster.py @@ -0,0 +1,242 @@ +import numpy as np +import matplotlib.pyplot as plt +import pymatgen +from scipy.ndimage import binary_erosion +from py4DSTEM.process.utils import tqdmnd +from scipy.ndimage import gaussian_filter + + +class Cluster: + """ + Clustering 4D data + + """ + + def __init__( + self, + datacube, + ): + """ + Args: + datacube (py4DSTEM.DataCube): 4D-STEM data + + + """ + + 
        self.datacube = datacube
+
+    def find_similarity(
+        self,
+        mask=None,  # optional mask applied to each diffraction pattern
+    ):
+        # Which neighbors to search
+        # (-1,-1) will be equivalent to (1,1)
+        self.dxy = np.array(
+            (
+                (-1, -1),
+                (-1, 0),
+                (-1, 1),
+                (0, -1),
+                (1, 1),
+                (1, 0),
+                (1, -1),
+                (0, 1),
+            )
+        )
+
+        # initialize the self.similarity array
+        self.similarity = -1 * np.ones(
+            (self.datacube.shape[0], self.datacube.shape[1], self.dxy.shape[0])
+        )
+
+        # Loop over probe positions
+        for rx, ry in tqdmnd(
+            range(self.datacube.shape[0]),
+            range(self.datacube.shape[1]),
+        ):
+            if mask is None:
+                diff_ref = self.datacube[rx, ry]
+            else:
+                diff_ref = self.datacube[rx, ry][mask]
+
+            # loop over neighbors
+            for ind in range(self.dxy.shape[0]):
+                x_ind = rx + self.dxy[ind, 0]
+                y_ind = ry + self.dxy[ind, 1]
+                if (
+                    x_ind >= 0
+                    and y_ind >= 0
+                    and x_ind < self.datacube.shape[0]
+                    and y_ind < self.datacube.shape[1]
+                ):
+
+                    if mask is None:
+                        diff = self.datacube[x_ind, y_ind]
+                    else:
+                        diff = self.datacube[x_ind, y_ind][mask]
+
+                    # # image self.similarity with mean abs difference
+                    # self.similarity[rx,ry,ind] = np.mean(
+                    #     np.abs(
+                    #         diff - diff_ref
+                    #     )
+                    # )
+
+                    # image similarity from normalized correlation (cosine similarity)
+                    self.similarity[rx, ry, ind] = (
+                        np.sum(diff * diff_ref)
+                        / np.sqrt(np.sum(diff * diff))
+                        / np.sqrt(np.sum(diff_ref * diff_ref))
+                    )
+
+    # Create a function to map cluster index to color
+    def get_color(self, cluster_index):
+        colors = [
+            "slategray",
+            "lightcoral",
+            "gold",
+            "darkorange",
+            "yellowgreen",
+            "lightseagreen",
+            "cornflowerblue",
+            "royalblue",
+            "lightsteelblue",
+            "darkseagreen",
+        ]
+        return colors[(cluster_index - 1) % len(colors)]
+
+    # Find the pixel with the highest similarity and start the clustering from there
+    def indexing_clusters_all(
+        self,
+        mask,
+        threshold,
+    ):
+
+        self.dxy = np.array(
+            (
+                (-1, -1),
+                (-1, 0),
+                (-1, 1),
+                (0, -1),
+                (1, 1),
+                (1, 0),
+                (1, -1),
+                (0, 1),
+            )
+        )
+
+        sim_averaged = np.mean(self.similarity, axis=2)
+
+        # color the pixels with the cluster index
+        # map_cluster = np.zeros((sim_averaged.shape[0],sim_averaged.shape[1]))
+        self.cluster_map = np.zeros(
+            (sim_averaged.shape[0], sim_averaged.shape[1], 4), dtype=np.float64
+        )
+
+        # store arrays of cluster_indices in a list
+        self.cluster_list = []
+
+        # indices of the pixels in a cluster
+        cluster_indices = np.empty((0, 2))
+
+        # Loop over pixels until no new pixel is found (sim_averaged is set to -1
+        # once a pixel has already been searched for nearest neighbors)
+        cluster_count_ind = 0
+
+        while np.any(sim_averaged != -1):
+
+            # find the pixel with the highest similarity among the pixels that have
+            # not yet been clustered; this will be the 'starting pixel' of a new cluster
+            rx0, ry0 = np.unravel_index(sim_averaged.argmax(), sim_averaged.shape)
+            # print(rx0, ry0)
+
+            cluster_indices = np.empty((0, 2))
+            cluster_indices = (np.append(cluster_indices, [[rx0, ry0]], axis=0)).astype(
+                np.int32
+            )
+
+            # map_cluster[rx0, ry0] = cluster_count_ind+1
+            color = self.get_color(cluster_count_ind + 1)
+            self.cluster_map[rx0, ry0] = plt.cm.colors.to_rgba(color)
+
+            # Clustering: one cluster per while loop (until it breaks)
+            # Marching algorithm: find new positions and search their nearest neighbors
+
+            while True:
+                counting_added_pixel = 0
+
+                for rx0, ry0 in cluster_indices:
+
+                    if sim_averaged[rx0, ry0] != -1:
+
+                        # count how many cluster pixels had their neighbors checked this pass
+                        counting_added_pixel += 1
+
+                        # set to -1 as its nearest neighbors are now being checked
+                        sim_averaged[rx0, ry0] = -1
+
+                        for ind in range(self.dxy.shape[0]):
+                            x_ind = rx0 + self.dxy[ind, 0]
+                            y_ind = ry0 + self.dxy[ind, 1]
+
+                            # add if the neighbor is similar, but don't add if the neighbor is already in a cluster
+                            if self.similarity[
+                                rx0, ry0, ind
+                            ] > threshold and np.array_equal(
+                                self.cluster_map[x_ind, y_ind], [0, 0, 0, 0]
+                            ):
+
+                                cluster_indices = np.append(
+                                    cluster_indices, [[x_ind, y_ind]], axis=0
+                                )
+                                # self.cluster_map[x_ind, y_ind] = cluster_count_ind+1
+                                color = self.get_color(cluster_count_ind + 1)
+                                self.cluster_map[x_ind, y_ind] = plt.cm.colors.to_rgba(
+                                    color
+                                )
+
+                # if no new pixels were checked for nearest neighbors, then break
+                if counting_added_pixel == 0:
+                    break
+
+            # single pixel cluster
+            if cluster_indices.shape[0] == 1:
+                self.cluster_map[cluster_indices[0, 0], cluster_indices[0, 1]] = [
+                    0,
+                    0,
+                    0,
+                    1,
+                ]
+
+            self.cluster_list.append(cluster_indices)
+            cluster_count_ind += 1
+
+        # return cluster_count_ind, self.cluster_list, map_cluster, sim_averaged
+
+    def create_cluster_cube(
+        self,
+        min_cluster_size,
+        return_cluster_datacube=False,
+    ):
+
+        self.filtered_cluster_list = [
+            arr for arr in self.cluster_list if arr.shape[0] >= min_cluster_size
+        ]
+
+        # cluster_cube[i, j, k, l]: i is the cluster index, j is a placeholder axis,
+        # and (k, l) hold the mean diffraction pattern of that cluster
+        self.cluster_cube = np.empty(
+            [
+                len(self.filtered_cluster_list),
+                1,
+                self.datacube.shape[2],
+                self.datacube.shape[3],
+            ]
+        )
+
+        for i in tqdmnd(range(len(self.filtered_cluster_list))):
+            self.cluster_cube[i, 0] = self.datacube[
+                np.array(self.filtered_cluster_list[i])[:, 0],
+                np.array(self.filtered_cluster_list[i])[:, 1],
+            ].mean(axis=0)
+
+        if return_cluster_datacube:
+            return self.cluster_cube, self.filtered_cluster_list
diff --git a/py4DSTEM/process/utils/elliptical_coords.py b/py4DSTEM/process/utils/elliptical_coords.py
index ea72ae390..6816ce9e0 100644
--- a/py4DSTEM/process/utils/elliptical_coords.py
+++ b/py4DSTEM/process/utils/elliptical_coords.py
@@ -320,8 +320,10 @@ def elliptical_resample(
     # Get (qx,qy) corresponding to the coordinates distorted by the ellipse
     xr, yr = np.mgrid[0:Nx, 0:Ny]
-    xr0 = xr.astype(np.float64) - qx0
-    yr0 = yr.astype(np.float64) - qy0
+
+    xr0 = xr.astype(np.float64) - qx0
+    yr0 = yr.astype(np.float64) - qy0
+
     xr = xr0 * np.cos(-theta) - yr0 * np.sin(-theta)
     yr = xr0 * np.sin(-theta) + yr0 * np.cos(-theta)
     qx = qx0 + xr * np.cos(theta) - yr * (b / a) * np.sin(theta)
diff --git a/setup.cfg b/setup.cfg
index 0c0ff5d67..b30dcb08b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,2 +1,2 @@
 [metadata]
-license_files="LICENSE.txt",
+license_files = LICENSE.txt
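Usage note: the new Cluster utility added in py4DSTEM/process/utils/cluster.py above would be driven roughly as follows. This is a minimal sketch based only on the class as written in this diff; the file name, the all-true detector mask, the similarity threshold of 0.9, and the minimum cluster size of 10 are placeholders, not values taken from the code.

import numpy as np
import py4DSTEM
from py4DSTEM.process.utils import Cluster  # exposed by the __init__ change above

# datacube: a py4DSTEM.DataCube with shape (Rx, Ry, Qx, Qy); file name is hypothetical
datacube = py4DSTEM.read("experiment_datacube.h5")

# optional boolean detector-space mask selecting which pixels enter the comparison
mask = np.ones(datacube.shape[2:], dtype=bool)

cluster = Cluster(datacube)
cluster.find_similarity(mask=mask)        # cosine similarity to the 8 neighboring scan positions
cluster.indexing_clusters_all(mask, 0.9)  # 0.9 is a placeholder similarity threshold
cluster_cube, clusters = cluster.create_cluster_cube(
    min_cluster_size=10,                  # placeholder
    return_cluster_datacube=True,
)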
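For readers updating old notebooks against the 0.14.9 phase-retrieval refactor described in the README note at the top of this diff, a minimal before/after sketch follows. Only the dropped "Reconstruction" suffix and the removal of the "_iter" suffix on regularization arguments come from that note; the module path and the specific keyword names shown are illustrative assumptions, not confirmed API.

# Illustrative only: module path and keyword names below are assumptions.
import py4DSTEM

# pre-0.14.9 (old tutorials):
#   dpc = py4DSTEM.process.phase.DPCReconstruction(datacube=dataset, energy=300e3)
#   dpc.reconstruct(gaussian_filter_iter=10)
#
# 0.14.9 and later:
#   dpc = py4DSTEM.process.phase.DPC(datacube=dataset, energy=300e3)
#   dpc.reconstruct(gaussian_filter=True)  # regularizers are now boolean flags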