diff --git a/.gitignore b/.gitignore index b7569f7..795acdd 100644 --- a/.gitignore +++ b/.gitignore @@ -35,7 +35,7 @@ vscode .vscode/ .vscode/* */.VSCodeCounter/* - +makefile *.pyc # Local History for Visual Studio Code @@ -123,6 +123,7 @@ venv/ ENV/ env.bak/ venv.bak/ +.micromamba # Spyder project settings .spyderproject @@ -150,4 +151,3 @@ cspell.json tmp_profile_dump pypi_release/ extras/ - diff --git a/docs/source/examples/example_readme.py b/docs/source/examples/example_readme.py index 4d93e10..c120066 100644 --- a/docs/source/examples/example_readme.py +++ b/docs/source/examples/example_readme.py @@ -96,6 +96,5 @@ polycrystal.diffract(beam, detector, motion) ##example: -import os path = os.path.join( os.path.dirname(__file__), "..", 'images', "diffraction_pattern.png") fig.savefig(os.path.abspath(path), bbox_inches='tight', transparent=True) diff --git a/setup.py b/setup.py index b94262b..f396119 100644 --- a/setup.py +++ b/setup.py @@ -33,5 +33,6 @@ "dill", "xfab", "netcdf4", - "h5py"] + "h5py", + "pandas"] ) diff --git a/tests/end_to_end_tests/powder.py b/tests/end_to_end_tests/powder.py index 99db3b2..3de4212 100644 --- a/tests/end_to_end_tests/powder.py +++ b/tests/end_to_end_tests/powder.py @@ -4,6 +4,7 @@ from xrd_simulator.beam import Beam from xrd_simulator.motion import RigidBodyMotion from xrd_simulator.templates import get_uniform_powder_sample +import os pixel_size = 75. detector_size = pixel_size * 1024 diff --git a/tests/end_to_end_tests/saves/fast_polycrystal_from_odf.pc b/tests/end_to_end_tests/saves/fast_polycrystal_from_odf.pc new file mode 100644 index 0000000..41dd1e9 Binary files /dev/null and b/tests/end_to_end_tests/saves/fast_polycrystal_from_odf.pc differ diff --git a/tests/end_to_end_tests/saves/fast_polycrystal_from_odf.xdmf b/tests/end_to_end_tests/saves/fast_polycrystal_from_odf.xdmf index cce19f1..5764c8f 100644 --- a/tests/end_to_end_tests/saves/fast_polycrystal_from_odf.xdmf +++ b/tests/end_to_end_tests/saves/fast_polycrystal_from_odf.xdmf @@ -1 +1 @@ -fast_polycrystal_from_odf.h5:/data0fast_polycrystal_from_odf.h5:/data1fast_polycrystal_from_odf.h5:/data2fast_polycrystal_from_odf.h5:/data3fast_polycrystal_from_odf.h5:/data4fast_polycrystal_from_odf.h5:/data5fast_polycrystal_from_odf.h5:/data6fast_polycrystal_from_odf.h5:/data7fast_polycrystal_from_odf.h5:/data8fast_polycrystal_from_odf.h5:/data9fast_polycrystal_from_odf.h5:/data10fast_polycrystal_from_odf.h5:/data11 \ No newline at end of file +fast_polycrystal_from_odf.h5:/data0fast_polycrystal_from_odf.h5:/data1fast_polycrystal_from_odf.h5:/data2fast_polycrystal_from_odf.h5:/data3fast_polycrystal_from_odf.h5:/data4fast_polycrystal_from_odf.h5:/data5fast_polycrystal_from_odf.h5:/data6fast_polycrystal_from_odf.h5:/data7fast_polycrystal_from_odf.h5:/data8fast_polycrystal_from_odf.h5:/data9fast_polycrystal_from_odf.h5:/data10fast_polycrystal_from_odf.h5:/data11 diff --git a/tests/test_detector.py b/tests/test_detector.py index dbeaa0a..b40a428 100644 --- a/tests/test_detector.py +++ b/tests/test_detector.py @@ -66,6 +66,9 @@ def test_centroid_render(self): scattered_wave_vector = 2 * np.pi * scattered_wave_vector / \ (np.linalg.norm(scattered_wave_vector) * wavelength) + zd1,yd1 = tuple(self.detector.get_intersection(scattered_wave_vector,verts1.mean(axis=0)[np.newaxis,:])[0]) + zd2,yd2 = tuple(self.detector.get_intersection(scattered_wave_vector,verts2.mean(axis=0)[np.newaxis,:])[0]) + data = os.path.join( os.path.join( os.path.dirname(__file__), @@ -86,7 +89,10 @@ def test_centroid_render(self): 
time=0, phase=phase, hkl_indx=0, - element_index=0) + element_index=0, + zd=zd1, + yd=yd1) + scattering_unit2 = ScatteringUnit(ch2, scattered_wave_vector=scattered_wave_vector, incident_wave_vector=incident_wave_vector, @@ -96,7 +102,9 @@ def test_centroid_render(self): time=0, phase=phase, hkl_indx=0, - element_index=0) + element_index=0, + zd=zd2, + yd=yd2) self.detector.frames.append([scattering_unit1, scattering_unit2]) diffraction_pattern = self.detector.render( @@ -162,6 +170,9 @@ def test_centroid_render_with_scintillator(self): scattered_wave_vector = 2 * np.pi * scattered_wave_vector / \ (np.linalg.norm(scattered_wave_vector) * wavelength) + zd1,yd1 = tuple(self.detector.get_intersection(scattered_wave_vector,verts1.mean(axis=0)[np.newaxis,:])[0]) + zd2,yd2 = tuple(self.detector.get_intersection(scattered_wave_vector,verts2.mean(axis=0)[np.newaxis,:])[0]) + data = os.path.join( os.path.join( os.path.dirname(__file__), @@ -182,7 +193,9 @@ def test_centroid_render_with_scintillator(self): time=0, phase=phase, hkl_indx=0, - element_index=0) + element_index=0, + zd=zd1, + yd=yd1) scattering_unit2 = ScatteringUnit(ch2, scattered_wave_vector=scattered_wave_vector, incident_wave_vector=incident_wave_vector, @@ -192,8 +205,9 @@ def test_centroid_render_with_scintillator(self): time=0, phase=phase, hkl_indx=0, - element_index=0) - + element_index=0, + zd=zd2, + yd=yd2) self.detector.frames.append([scattering_unit1, scattering_unit2]) self.detector.point_spread_kernel_shape = (3,3) diffraction_pattern = self.detector.render( @@ -208,6 +222,8 @@ def test_centroid_render_with_scintillator(self): verts1 = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]]) + \ v * np.sqrt(2) * self.detector_size / 2. + self.pixel_size_z*0.1 # tetra at detector center perturbed ch1 = ConvexHull(verts1) + zd1,yd1 = tuple(self.detector.get_intersection(scattered_wave_vector,verts1.mean(axis=0)[np.newaxis,:])[0]) + scattering_unit1 = ScatteringUnit(ch1, scattered_wave_vector=scattered_wave_vector, incident_wave_vector=incident_wave_vector, @@ -217,7 +233,9 @@ def test_centroid_render_with_scintillator(self): time=0, phase=phase, hkl_indx=0, - element_index=0) + element_index=0, + zd=zd1, + yd=yd1) self.detector.frames[-1] = [scattering_unit1, scattering_unit2] diffraction_pattern_2 = self.detector.render( frames_to_render=0, @@ -232,6 +250,8 @@ def test_centroid_render_with_scintillator(self): v = v / np.linalg.norm(v) verts1 = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]]) - np.array([0, 1.5, 2.0 ])*self.pixel_size_z ch1 = ConvexHull(verts1) + zd1,yd1 = tuple(self.detector.get_intersection(scattered_wave_vector,verts1.mean(axis=0)[np.newaxis,:])[0]) + scattering_unit1 = ScatteringUnit(ch1, scattered_wave_vector=scattered_wave_vector, incident_wave_vector=incident_wave_vector, @@ -241,7 +261,9 @@ def test_centroid_render_with_scintillator(self): time=0, phase=phase, hkl_indx=0, - element_index=0) + element_index=0, + zd=zd1, + yd=yd1) self.detector.frames[-1] = [scattering_unit1, scattering_unit2] diffraction_pattern_3 = self.detector.render( frames_to_render=0, @@ -255,6 +277,8 @@ def test_centroid_render_with_scintillator(self): v = v / np.linalg.norm(v) verts1 = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]]) + np.array([0, 246*self.pixel_size_y, 196 *self.pixel_size_z ]) ch1 = ConvexHull(verts1) + zd1,yd1 = tuple(self.detector.get_intersection(scattered_wave_vector,verts1.mean(axis=0)[np.newaxis,:])[0]) + scattering_unit1 = ScatteringUnit(ch1, scattered_wave_vector=scattered_wave_vector, 
incident_wave_vector=incident_wave_vector, @@ -264,7 +288,9 @@ def test_centroid_render_with_scintillator(self): time=0, phase=phase, hkl_indx=0, - element_index=0) + element_index=0, + zd=zd1, + yd=yd2) self.detector.frames[-1] = [scattering_unit1, scattering_unit2] diffraction_pattern_3 = self.detector.render( frames_to_render=0, @@ -362,6 +388,8 @@ def test_projection_render(self): phase.setup_diffracting_planes( wavelength, 0, 20 * np.pi / 180) + zd1,yd1 = tuple(self.detector.get_intersection(scattered_wave_vector,hull_points.mean(axis=0)[np.newaxis,:])[0]) + scattering_unit = ScatteringUnit(sphere_hull, scattered_wave_vector=scattered_wave_vector, incident_wave_vector=incident_wave_vector, @@ -371,7 +399,9 @@ def test_projection_render(self): time=0, phase=phase, hkl_indx=0, - element_index=0) + element_index=0, + zd=zd1, + yd=yd1) self.detector.frames.append([scattering_unit]) diffraction_pattern = self.detector.render( @@ -429,7 +459,8 @@ def test_get_intersection(self): # central normal algined ray ray_direction = np.array([2.23, 0., 0.]) source_point = np.array([0., 0., 0.]) - z, y = self.detector.get_intersection(ray_direction, source_point) + + z, y = tuple(self.detector.get_intersection(ray_direction, source_point[np.newaxis,:])[0]) self.assertAlmostEqual( z, 0, msg="central detector-normal algined ray does not intersect at 0") self.assertAlmostEqual( @@ -438,7 +469,7 @@ def test_get_intersection(self): # translate the ray source_point += self.detector.ydhat * self.pixel_size_y source_point -= self.detector.zdhat * 2 * self.pixel_size_z - z, y = self.detector.get_intersection(ray_direction, source_point) + z, y = tuple(self.detector.get_intersection(ray_direction, source_point[np.newaxis,:])[0]) self.assertAlmostEqual( z, -2 * self.pixel_size_z, @@ -452,7 +483,7 @@ def test_get_intersection(self): ang = np.arctan(self.pixel_size_y / self.detector_size) frac = np.tan(ang) * np.linalg.norm(ray_direction) ray_direction += self.detector.ydhat * frac * 3 - z, y = self.detector.get_intersection(ray_direction, source_point) + z, y = tuple(self.detector.get_intersection(ray_direction, source_point[np.newaxis,:])[0]) self.assertAlmostEqual( z, -2 * self.pixel_size_z, msg="translated and tilted ray does not intersect properly") self.assertAlmostEqual( @@ -578,7 +609,7 @@ def test_eta0_render(self): phase = Phase(unit_cell, sgname, path_to_cif_file=data) phase.setup_diffracting_planes( wavelength, 0, 20 * np.pi / 180) - + zd,yd = tuple(self.detector.get_intersection(scattered_wave_vector,verts1.mean(axis=0)[np.newaxis,:])[0]) scattering_unit = ScatteringUnit(ch, scattered_wave_vector=scattered_wave_vector, incident_wave_vector=incident_wave_vector, @@ -588,7 +619,9 @@ def test_eta0_render(self): time=0, phase=phase, hkl_indx=0, - element_index=0) + element_index=0, + zd=zd, + yd=yd) self.detector.frames.append([scattering_unit]) diff --git a/tests/test_laue.py b/tests/test_laue.py index 4e6d5b2..73a4d3b 100644 --- a/tests/test_laue.py +++ b/tests/test_laue.py @@ -16,11 +16,11 @@ def test_get_G_and_bragg_angle(self): U, B, cell, strain = self.get_pseudorandom_crystal() wavelength = self.get_pseudorandom_wavelength() - G = laue.get_G(U, B, G_hkl=np.array([[1, 2, -1], [1, 3, -1]]).T) + G = laue.get_G(U, B, G_hkl=np.array([[1, 2, -1], [1, 3, -1]])) theta = laue.get_bragg_angle(G, wavelength) d = 2 * np.pi / np.linalg.norm(G, axis=0) - for i in range(len(theta)): + for i,angle in enumerate(theta): self.assertLessEqual(np.linalg.norm( G[:, i]), 4 * np.pi / wavelength, msg="Length of G is 
wrong") self.assertLessEqual(theta[i], np.pi, msg="Bragg angle too large") @@ -34,19 +34,18 @@ def test_get_G_and_bragg_angle(self): def test_get_sin_theta_and_norm_G(self): U, B, cell, strain = self.get_pseudorandom_crystal() wavelength = self.get_pseudorandom_wavelength() - G = laue.get_G(U, B, G_hkl=np.array([[1, 2, -1], [1, 3, -1]]).T) + G = laue.get_G(U, B, G_hkl=np.array([[1, 2, -1], [1, 3, -1]])) theta = laue.get_bragg_angle(G, wavelength) - sinth, Gnorm = laue.get_sin_theta_and_norm_G(G, wavelength) - - for i in range(len(theta)): + for i,angles in enumerate(theta): self.assertAlmostEqual( sinth[i], np.sin( theta[i]), msg="error theta") - self.assertAlmostEqual(Gnorm[i], np.linalg.norm( - G[:, i]), msg="error in norm of G") + self.assertAlmostEqual(Gnorm[i], np.linalg.norm(G[:, i]), msg="error in norm of G",places=6) def test_find_solutions_to_tangens_half_angle_equation(self): + """_Test to check if find_solutions_to_tangens_half_angle equation works properly_ + """ U, B, cell, strain = self.get_pseudorandom_crystal() wavelength = cell[0] / \ 18. # make sure the wavelength fits in the lattice spacing @@ -64,34 +63,37 @@ def test_find_solutions_to_tangens_half_angle_equation(self): K = np.array([[0, -rz, ry], [rz, 0, -rx], [-ry, rx, 0]]) - rho_0 = -k.dot( K.dot(K) ).dot(G_0) - rho_1 = k.dot( K ).dot(G_0) - rho_2 = k.dot( np.eye(3, 3) + K.dot(K) ).dot(G_0) + np.sum((G_0 * G_0), axis=0) / 2. - t1s, t2s = laue.find_solutions_to_tangens_half_angle_equation( - rho_0, rho_1, rho_2, rotation_angle) - - for i,(t1,t2) in enumerate(zip(t1s,t2s)): + rho_0_factor = -k.dot( K.dot(K) ) # The operations involving G_0 are moved inside find_solutions_to_tangens... + rho_1_factor = k.dot( K ) # The operations involving G_0 are moved inside find_solutions_to_tangens... + rho_2_factor = k.dot( np.eye(3, 3) + K.dot(K) ) # The operations involving G_0 are moved inside find_solutions_to_tangens... + + + + # Now G_0 and rho_factors are sent before computation to save memory when diffracting many grains. + reflection_index, time_values = laue.find_solutions_to_tangens_half_angle_equation(G_0,rho_0_factor, rho_1_factor, rho_2_factor, rotation_angle) + + G_0 = G_0[np.newaxis,:,:] + rho_0 = np.matmul(rho_0_factor,G_0) + rho_2 = np.matmul(rho_2_factor,G_0)+ np.sum((G_0 * G_0), axis=1) / 2. + rho_1 = np.matmul(rho_1_factor,G_0) + + for t in time_values: # Check that at least one solution has been found and that it satisfies # the half angle equation. - self.assertTrue((~np.isnan(t1) or ~np.isnan(t2)), + self.assertTrue((~np.isnan(t)), msg="Tangens half angle equation could not be solved") - if ~np.isnan(t1): - self.assertLessEqual(t1, 1, msg="s>1") - self.assertGreaterEqual(t1, 0, msg="s>1") - t1 = np.tan(t1 * rotation_angle / 2.) - self.assertAlmostEqual((rho_2[i] - rho_0[i]) * t1**2 + 2 * rho_1[i] * - t1 + (rho_0[i] + rho_2[i]), 0, msg="Parametric solution wrong") - if ~np.isnan(t2): - self.assertLessEqual(t2, 1, msg="s>1") - self.assertGreaterEqual(t2, 0, msg="s<0") - t2 = np.tan(t1 * rotation_angle / 2.) - self.assertAlmostEqual((rho_2[i] - rho_0[i]) * t2**2 + 2 * rho_1[i] * - t2 + (rho_0[i] + rho_2[i]), 0, msg="Parametric solution wrong") + if ~np.isnan(t): + self.assertLessEqual(t, 1, msg="s>1") + self.assertGreaterEqual(t, 0) + t = np.tan(t * rotation_angle / 2.) 
+ equation = (rho_2 - rho_0) * t**2 + 2 * rho_1 *t + (rho_0 + rho_2) + self.assertAlmostEqual(equation.item(), 0, msg="Parametric solution wrong") + def get_pseudorandom_crystal(self): phi1, PHI, phi2 = np.random.rand(3,) * 2 * np.pi U = tools.euler_to_u(phi1, PHI, phi2) - strain_tensor = (np.random.rand(6,) - 0.5) / 50. + epsilon = (np.random.rand(6,) - 0.5) / 50. unit_cell = [ 2 + np.random.rand() * 5, 2 + np.random.rand() * 5, @@ -99,7 +101,10 @@ def get_pseudorandom_crystal(self): 90., 90., 90.] - B = utils._epsilon_to_b(strain_tensor, unit_cell) + + strain_tensor = utils._strain_as_tensor(epsilon) + B0 = tools.form_b_mat(unit_cell) + B = utils._epsilon_to_b(strain_tensor, B0) return U, B, unit_cell, strain_tensor def get_pseudorandom_wavelength(self): diff --git a/tests/test_mesh.py b/tests/test_mesh.py index 72c3703..1b64736 100644 --- a/tests/test_mesh.py +++ b/tests/test_mesh.py @@ -5,88 +5,118 @@ import os import copy + class TestBeam(unittest.TestCase): def setUp(self): pass def test_generate_mesh_from_vertices(self): - coord = np.array([[0,0,0],[0,1,0],[1,0,0],[0,0,1],[0,0,-1]]) - enod = np.array([[0,1,2,3],[0,1,2,4]]) + coord = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1], [0, 0, -1]]) + enod = np.array([[0, 1, 2, 3], [0, 1, 2, 4]]) mesh = TetraMesh.generate_mesh_from_vertices(coord, enod) self.assertAlmostEqual(mesh.enod.shape, enod.shape) self.assertAlmostEqual(mesh.coord.shape, coord.shape) - for a,b in zip(mesh.coord.flatten(), coord.flatten()): - self.assertAlmostEqual(a,b) + for a, b in zip(mesh.coord.flatten(), coord.flatten()): + self.assertAlmostEqual(a, b) for r in mesh.eradius: self.assertLessEqual(r, 1.0) for c in mesh.ecentroids: self.assertLessEqual(np.linalg.norm(c), 1.0) def test_save_and_load(self): - nodal_coordinates = np.array([[0,0,0],[0,1,0],[1,0,0],[0,0,1],[0,0,-1]]) - element_node_map = np.array([[0,1,2,3],[0,1,2,4]]) - mesh = TetraMesh.generate_mesh_from_vertices(nodal_coordinates, element_node_map) - path = os.path.join( - os.path.join( - os.path.dirname(__file__), - 'data'), - 'my_mesh') + nodal_coordinates = np.array( + [[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1], [0, 0, -1]] + ) + element_node_map = np.array([[0, 1, 2, 3], [0, 1, 2, 4]]) + mesh = TetraMesh.generate_mesh_from_vertices( + nodal_coordinates, element_node_map + ) + path = os.path.join(os.path.join(os.path.dirname(__file__), "data"), "my_mesh") mesh.save(path) mesh_loaded_from_disc = TetraMesh.load(path + ".xdmf") - for a,b in zip(mesh_loaded_from_disc.coord.flatten(), nodal_coordinates.flatten()): - self.assertAlmostEqual(a,b) + for a, b in zip( + mesh_loaded_from_disc.coord.flatten(), nodal_coordinates.flatten() + ): + self.assertAlmostEqual(a, b) os.remove(path + ".xdmf") os.remove(path + ".h5") def test_generate_mesh_from_levelset(self): R = 769.0 - max_cell_circumradius = 450. + max_cell_circumradius = 450.0 mesh = TetraMesh.generate_mesh_from_levelset( level_set=lambda x: np.linalg.norm(x) - R, bounding_radius=769.0, - max_cell_circumradius=max_cell_circumradius) + max_cell_circumradius=max_cell_circumradius, + ) for c in mesh.coord: r = np.linalg.norm(c) - self.assertLessEqual(r, R*1.001) + self.assertLessEqual(r, R * 1.001) for r in mesh.eradius: - self.assertLessEqual(r, max_cell_circumradius*1.001) + self.assertLessEqual(r, max_cell_circumradius * 1.001) def test_move_mesh(self): R = 769.0 - max_cell_circumradius = 450. 
+ max_cell_circumradius = 450.0 mesh = TetraMesh.generate_mesh_from_levelset( level_set=lambda x: np.linalg.norm(x) - R, bounding_radius=769.0, - max_cell_circumradius=max_cell_circumradius) + max_cell_circumradius=max_cell_circumradius, + ) translation = np.array([1.0, 769.0, -5678.0]) new_nodal_coordinates = mesh.coord + translation mesh._mesh.points = new_nodal_coordinates mesh._set_fem_matrices() mesh._expand_mesh_data() for c in mesh.coord: - r = np.linalg.norm(c-translation) - self.assertLessEqual(r, R*1.001) + r = np.linalg.norm(c - translation) + self.assertLessEqual(r, R * 1.001) for r in mesh.eradius: - self.assertLessEqual(r, max_cell_circumradius*1.001) + self.assertLessEqual(r, max_cell_circumradius * 1.001) + + def test_compute_mesh_spheres(self): + + coord = np.array( + [ + [-1.3856244e02, 6.9698529e02, -2.9481543e02], + [5.1198740e02, 5.7321143e02, -1.3369661e01], + [-2.4491163e-01, -2.4171574e-01, 1.4720881e-01], + [2.7638666e02, 6.1436609e02, -3.7048819e02], + ] + ) + + enod = np.array([[0, 1, 2, 3]]) + mesh = TetraMesh.generate_mesh_from_vertices(coord, enod) + + theta = np.deg2rad(1e-4) + c, s = np.cos(theta), np.sin(theta) + R_z = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]) + + rotated_coord = R_z.dot(mesh.coord.T).T + + eradius_rotated, _ = mesh._compute_mesh_spheres(rotated_coord, mesh.enod) + + self.assertAlmostEqual(mesh.eradius[0], eradius_rotated[0]) def test_update(self): - rotation_axis=np.array([0, 0, 1.0]) - rotation_angle=np.pi/4.37 + rotation_axis = np.array([0, 0, 1.0]) + rotation_angle = np.pi / 4.37 translation = np.array([1.0, 769.0, -5678.0]) rbm = RigidBodyMotion(rotation_axis, rotation_angle, translation) Rmat = rbm.rotator.get_rotation_matrix(rotation_angle) R = 769.0 - max_cell_circumradius = 450. + max_cell_circumradius = 450.0 mesh1 = TetraMesh.generate_mesh_from_levelset( level_set=lambda x: np.linalg.norm(x) - R, bounding_radius=769.0, - max_cell_circumradius=max_cell_circumradius) + max_cell_circumradius=max_cell_circumradius, + ) mesh2 = copy.deepcopy(mesh1) - new_nodal_coordinates = Rmat.dot( mesh1.coord.T ).T + translation + new_nodal_coordinates = Rmat.dot(mesh1.coord.T).T + translation mesh1._mesh.points = new_nodal_coordinates mesh1._set_fem_matrices() mesh1._expand_mesh_data() @@ -95,59 +125,68 @@ def test_update(self): tol = 1e-3 - for c1,c2 in zip(mesh1.coord, mesh2.coord): + for c1, c2 in zip(mesh1.coord, mesh2.coord): for i in range(3): - self.assertLessEqual(np.abs(c1[i]-c2[i]), tol) + self.assertLessEqual(np.abs(c1[i] - c2[i]), tol) - for c1,c2 in zip(mesh1.ecentroids, mesh2.ecentroids): + for c1, c2 in zip(mesh1.ecentroids, mesh2.ecentroids): for i in range(3): - self.assertLessEqual(np.abs(c1[i]-c2[i]), tol) + self.assertLessEqual(np.abs(c1[i] - c2[i]), tol) - for c1,c2 in zip(mesh1.espherecentroids, mesh2.espherecentroids): + for c1, c2 in zip(mesh1.espherecentroids, mesh2.espherecentroids): for i in range(3): - self.assertLessEqual(np.abs(c1[i]-c2[i]), tol) + self.assertLessEqual(np.abs(c1[i] - c2[i]), tol) for i in range(mesh1.enormals.shape[0]): for j in range(mesh1.enormals.shape[1]): for k in range(3): - c1 = mesh1.enormals[i,j,k] - c2 = mesh2.enormals[i,j,k] - self.assertLessEqual(np.abs(c1-c2), tol) + c1 = mesh1.enormals[i, j, k] + c2 = mesh2.enormals[i, j, k] + self.assertLessEqual(np.abs(c1 - c2), tol) - for c1,c2 in zip(mesh1.eradius, mesh2.eradius): - self.assertLessEqual(np.abs(c1-c2), tol) + for c1, c2 in zip(mesh1.eradius, mesh2.eradius): + self.assertLessEqual(np.abs(c1 - c2), tol) - c1,c2 = mesh1.centroid, 
mesh2.centroid + c1, c2 = mesh1.centroid, mesh2.centroid for i in range(3): - self.assertLessEqual(np.abs(c1[i]-c2[i]), tol) + self.assertLessEqual(np.abs(c1[i] - c2[i]), tol) def test_translate(self): R = 769.0 - max_cell_circumradius = 450. + max_cell_circumradius = 450.0 mesh = TetraMesh.generate_mesh_from_levelset( - level_set=lambda x: np.sqrt( (x[0] - 2.0)**2 + (x[1] - 1.0)**2 + (x[2] + 1.4)**2 ) - R, + level_set=lambda x: np.sqrt( + (x[0] - 2.0) ** 2 + (x[1] - 1.0) ** 2 + (x[2] + 1.4) ** 2 + ) + - R, bounding_radius=769.0, - max_cell_circumradius=max_cell_circumradius) + max_cell_circumradius=max_cell_circumradius, + ) - mesh.translate( -np.mean(mesh.coord, axis=0) ) + mesh.translate(-np.mean(mesh.coord, axis=0)) for i in range(3): self.assertLessEqual(np.abs(np.mean(mesh.coord, axis=0)[i]), 1e-4) def test_rotate(self): R = 769.0 - max_cell_circumradius = 450. + max_cell_circumradius = 450.0 mesh = TetraMesh.generate_mesh_from_levelset( - level_set=lambda x: np.sqrt( (x[0] - 2.0)**2 + (x[1] - 1.0)**2 + (x[2] + 1.4)**2 ) - R, + level_set=lambda x: np.sqrt( + (x[0] - 2.0) ** 2 + (x[1] - 1.0) ** 2 + (x[2] + 1.4) ** 2 + ) + - R, bounding_radius=769.0, - max_cell_circumradius=max_cell_circumradius) + max_cell_circumradius=max_cell_circumradius, + ) - mesh.translate( -np.mean(mesh.coord, axis=0) ) + mesh.translate(-np.mean(mesh.coord, axis=0)) for i in range(3): self.assertLessEqual(np.abs(np.mean(mesh.coord, axis=0)[i]), 1e-4) - mesh.rotate( np.array([0, 1, 0]), np.pi / 3. ) + mesh.rotate(np.array([0, 1, 0]), np.pi / 3.0) for i in range(3): self.assertLessEqual(np.abs(np.mean(mesh.coord, axis=0)[i]), 1e-4) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/tests/test_motion.py b/tests/test_motion.py index 764aeee..0c60b43 100644 --- a/tests/test_motion.py +++ b/tests/test_motion.py @@ -102,7 +102,7 @@ def test_inverse(self): self.assertAlmostEqual( inverse_motion.translation[i], -motion.translation[i] ) self.assertAlmostEqual( inverse_motion.rotation_angle, motion.rotation_angle ) - points_0 = np.random.rand(3, 22) + points_0 = np.random.rand(22, 3) points = motion.rotate(points_0, time=0.243687) points = inverse_motion.rotate(points, time=0.243687) diff --git a/tests/test_polycrystal.py b/tests/test_polycrystal.py index c6505ab..0faf2e2 100644 --- a/tests/test_polycrystal.py +++ b/tests/test_polycrystal.py @@ -6,6 +6,7 @@ from xrd_simulator.detector import Detector from xrd_simulator.beam import Beam from xrd_simulator.motion import RigidBodyMotion +from xrd_simulator.utils import _epsilon_to_b from xfab import tools import os @@ -201,6 +202,8 @@ def test_transformation(self): np.dot( polycrystal.strain_lab[i], new_unit_vector)) + + self.assertAlmostEqual( s1, s2, msg="Transformation does not preserve directional strains") diff --git a/tests/test_templates.py b/tests/test_templates.py index 9b41d89..3911e38 100644 --- a/tests/test_templates.py +++ b/tests/test_templates.py @@ -2,7 +2,7 @@ import numpy as np from scipy.spatial.transform import Rotation from xrd_simulator import templates, utils - +import matplotlib.pyplot as plt class TestUtils(unittest.TestCase): @@ -27,6 +27,7 @@ def test_s3dxrd(self): } beam, detector, motion = templates.s3dxrd(parameters) + for ci in beam.centroid: self.assertAlmostEqual(ci, 0, msg="beam not at origin.") @@ -152,6 +153,7 @@ def strain_tensor(x): return np.array( min_bragg_angle=0, max_bragg_angle=None, verbose=True) + diffraction_pattern = detector.render( frames_to_render=0, lorentz=False, @@ -162,7 +164,6 @@ def 
strain_tensor(x): return np.array( bins, histogram = utils._diffractogram( diffraction_pattern, parameters['detector_center_pixel_z'], parameters['detector_center_pixel_y']) histogram[histogram < 0.5 * np.median(histogram)] = 0 - csequence, nosequences = 0, 0 for i in range(histogram.shape[0]): if histogram[i] > 0: @@ -215,6 +216,7 @@ def test_get_uniform_powder_sample(self): min_bragg_angle=0, max_bragg_angle=None, verbose=True) + diffraction_pattern = detector.render( frames_to_render=0, lorentz=False, @@ -234,6 +236,7 @@ def test_get_uniform_powder_sample(self): elif csequence >= 1: nosequences += 1 csequence = 0 + self.assertGreaterEqual( nosequences, 10, diff --git a/tests/test_utils.py b/tests/test_utils.py index 58bae2a..a81c35f 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -11,99 +11,129 @@ def setUp(self): np.random.seed(10) # changes all randomisation in the test def test_clip_line_with_convex_polyhedron(self): - line_points = np.ascontiguousarray([[-1., 0.2, 0.2], [-1., 0.4, 0.6]]) + line_points = np.ascontiguousarray([[-1.0, 0.2, 0.2], [-1.0, 0.4, 0.6]]) line_direction = np.ascontiguousarray([1.0, 0.0, 0.0]) line_direction = line_direction / np.linalg.norm(line_direction) - plane_points = np.ascontiguousarray([[0., 0.5, 0.5], [1, 0.5, 0.5], [0.5, 0.5, 0.], [ - 0.5, 0.5, 1.], [0.5, 0, 0.5], [0.5, 1., 0.5]]) + plane_points = np.ascontiguousarray( + [ + [0.0, 0.5, 0.5], + [1, 0.5, 0.5], + [0.5, 0.5, 0.0], + [0.5, 0.5, 1.0], + [0.5, 0, 0.5], + [0.5, 1.0, 0.5], + ] + ) plane_normals = np.ascontiguousarray( - [[-1., 0., 0.], [1., 0., 0.], [0., 0., -1.], [0., 0., 1.], [0., -1., 0.], [0., 1., 0.]]) + [ + [-1.0, 0.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, -1.0], + [0.0, 0.0, 1.0], + [0.0, -1.0, 0.0], + [0.0, 1.0, 0.0], + ] + ) clip_lengths = utils._clip_line_with_convex_polyhedron( - line_points, line_direction, plane_points, plane_normals) + line_points, line_direction, plane_points, plane_normals + ) for clip_length in clip_lengths: self.assertAlmostEqual( clip_length, 1.0, - msg="Projection through unity cube should give unity clip length") + msg="Projection through unity cube should give unity clip length", + ) line_direction = np.ascontiguousarray([1.0, 0.2, 0.1]) line_direction = line_direction / np.linalg.norm(line_direction) clip_lengths = utils._clip_line_with_convex_polyhedron( - line_points, line_direction, plane_points, plane_normals) + line_points, line_direction, plane_points, plane_normals + ) for clip_length in clip_lengths: self.assertGreater( clip_length, 1.0, - msg="Tilted projection through unity cube should give greater than unity clip length") + msg="Tilted projection through unity cube should give greater than unity clip length", + ) def test_lab_strain_to_B_matrix(self): U = Rotation.random().as_matrix() strain_tensor = (np.random.rand(3, 3) - 0.5) * 1e-2 # random strain tensor - strain_tensor = (strain_tensor.T + strain_tensor) / 2. - unit_cell = [5.028, 5.028, 5.519, 90., 90., 120.] 
- B = utils.lab_strain_to_B_matrix(strain_tensor, U, unit_cell) + strain_tensor = (strain_tensor.T + strain_tensor) / 2.0 + unit_cell = [5.028, 5.028, 5.519, 90.0, 90.0, 120.0] - n_c = np.random.rand(3,) # crystal unit vector + B0 = tools.form_b_mat(unit_cell) + B = utils.lab_strain_to_B_matrix(strain_tensor, U, B0) + + n_c = np.random.rand( + 3, + ) # crystal unit vector n_c = n_c / np.linalg.norm(n_c) n_l = np.dot(U, n_c) # lab unit vector # strain along n_l described in lab frame strain_l = np.dot(np.dot(n_l, strain_tensor), n_l) - s = utils._b_to_epsilon(B, unit_cell) + s = utils._b_to_epsilon(B, B0) crystal_strain = np.array( - [[s[0], s[1], s[2]], [s[1], s[3], s[4]], [s[2], s[4], s[5]]]) + [[s[0], s[1], s[2]], [s[1], s[3], s[4]], [s[2], s[4], s[5]]] + ) # strain along n_l described in crystal frame strain_c = np.dot(np.dot(n_c, crystal_strain), n_c) # The strain should be invariant along a direction self.assertAlmostEqual( - strain_l, - strain_c, - msg="bad crystal to lab frame conversion") + strain_l, strain_c, msg="bad crystal to lab frame conversion" + ) def test_alpha_to_quarternion(self): - _, alpha_2, alpha_3 = np.random.rand(3,) + _, alpha_2, alpha_3 = np.random.rand( + 3, + ) q = utils.alpha_to_quarternion(0, alpha_2, alpha_3) self.assertAlmostEqual(q[0], 1.0, msg="quarternion wrongly computed") self.assertAlmostEqual(q[1], 0.0, msg="quarternion wrongly computed") self.assertAlmostEqual(q[2], 0.0, msg="quarternion wrongly computed") self.assertAlmostEqual(q[3], 0.0, msg="quarternion wrongly computed") - alpha_1 = np.random.rand(7,) - alpha_2 = np.random.rand(7,) - alpha_3 = np.random.rand(7,) + alpha_1 = np.random.rand( + 7, + ) + alpha_2 = np.random.rand( + 7, + ) + alpha_3 = np.random.rand( + 7, + ) qq = utils.alpha_to_quarternion(alpha_1, alpha_2, alpha_3) for q in qq: self.assertTrue( - np.abs( - np.linalg.norm(q) - - 1.0) < 1e-5, - msg="quarternion not normalised") + np.abs(np.linalg.norm(q) - 1.0) < 1e-5, msg="quarternion not normalised" + ) def test_diffractogram(self): diffraction_pattern = np.zeros((20, 20)) R = 8 - det_c_z, det_c_y = 10., 10. 
+ det_c_z, det_c_y = 10.0, 10.0 for i in range(diffraction_pattern.shape[0]): for j in range(diffraction_pattern.shape[1]): - if np.abs(np.sqrt((i - det_c_z)**2 + - (j - det_c_y)**2) - R) < 0.5: + if np.abs(np.sqrt((i - det_c_z) ** 2 + (j - det_c_y) ** 2) - R) < 0.5: diffraction_pattern[i, j] += 1 bin_centres, histogram = utils._diffractogram( - diffraction_pattern, det_c_z, det_c_y, 1.0) + diffraction_pattern, det_c_z, det_c_y, 1.0 + ) self.assertEqual( - np.sum( - histogram > 0), - 1, - msg="Error in diffractogram azimuth integration") + np.sum(histogram > 0), 1, msg="Error in diffractogram azimuth integration" + ) self.assertEqual( np.sum(histogram), np.sum(diffraction_pattern), - msg="Error in diffractogram azimuth integration") + msg="Error in diffractogram azimuth integration", + ) self.assertEqual( histogram[R], np.sum(diffraction_pattern), - msg="Error in diffractogram azimuth integration") + msg="Error in diffractogram azimuth integration", + ) def test_get_bounding_ball(self): points = np.random.rand(4, 3) - 0.5 @@ -111,15 +141,15 @@ def test_get_bounding_ball(self): mean = np.mean(points, axis=0) base_radius = np.max(np.linalg.norm(points - mean, axis=1)) self.assertLessEqual( - radius, - base_radius, - msg="Ball is larger than initial guess") + radius, base_radius, msg="Ball is larger than initial guess" + ) for p in points: self.assertLessEqual( (p - centre[0:3]).dot(p - centre[0:3]), - (radius * 1.0001)**2, - msg="Point not contained by ball") + (radius * 1.0001) ** 2, + msg="Point not contained by ball", + ) ratios = [] for _ in range(500): @@ -129,34 +159,45 @@ def test_get_bounding_ball(self): base_radius = np.max(np.linalg.norm(points - mean, axis=1)) ratios.append(radius / base_radius) self.assertLessEqual( - np.mean(ratios), - 0.9, - msg="Averag radius decrease less than 10%") + np.mean(ratios), 0.9, msg="Averag radius decrease less than 10%" + ) def test_epsilon_to_b(self): - unit_cell = [4.926, 4.926, 5.4189, 90., 90., 120.] 
- eps1 = 25 * 1e-4 * (np.random.rand(6,)-0.5) - B = utils._epsilon_to_b(eps1, unit_cell) - eps2 = utils._b_to_epsilon(B, unit_cell) - self.assertTrue( np.allclose( eps1, eps2 ) ) + unit_cell = [4.926, 4.926, 5.4189, 90.0, 90.0, 120.0] + eps1 = ( + 25 + * 1e-4 + * ( + np.random.rand( + 6, + ) + - 0.5 + ) + ) + B0 = tools.form_b_mat(unit_cell) + strain_tensor1 = utils._strain_as_tensor(eps1) + B = utils._epsilon_to_b(strain_tensor1, B0) + eps2 = utils._b_to_epsilon(B, B0) + self.assertTrue(np.allclose(eps1, eps2)) def test_get_misorientations(self): orientations = np.zeros((2, 3, 3)) - orientations[0,:,:] = np.eye(3) - c,s = np.cos(np.radians(10)), np.sin(np.radians(10)) - orientations[1,:,:] = np.array([[c, -s, 0],[s, c, 0], [0, 0, 1]]) - misorientations = utils.get_misorientations(orientations) - self.assertEqual( misorientations.shape[0], 2 ) - self.assertAlmostEqual( misorientations[0], np.radians(5.0)) - self.assertAlmostEqual( misorientations[1], np.radians(5.0)) + orientations[0, :, :] = np.eye(3) + c, s = np.cos(np.radians(10)), np.sin(np.radians(10)) + orientations[1, :, :] = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]) + misorientations = utils._get_misorientations(orientations) + self.assertEqual(misorientations.shape[0], 2) + self.assertAlmostEqual(misorientations[0], np.radians(5.0)) + self.assertAlmostEqual(misorientations[1], np.radians(5.0)) orientations = np.zeros((2, 3, 3)) - orientations[0,:,:] = np.eye(3) - orientations[1,:,:] = np.eye(3) - misorientations = utils.get_misorientations(orientations) - self.assertEqual( misorientations.shape[0], 2 ) - self.assertAlmostEqual( misorientations[0], 0 ) - self.assertAlmostEqual( misorientations[1], 0 ) - -if __name__ == '__main__': + orientations[0, :, :] = np.eye(3) + orientations[1, :, :] = np.eye(3) + misorientations = utils._get_misorientations(orientations) + self.assertEqual(misorientations.shape[0], 2) + self.assertAlmostEqual(misorientations[0], 0) + self.assertAlmostEqual(misorientations[1], 0) + + +if __name__ == "__main__": unittest.main() diff --git a/xrd_simulator/beam.py b/xrd_simulator/beam.py index b6a74b3..10b9d86 100644 --- a/xrd_simulator/beam.py +++ b/xrd_simulator/beam.py @@ -6,16 +6,15 @@ .. literalinclude:: examples/example_init_beam.py Below follows a detailed description of the beam class attributes and functions. - """ + import numpy as np import dill from scipy.spatial import ConvexHull, HalfspaceIntersection from scipy.optimize import linprog -from xrd_simulator import laue, utils -from scipy.optimize import root_scalar -class Beam(): + +class Beam: """Represents a monochromatic xray beam as a convex polyhedra with uniform intensity. The beam is described in the laboratory coordinate system. @@ -40,20 +39,22 @@ class Beam(): """ def __init__( - self, - beam_vertices, - xray_propagation_direction, - wavelength, - polarization_vector): - - self.wave_vector = (2 * np.pi / wavelength) * xray_propagation_direction / \ - np.linalg.norm(xray_propagation_direction) + self, beam_vertices, xray_propagation_direction, wavelength, polarization_vector + ): + + self.wave_vector = ( + (2 * np.pi / wavelength) + * xray_propagation_direction + / np.linalg.norm(xray_propagation_direction) + ) self.wavelength = wavelength self.set_beam_vertices(beam_vertices) - self.polarization_vector = polarization_vector / \ - np.linalg.norm(polarization_vector) - assert np.allclose(np.dot(self.polarization_vector, self.wave_vector), - 0), "The xray polarization vector is not orthogonal to the wavevector." 
+ self.polarization_vector = polarization_vector / np.linalg.norm( + polarization_vector + ) + assert np.allclose( + np.dot(self.polarization_vector, self.wave_vector), 0 + ), "The xray polarization vector is not orthogonal to the wavevector." def set_beam_vertices(self, beam_vertices): """Set the beam vertices defining the beam convex hull and update all dependent quantities. @@ -63,17 +64,18 @@ def set_beam_vertices(self, beam_vertices): """ ch = ConvexHull(beam_vertices) - assert ch.points.shape[0] == ch.vertices.shape[0], "The provided beam vertices does not form a convex hull" + assert ( + ch.points.shape[0] == ch.vertices.shape[0] + ), "The provided beam vertices does not form a convex hull" self.vertices = beam_vertices.copy() self.centroid = np.mean(self.vertices, axis=0) - self.halfspaces = ConvexHull(self.vertices, qhull_options='QJ').equations + self.halfspaces = ConvexHull(self.vertices, qhull_options="QJ").equations # ConvexHull triangulates, this removes hull triangles positioned the same plane self.halfspaces = np.unique(self.halfspaces.round(decimals=6), axis=0) - def contains(self, points): - """ Check if the beam contains a number of point(s). + """Check if the beam contains a number of point(s). Args: points (:obj:`numpy array`): Point(s) to evaluate ``shape=(3,n)`` or ``shape=(3,)``. @@ -87,7 +89,17 @@ def contains(self, points): if len(points.shape) == 1: return np.all(normal_distances + self.halfspaces[:, 3] < 0) else: - return np.sum( (normal_distances + self.halfspaces[:, 3].reshape(self.halfspaces.shape[0], 1)) >= 0, axis=0) == 0 + return ( + np.sum( + ( + normal_distances + + self.halfspaces[:, 3].reshape(self.halfspaces.shape[0], 1) + ) + >= 0, + axis=0, + ) + == 0 + ) def intersect(self, vertices): """Compute the beam intersection with a convex polyhedra. 
@@ -105,7 +117,7 @@ def intersect(self, vertices): if not self.contains(vertex): break else: - return ConvexHull(vertices) # Tetra completely contained by beam + return ConvexHull(vertices) # Tetra completely contained by beam poly_halfspace = ConvexHull(vertices).equations poly_halfspace = np.unique(poly_halfspace.round(decimals=6), axis=0) @@ -118,11 +130,11 @@ def intersect(self, vertices): if self.contains(centroid): interior_point = centroid else: - trial_points = centroid + (vertices - centroid)*0.99 + trial_points = centroid + (vertices - centroid) * 0.99 for i, is_contained in enumerate(self.contains(trial_points.T)): if is_contained: - interior_point = trial_points[i,:].flatten() + interior_point = trial_points[i, :].flatten() break else: interior_point = self._find_feasible_point(combined_halfspaces) @@ -160,7 +172,7 @@ def load(cls, path): """ if not path.endswith(".beam"): raise ValueError("The loaded file must end with .beam") - with open(path, 'rb') as f: + with open(path, "rb") as f: return dill.load(f) def _find_feasible_point(self, halfspaces): @@ -174,32 +186,33 @@ def _find_feasible_point(self, halfspaces): """ res = linprog( - c = np.zeros((3,)), - A_ub = halfspaces[:, :-1], - b_ub = -halfspaces[:, -1], - bounds = (None, None), - method = 'highs', - options = {"maxiter": 5000, 'time_limit':0.1} # typical solve time is ~1-2 ms - ) + c=np.zeros((3,)), + A_ub=halfspaces[:, :-1], + b_ub=-halfspaces[:, -1], + bounds=(None, None), + method="highs", + options={ + "maxiter": 5000, + "time_limit": 0.1, + }, # typical solve time is ~1-2 ms + ) if res.success: - if np.any(res.slack==0): - A = halfspaces[res.slack==0, :-1] - dx = np.linalg.solve(A.T.dot(A), A.T.dot( -np.ones((A.shape[0],))*1e-5 ) ) + if np.any(res.slack == 0): + A = halfspaces[res.slack == 0, :-1] + dx = np.linalg.solve( + A.T.dot(A), A.T.dot(-np.ones((A.shape[0],)) * 1e-5) + ) trial = res.x + dx else: trial = res.x - if np.all( (halfspaces[:, :-1].dot(trial) + halfspaces[:, -1]) < -1e-8 ): + if np.all((halfspaces[:, :-1].dot(trial) + halfspaces[:, -1]) < -1e-8): return trial else: return None else: return None - def _get_candidate_spheres( - self, - sphere_centres, - sphere_radius, - rigid_body_motion): + def _get_candidate_spheres(self, sphere_centres, sphere_radius, rigid_body_motion): """Mask spheres which are close to intersecting a given convex beam hull undergoing a prescribed motion. NOTE: This function is approximate in the sense that the motion path is sampled at discrete moments in time @@ -220,80 +233,88 @@ def _get_candidate_spheres( inverse_rigid_body_motion = rigid_body_motion.inverse() - dx = np.min(sphere_radius) / 2. 
- translation = np.abs( rigid_body_motion.translation / dx ) - number_of_sampling_points = int( np.max( [np.max(translation), np.degrees(rigid_body_motion.rotation_angle), 2] ) + 1 ) - sample_times = np.linspace(0, 1, number_of_sampling_points) + dx = np.min(sphere_radius) / 2.0 + translation = np.abs(rigid_body_motion.translation / dx) + number_of_sampling_points = int( + np.max( + [np.max(translation), np.degrees(rigid_body_motion.rotation_angle), 2] + ) + + 1 + ) + sample_times = np.linspace(0, 1, number_of_sampling_points) R = sphere_radius.reshape(1, sphere_radius.shape[0]) not_candidates = np.zeros((len(sample_times), R.shape[1]), dtype=bool) - for i,time in enumerate(sample_times): - halfspaces = ConvexHull( inverse_rigid_body_motion( self.vertices.T, time=time ).T ).equations + for i, time in enumerate(sample_times): + + halfspaces = ConvexHull( + inverse_rigid_body_motion(self.vertices, time=time) + ).equations halfspaces = np.unique(halfspaces.round(decimals=6), axis=0) - normals = halfspaces[:, 0:3] - distances = halfspaces[:, 3].reshape(normals.shape[0],1) - sphere_beam_distances = normals.dot(sphere_centres.T) + distances - not_candidates[i,:] += np.any( sphere_beam_distances > R, axis=0) + normals = halfspaces[:, 0:3] + distances = halfspaces[:, 3].reshape(normals.shape[0], 1) + sphere_beam_distances = normals.dot(sphere_centres.T) + distances + not_candidates[i, :] += np.any(sphere_beam_distances > R, axis=0) return ~not_candidates, sample_times def _get_proximity_intervals( - self, - sphere_centres, - sphere_radius, - rigid_body_motion): - """Compute the parametric intervals t=[[t_1,t_2],[t_3,t_4],..] in which spheres are interesecting beam. - - This method can be used as a pre-checker before running the `intersect()` method on a polyhedral - set. This avoids wasting compute resources on polyhedra which clearly do not intersect the beam. - - Args: - sphere_centres (:obj:`numpy array`): Centroids of a spheres ``shape=(3,n)``. - sphere_radius (:obj:`numpy array`): Radius of a spheres ``shape=(n,)``. - rigid_body_motion (:obj:`xrd_simulator.motion.RigidBodyMotion`): Rigid body motion object describing the - polycrystal transformation as a function of time on the domain time=[0,1]. - - Returns: - (:obj:`list` of :obj:`list` of :obj:`list`): Parametric ranges in which the spheres - has an intersection with the beam. [(:obj:`None`)] if no intersection exist in ```time=[0,1]```. - the entry at ```[i][j]``` is a list with two floats ```[t_1,t_2]``` and gives the j:th - intersection interval of sphere number i with the beam. - - """ - # TODO: better unit tests. - candidate_mask, sample_times = self._get_candidate_spheres(sphere_centres, - sphere_radius, - rigid_body_motion) - - all_intersections = [] - for j in range(candidate_mask.shape[1]): - if np.sum(candidate_mask[:,j])==0: - merged_intersection = [None] - all_intersections.append(merged_intersection) - else: - counting = False - merged_intersection = [] - - i = 0 - if candidate_mask[i,j] and not counting: - merged_intersection.append( [sample_times[i], sample_times[i+1]] ) - counting = True + self, sphere_centres, sphere_radius, rigid_body_motion + ): + """Compute the parametric intervals t=[[t_1,t_2],[t_3,t_4],..] in which spheres are interesecting beam. - for i in range(1, candidate_mask.shape[0]-1): + This method can be used as a pre-checker before running the `intersect()` method on a polyhedral + set. This avoids wasting compute resources on polyhedra which clearly do not intersect the beam. 
- if candidate_mask[i,j] and not counting: - merged_intersection.append( [sample_times[i-1], sample_times[i+1]] ) - counting = True - elif not candidate_mask[i,j] and counting: - merged_intersection[-1][1] = sample_times[i+1] - counting = False + Args: + sphere_centres (:obj:`numpy array`): Centroids of a spheres ``shape=(3,n)``. + sphere_radius (:obj:`numpy array`): Radius of a spheres ``shape=(n,)``. + rigid_body_motion (:obj:`xrd_simulator.motion.RigidBodyMotion`): Rigid body motion object describing the + polycrystal transformation as a function of time on the domain time=[0,1]. + + Returns: + (:obj:`list` of :obj:`list` of :obj:`list`): Parametric ranges in which the spheres + has an intersection with the beam. [(:obj:`None`)] if no intersection exist in ```time=[0,1]```. + the entry at ```[i][j]``` is a list with two floats ```[t_1,t_2]``` and gives the j:th + intersection interval of sphere number i with the beam. + + """ + # TODO: better unit tests. + candidate_mask, sample_times = self._get_candidate_spheres( + sphere_centres, sphere_radius, rigid_body_motion + ) + + all_intersections = [] + for j in range(candidate_mask.shape[1]): + if np.sum(candidate_mask[:, j]) == 0: + merged_intersection = [None] + all_intersections.append(merged_intersection) + else: + counting = False + merged_intersection = [] + + i = 0 + if candidate_mask[i, j] and not counting: + merged_intersection.append([sample_times[i], sample_times[i + 1]]) + counting = True + + for i in range(1, candidate_mask.shape[0] - 1): + + if candidate_mask[i, j] and not counting: + merged_intersection.append( + [sample_times[i - 1], sample_times[i + 1]] + ) + counting = True + elif not candidate_mask[i, j] and counting: + merged_intersection[-1][1] = sample_times[i + 1] + counting = False - i = candidate_mask.shape[0] - 1 - if candidate_mask[i,j] and not counting: - merged_intersection.append( [sample_times[i-1], sample_times[i]] ) - elif candidate_mask[i,j] and counting: - merged_intersection[-1][1] = sample_times[i] + i = candidate_mask.shape[0] - 1 + if candidate_mask[i, j] and not counting: + merged_intersection.append([sample_times[i - 1], sample_times[i]]) + elif candidate_mask[i, j] and counting: + merged_intersection[-1][1] = sample_times[i] - all_intersections.append(merged_intersection) + all_intersections.append(merged_intersection) - return all_intersections \ No newline at end of file + return all_intersections diff --git a/xrd_simulator/detector.py b/xrd_simulator/detector.py index 89c6c80..3ddc759 100644 --- a/xrd_simulator/detector.py +++ b/xrd_simulator/detector.py @@ -11,14 +11,15 @@ Below follows a detailed description of the detector class attributes and functions. """ + import numpy as np from xrd_simulator import utils import dill from scipy.signal import convolve2d from multiprocessing import Pool -class Detector(): +class Detector: """Represents a rectangular 2D area detector. 
The detector collects :class:`xrd_simulator.scattering_unit.ScatteringUnit` during diffraction from a @@ -52,8 +53,14 @@ class Detector(): """ - def __init__(self, pixel_size_z, pixel_size_y, det_corner_0, det_corner_1, det_corner_2): - self.det_corner_0, self.det_corner_1, self.det_corner_2 = det_corner_0, det_corner_1, det_corner_2 + def __init__( + self, pixel_size_z, pixel_size_y, det_corner_0, det_corner_1, det_corner_2 + ): + self.det_corner_0, self.det_corner_1, self.det_corner_2 = ( + det_corner_0, + det_corner_1, + det_corner_2, + ) self.pixel_size_z = pixel_size_z self.pixel_size_y = pixel_size_y self.zmax = np.linalg.norm(det_corner_2 - det_corner_0) @@ -68,16 +75,16 @@ def __init__(self, pixel_size_z, pixel_size_y, det_corner_0, det_corner_1, det_c self._point_spread_kernel_shape = (5, 5) def point_spread_function(self, z, y): - return np.exp(-0.5 * (z*z + y*y) / (1.0 * 1.0) ) + return np.exp(-0.5 * (z * z + y * y) / (1.0 * 1.0)) @property def point_spread_kernel_shape(self): """point_spread_kernel_shape (:obj:`tuple`): Number of pixels in zdhat and ydhat over which to apply the pointspread - function for each scattering event. I.e the shape of the kernel that will be convolved with the diffraction - pattern. The values of the kernel is defined by the point_spread_function. Defaults to shape (5, 5). + function for each scattering event. I.e the shape of the kernel that will be convolved with the diffraction + pattern. The values of the kernel is defined by the point_spread_function. Defaults to shape (5, 5). - NOTE: The point_spread_function is automatically normalised over the point_spread_kernel_shape domain such that the - final convolution over the detector diffraction pattern is intensity preserving. + NOTE: The point_spread_function is automatically normalised over the point_spread_kernel_shape domain such that the + final convolution over the detector diffraction pattern is intensity preserving. """ return self._point_spread_kernel_shape @@ -85,19 +92,24 @@ def point_spread_kernel_shape(self): def point_spread_kernel_shape(self, kernel_shape): if kernel_shape[0] % 2 == 0 or kernel_shape[1] % 2 == 0: - raise ValueError("Point spread function kernel shape must be odd in both dimensions, but shape "+str(kernel_shape)+" was provided.") + raise ValueError( + "Point spread function kernel shape must be odd in both dimensions, but shape " + + str(kernel_shape) + + " was provided." + ) else: self._point_spread_kernel_shape = kernel_shape def render( - self, - frames_to_render, - lorentz=True, - polarization=True, - structure_factor=True, - method="centroid", - verbose=True, - number_of_processes=1): + self, + frames_to_render, + lorentz=True, + polarization=True, + structure_factor=True, + method="centroid", + verbose=True, + number_of_processes=1, + ): """Render a pixelated diffraction pattern onto the detector plane . NOTE: The value read out on a pixel in the detector is an approximation of the integrated number of counts over @@ -109,9 +121,9 @@ def render( lorentz (:obj:`bool`): Weight scattered intensity by Lorentz factor. Defaults to False. polarization (:obj:`bool`): Weight scattered intensity by Polarization factor. Defaults to False. structure_factor (:obj:`bool`): Weight scattered intensity by Structure Factor factor. Defaults to False. - method (:obj:`str`): Rendering method, must be one of ```project``` , ```centroid``` or ```centroid_with_scintillator```. 
+ method (:obj:`str`): Rendering method, must be one of ```project``` , ```centroid``` or ```centroid_with_scintillator```. Defaults to ```centroid```. The default,```method=centroid```, is a simple deposit of intensity for each scattering_unit - onto the detector by tracing a line from the sample scattering region centroid to the detector plane. The intensity + onto the detector by tracing a line from the sample scattering region centroid to the detector plane. The intensity is deposited into a single detector pixel regardless of the geometrical shape of the scattering_unit. If instead ```method=project``` the scattering regions are projected onto the detector depositing a intensity over possibly several pixels as weighted by the optical path lengths of the rays diffracting from the scattering @@ -128,60 +140,103 @@ def render( """ - if verbose and number_of_processes!=1: - raise NotImplemented('Verbose mode is not implemented for multiprocesses computations') + if verbose and number_of_processes != 1: + raise NotImplemented( + "Verbose mode is not implemented for multiprocesses computations" + ) - if frames_to_render=='all': + if frames_to_render == "all": frames_to_render = list(range(len(self.frames))) elif isinstance(frames_to_render, int): frames_to_render = [frames_to_render] - if method == 'project': + if method == "project": renderer = self._projection_render kernel = self._get_point_spread_function_kernel() - elif method == 'centroid': + elif method == "centroid": renderer = self._centroid_render kernel = self._get_point_spread_function_kernel() - elif method == 'centroid_with_scintillator': + elif method == "centroid_with_scintillator": renderer = self._centroid_render_with_scintillator kernel = None else: - raise ValueError('No such method: '+method+' exist, method should be one of project or centroid') - - if number_of_processes==1: - rendered_frames = self._render_and_convolve( (frames_to_render, kernel, renderer, lorentz, polarization, structure_factor, verbose) ) + raise ValueError( + "No such method: " + + method + + " exist, method should be one of project or centroid" + ) + + if number_of_processes == 1: + rendered_frames = self._render_and_convolve( + ( + frames_to_render, + kernel, + renderer, + lorentz, + polarization, + structure_factor, + verbose, + ) + ) else: args = [] - for frames_bundle in np.array_split(np.array(frames_to_render), number_of_processes): - args.append( (frames_bundle, kernel, renderer, lorentz, polarization, structure_factor, verbose) ) - with Pool(number_of_processes) as p: #TODO: better unit tests + for frames_bundle in np.array_split( + np.array(frames_to_render), number_of_processes + ): + args.append( + ( + frames_bundle, + kernel, + renderer, + lorentz, + polarization, + structure_factor, + verbose, + ) + ) + with Pool(number_of_processes) as p: # TODO: better unit tests nested_frame_bundles = p.map(self._render_and_convolve, args) rendered_frames = [] for frames_bundle in nested_frame_bundles: rendered_frames.extend(frames_bundle) rendered_frames = np.array(rendered_frames) - if len(frames_to_render)==1: - return rendered_frames[0,:,:] + if len(frames_to_render) == 1: + return rendered_frames[0, :, :] else: return rendered_frames def _render_and_convolve(self, args): - frames_bundle, kernel, renderer, lorentz, polarization, structure_factor, verbose = args + ( + frames_bundle, + kernel, + renderer, + lorentz, + polarization, + structure_factor, + verbose, + ) = args rendered_frames = [] for frame_index in frames_bundle: - frame = np.zeros( 
(self.pixel_coordinates.shape[0], self.pixel_coordinates.shape[1]) ) + frame = np.zeros( + (self.pixel_coordinates.shape[0], self.pixel_coordinates.shape[1]) + ) for si, scattering_unit in enumerate(self.frames[frame_index]): if verbose: - progress_bar_message = "Rendering " + \ - str(len(self.frames[frame_index])) + " scattering volumes unto the detector" - progress_fraction = float( - si + 1) / len(self.frames[frame_index]) + progress_bar_message = ( + "Rendering " + + str(len(self.frames[frame_index])) + + " scattering volumes unto the detector" + ) + progress_fraction = float(si + 1) / len(self.frames[frame_index]) utils._print_progress( - progress_fraction, - message=progress_bar_message) - renderer(scattering_unit, frame, lorentz, polarization, structure_factor) - if kernel is not None: frame = self._apply_point_spread_function( frame, kernel ) + progress_fraction, message=progress_bar_message + ) + renderer( + scattering_unit, frame, lorentz, polarization, structure_factor + ) + if kernel is not None: + frame = self._apply_point_spread_function(frame, kernel) rendered_frames.append(frame) return rendered_frames @@ -193,14 +248,20 @@ def _apply_point_spread_function(self, frame, kernel): """ if self.point_spread_function is not None: - infmask = np.isinf(frame) # Due to approximate Lorentz factors + infmask = np.isinf(frame) # Due to approximate Lorentz factors frame[infmask] = 0 - if not np.all(frame==0): - frame = convolve2d(frame, kernel, mode='same' ) + if not np.all(frame == 0): + frame = convolve2d(frame, kernel, mode="same") frame[infmask] = np.inf return frame - def pixel_index_to_theta_eta(self, incoming_wavevector, pixel_zd_index, pixel_yd_index, scattering_origin=np.array([0, 0, 0]) ): + def pixel_index_to_theta_eta( + self, + incoming_wavevector, + pixel_zd_index, + pixel_yd_index, + scattering_origin=np.array([0, 0, 0]), + ): """Compute bragg angle and azimuth angle for a detector pixel index. Args: @@ -212,12 +273,23 @@ def pixel_index_to_theta_eta(self, incoming_wavevector, pixel_zd_index, pixel_yd (:obj:`tuple`) Bragg angle theta and azimuth angle eta (measured from det_corner_1 - det_corner_0 axis) in radians """ # TODO: unit test - pixel_zd_coord = pixel_zd_index*self.pixel_size_z - pixel_yd_coord = pixel_yd_index*self.pixel_size_y - theta, eta = self.pixel_coord_to_theta_eta(incoming_wavevector, pixel_zd_coord, pixel_yd_coord, scattering_origin=np.array([0, 0, 0]) ) + pixel_zd_coord = pixel_zd_index * self.pixel_size_z + pixel_yd_coord = pixel_yd_index * self.pixel_size_y + theta, eta = self.pixel_coord_to_theta_eta( + incoming_wavevector, + pixel_zd_coord, + pixel_yd_coord, + scattering_origin=np.array([0, 0, 0]), + ) return theta, eta - def pixel_coord_to_theta_eta(self, incoming_wavevector, pixel_zd_coord, pixel_yd_coord, scattering_origin=np.array([0, 0, 0]) ): + def pixel_coord_to_theta_eta( + self, + incoming_wavevector, + pixel_zd_coord, + pixel_yd_coord, + scattering_origin=np.array([0, 0, 0]), + ): """Compute bragg angle and azimuth angle for a detector coordinate. Args: @@ -230,12 +302,17 @@ def pixel_coord_to_theta_eta(self, incoming_wavevector, pixel_zd_coord, pixel_yd """ # TODO: unit test khat = incoming_wavevector / np.linalg.norm(incoming_wavevector) - kp = self.det_corner_0 + pixel_zd_coord*self.zdhat + pixel_yd_coord*self.ydhat - scattering_origin + kp = ( + self.det_corner_0 + + pixel_zd_coord * self.zdhat + + pixel_yd_coord * self.ydhat + - scattering_origin + ) kprimehat = kp / np.linalg.norm(kp) - theta = np.arccos(khat.dot(kprimehat)) / 2. 
+ theta = np.arccos(khat.dot(kprimehat)) / 2.0 korthogonal = kprimehat - (khat * kprimehat.dot(khat)) - eta = np.arccos( self.zdhat.dot(korthogonal) / np.linalg.norm(korthogonal) ) - eta *= np.sign( (np.cross( self.zdhat, korthogonal )).dot(-incoming_wavevector) ) + eta = np.arccos(self.zdhat.dot(korthogonal) / np.linalg.norm(korthogonal)) + eta *= np.sign((np.cross(self.zdhat, korthogonal)).dot(-incoming_wavevector)) return theta, eta def get_intersection(self, ray_direction, source_point): @@ -249,12 +326,14 @@ def get_intersection(self, ray_direction, source_point): (:obj:`tuple`) zd, yd in detector plane coordinates. """ - s = (self.det_corner_0 - source_point).dot(self.normal) / \ - ray_direction.dot(self.normal) - intersection = source_point + ray_direction * s + + s = (self.det_corner_0 - source_point).dot(self.normal) / ray_direction.dot( + self.normal + ) + intersection = source_point + ray_direction * s[:, np.newaxis] zd = np.dot(intersection - self.det_corner_0, self.zdhat) yd = np.dot(intersection - self.det_corner_0, self.ydhat) - return zd, yd + return np.array([zd, yd]).T def contains(self, zd, yd): """Determine if the detector coordinate zd,yd lies within the detector bounds. @@ -267,7 +346,8 @@ def contains(self, zd, yd): (:obj:`boolean`) True if the zd,yd is within the detector bounds. """ - return zd >= 0 and zd <= self.zmax and yd >= 0 and yd <= self.ymax + + return (zd >= 0) & (zd <= self.zmax) & (yd >= 0) & (yd <= self.ymax) def project(self, scattering_unit, box): """Compute parametric projection of scattering region unto detector. @@ -282,24 +362,27 @@ def project(self, scattering_unit, box): """ - ray_points = self.pixel_coordinates[box[0]:box[1], box[2]:box[3], :].reshape( - (box[1] - box[0]) * (box[3] - box[2]), 3) + ray_points = self.pixel_coordinates[ + box[0] : box[1], box[2] : box[3], : + ].reshape((box[1] - box[0]) * (box[3] - box[2]), 3) plane_normals = scattering_unit.convex_hull.equations[:, 0:3] plane_ofsets = scattering_unit.convex_hull.equations[:, 3].reshape( - scattering_unit.convex_hull.equations.shape[0], 1) + scattering_unit.convex_hull.equations.shape[0], 1 + ) plane_points = -np.multiply(plane_ofsets, plane_normals) ray_points = np.ascontiguousarray(ray_points) ray_direction = np.ascontiguousarray( - scattering_unit.scattered_wave_vector / - np.linalg.norm( - scattering_unit.scattered_wave_vector)) + scattering_unit.scattered_wave_vector + / np.linalg.norm(scattering_unit.scattered_wave_vector) + ) plane_points = np.ascontiguousarray(plane_points) plane_normals = np.ascontiguousarray(plane_normals) clip_lengths = utils._clip_line_with_convex_polyhedron( - ray_points, ray_direction, plane_points, plane_normals) + ray_points, ray_direction, plane_points, plane_normals + ) clip_lengths = clip_lengths.reshape(box[1] - box[0], box[3] - box[2]) return clip_lengths @@ -316,18 +399,24 @@ def get_wrapping_cone(self, k, source_point): which scattering will systematically miss the detector. 
""" - fourth_corner_of_detector = self.det_corner_2 + (self.det_corner_1 - self.det_corner_0[:]) + fourth_corner_of_detector = self.det_corner_2 + ( + self.det_corner_1 - self.det_corner_0[:] + ) geom_mat = np.zeros((3, 4)) for i, det_corner in enumerate( - [self.det_corner_0, self.det_corner_1, self.det_corner_2, fourth_corner_of_detector]): + [ + self.det_corner_0, + self.det_corner_1, + self.det_corner_2, + fourth_corner_of_detector, + ] + ): geom_mat[:, i] = det_corner - source_point - normalised_local_coord_geom_mat = geom_mat / \ - np.linalg.norm(geom_mat, axis=0) + normalised_local_coord_geom_mat = geom_mat / np.linalg.norm(geom_mat, axis=0) cone_opening = np.arccos( - np.dot( - normalised_local_coord_geom_mat.T, - k / np.linalg.norm(k))) # These are two time Bragg angles - return np.max(cone_opening) / 2. + np.dot(normalised_local_coord_geom_mat.T, k / np.linalg.norm(k)) + ) # These are two time Bragg angles + return np.max(cone_opening) / 2.0 def save(self, path): """Save the detector object to disc (via pickling). @@ -356,54 +445,58 @@ def load(cls, path): """ if not path.endswith(".det"): raise ValueError("The loaded motion file must end with .det") - with open(path, 'rb') as f: + with open(path, "rb") as f: return dill.load(f) def _get_point_spread_function_kernel(self): - """Render the point_spread_function onto a grid of shape specified by point_spread_kernel_shape. - """ + """Render the point_spread_function onto a grid of shape specified by point_spread_kernel_shape.""" sz, sy = self.point_spread_kernel_shape - axz = np.linspace(-(sz - 1) / 2., (sz - 1) / 2., sz) - axy = np.linspace(-(sy - 1) / 2., (sy - 1) / 2., sy) - Z, Y = np.meshgrid(axz, axy, indexing='ij') + axz = np.linspace(-(sz - 1) / 2.0, (sz - 1) / 2.0, sz) + axy = np.linspace(-(sy - 1) / 2.0, (sy - 1) / 2.0, sy) + Z, Y = np.meshgrid(axz, axy, indexing="ij") kernel = np.zeros(self.point_spread_kernel_shape) for i in range(Z.shape[0]): for j in range(Y.shape[1]): kernel[i, j] = self.point_spread_function(Z[i, j], Y[i, j]) - assert len( kernel[kernel<0] )==0, "Point spread function must be strictly positive, but negative values were found." - assert np.sum(kernel)>1e-8, "The integrated value of the point spread function over the defined kernel domain is close to zero." + assert ( + len(kernel[kernel < 0]) == 0 + ), "Point spread function must be strictly positive, but negative values were found." + assert ( + np.sum(kernel) > 1e-8 + ), "The integrated value of the point spread function over the defined kernel domain is close to zero." 
return kernel / np.sum(kernel) def _get_pixel_coordinates(self): zds = np.arange(0, self.zmax, self.pixel_size_z) yds = np.arange(0, self.ymax, self.pixel_size_y) - Z, Y = np.meshgrid(zds, yds, indexing='ij') + Z, Y = np.meshgrid(zds, yds, indexing="ij") Zds = np.zeros((len(zds), len(yds), 3)) Yds = np.zeros((len(zds), len(yds), 3)) for i in range(3): - Zds[:,:,i] = Z - Yds[:,:,i] = Y - pixel_coordinates = self.det_corner_0.reshape(1,1,3) + Zds*self.zdhat.reshape(1,1,3) + Yds*self.ydhat.reshape(1,1,3) + Zds[:, :, i] = Z + Yds[:, :, i] = Y + pixel_coordinates = ( + self.det_corner_0.reshape(1, 1, 3) + + Zds * self.zdhat.reshape(1, 1, 3) + + Yds * self.ydhat.reshape(1, 1, 3) + ) return pixel_coordinates def _centroid_render( - self, - scattering_unit, - frame, - lorentz, - polarization, - structure_factor): + self, scattering_unit, frame, lorentz, polarization, structure_factor + ): """Simple deposit of intensity for each scattering_unit onto the detector by tracing a line from the sample scattering region centroid to the detector plane. The intensity is deposited into a single detector pixel regardless of the geometrical shape of the scattering_unit. """ - zd, yd = self.get_intersection( - scattering_unit.scattered_wave_vector, scattering_unit.centroid) + zd, yd = scattering_unit.zd, scattering_unit.yd + if self.contains(zd, yd): intensity_scaling_factor = self._get_intensity_factor( - scattering_unit, lorentz, polarization, structure_factor) + scattering_unit, lorentz, polarization, structure_factor + ) row, col = self._detector_coordinate_to_pixel_index(zd, yd) if np.isinf(intensity_scaling_factor): frame[row, col] += np.inf @@ -411,12 +504,8 @@ def _centroid_render( frame[row, col] += scattering_unit.volume * intensity_scaling_factor def _centroid_render_with_scintillator( - self, - scattering_unit, - frame, - lorentz, - polarization, - structure_factor): + self, scattering_unit, frame, lorentz, polarization, structure_factor + ): """Simple deposit of intensity for each scattering_unit onto the detector by tracing a line from the sample scattering region centroid to the detector plane. The intensity is deposited by placing the detector point spread function at the hit location and rendering it unto the detector grid. @@ -425,39 +514,38 @@ def _centroid_render_with_scintillator( step using convolution. Here the point spread is simulated to take place in the scintillator, before reaching the chip. 
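The kernel returned above is simply the point_spread_function sampled on a small grid centred at zero and normalised to unit sum; _apply_point_spread_function then convolves it with the rendered frame. A compact sketch of that recipe, assuming a hypothetical Gaussian PSF and a 5x5 kernel purely for illustration:

    import numpy as np
    from scipy.signal import convolve2d

    def gaussian_psf(z, y, sigma=1.0):
        # Hypothetical point spread function, coordinates in units of pixels.
        return np.exp(-(z**2 + y**2) / (2.0 * sigma**2))

    sz, sy = 5, 5
    axz = np.linspace(-(sz - 1) / 2.0, (sz - 1) / 2.0, sz)
    axy = np.linspace(-(sy - 1) / 2.0, (sy - 1) / 2.0, sy)
    Z, Y = np.meshgrid(axz, axy, indexing="ij")
    kernel = gaussian_psf(Z, Y)
    kernel = kernel / np.sum(kernel)       # unit sum, so total intensity is preserved

    frame = np.zeros((32, 32))
    frame[16, 16] = 1.0                    # a single bright pixel
    blurred = convolve2d(frame, kernel, mode="same")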
""" - zd, yd = self.get_intersection( - scattering_unit.scattered_wave_vector, scattering_unit.centroid) + zd, yd = scattering_unit.zd, scattering_unit.yd if self.contains(zd, yd): intensity_scaling_factor = self._get_intensity_factor( - scattering_unit, lorentz, polarization, structure_factor) + scattering_unit, lorentz, polarization, structure_factor + ) a, b = self.point_spread_kernel_shape row, col = self._detector_coordinate_to_pixel_index(zd, yd) zd_in_pixels = zd / self.pixel_size_z yd_in_pixels = yd / self.pixel_size_y - rl, rh = row - a//2 - 1, row + a//2 + 1 - cl, ch = col - b//2 - 1, col + b//2 + 1 - rl, rh = np.max([rl, 0]), np.min([rh, self.pixel_coordinates.shape[0]-1]) - cl, ch = np.max([cl, 0]), np.min([ch, self.pixel_coordinates.shape[1]-1]) - zg = np.linspace(rl, rh, rh-rl+1 ) + 0.5 # pixel centre coordinates in z - yg = np.linspace(cl, ch, ch-cl+1 ) + 0.5 # pixel centre coordinates in y - Z, Y = np.meshgrid(zg, yg, indexing='ij') - drifted_kernel = self.point_spread_function(Z-zd_in_pixels, Y-yd_in_pixels) + rl, rh = row - a // 2 - 1, row + a // 2 + 1 + cl, ch = col - b // 2 - 1, col + b // 2 + 1 + rl, rh = np.max([rl, 0]), np.min([rh, self.pixel_coordinates.shape[0] - 1]) + cl, ch = np.max([cl, 0]), np.min([ch, self.pixel_coordinates.shape[1] - 1]) + zg = np.linspace(rl, rh, rh - rl + 1) + 0.5 # pixel centre coordinates in z + yg = np.linspace(cl, ch, ch - cl + 1) + 0.5 # pixel centre coordinates in y + Z, Y = np.meshgrid(zg, yg, indexing="ij") + drifted_kernel = self.point_spread_function( + Z - zd_in_pixels, Y - yd_in_pixels + ) drifted_kernel = drifted_kernel / np.sum(drifted_kernel) if np.isinf(intensity_scaling_factor): - frame[rl:rh+1, cl:ch+1] = np.inf + frame[rl : rh + 1, cl : ch + 1] = np.inf else: - frame[rl:rh+1, cl:ch+1] += scattering_unit.volume * intensity_scaling_factor * drifted_kernel - + frame[rl : rh + 1, cl : ch + 1] += ( + scattering_unit.volume * intensity_scaling_factor * drifted_kernel + ) def _projection_render( - self, - scattering_unit, - frame, - lorentz, - polarization, - structure_factor): + self, scattering_unit, frame, lorentz, polarization, structure_factor + ): """Raytrace and project the scattering regions onto the detector plane for increased peak shape accuracy. This is generally very computationally expensive compared to the simpler (:obj:`function`):_centroid_render function. @@ -473,26 +561,25 @@ def _projection_render( # i.e the scattering_unit is small in comparison to the detector # pixels. self._centroid_render( - scattering_unit, - frame, - lorentz, - polarization, - structure_factor) + scattering_unit, frame, lorentz, polarization, structure_factor + ) else: intensity_scaling_factor = self._get_intensity_factor( - scattering_unit, lorentz, polarization, structure_factor) + scattering_unit, lorentz, polarization, structure_factor + ) if np.isinf(intensity_scaling_factor): - frame[box[0]:box[1], box[2]:box[3]] += np.inf + frame[box[0] : box[1], box[2] : box[3]] += np.inf else: - frame[box[0]:box[1], box[2]:box[3]] += projection * \ - intensity_scaling_factor * self.pixel_size_z * self.pixel_size_y + frame[box[0] : box[1], box[2] : box[3]] += ( + projection + * intensity_scaling_factor + * self.pixel_size_z + * self.pixel_size_y + ) def _get_intensity_factor( - self, - scattering_unit, - lorentz, - polarization, - structure_factor): + self, scattering_unit, lorentz, polarization, structure_factor + ): intensity_factor = 1.0 # TODO: Consider solid angle intensity rescaling and air scattering. 
if lorentz: @@ -501,10 +588,14 @@ def _get_intensity_factor( intensity_factor *= scattering_unit.polarization_factor if structure_factor: if scattering_unit.phase.structure_factors is not None: - intensity_factor *= (scattering_unit.real_structure_factor ** - 2 + scattering_unit.imaginary_structure_factor**2) + intensity_factor *= ( + scattering_unit.real_structure_factor**2 + + scattering_unit.imaginary_structure_factor**2 + ) else: - raise ValueError("Structure factors have not been set, .cif file is required at sample instantiation.") + raise ValueError( + "Structure factors have not been set, .cif file is required at sample instantiation." + ) return intensity_factor @@ -524,31 +615,35 @@ def _get_projected_bounding_box(self, scattering_unit): are within the bounding box. """ - vertices = scattering_unit.convex_hull.points[scattering_unit.convex_hull.vertices] - projected_vertices = np.array([self.get_intersection( - scattering_unit.scattered_wave_vector, v) for v in vertices]) + vertices = scattering_unit.convex_hull.points[ + scattering_unit.convex_hull.vertices + ] + + projected_vertices = self.get_intersection( + scattering_unit.scattered_wave_vector, vertices + ) min_zd, max_zd = np.min(projected_vertices[:, 0]), np.max( - projected_vertices[:, 0]) + projected_vertices[:, 0] + ) min_yd, max_yd = np.min(projected_vertices[:, 1]), np.max( - projected_vertices[:, 1]) + projected_vertices[:, 1] + ) min_zd, max_zd = np.max([min_zd, 0]), np.min([max_zd, self.zmax]) min_yd, max_yd = np.max([min_yd, 0]), np.min([max_yd, self.ymax]) - - if min_zd > max_zd or min_yd > max_yd: return None min_row_indx, min_col_indx = self._detector_coordinate_to_pixel_index( - min_zd, min_yd) + min_zd, min_yd + ) max_row_indx, max_col_indx = self._detector_coordinate_to_pixel_index( - max_zd, max_yd) + max_zd, max_yd + ) - max_row_indx = np.min( - [max_row_indx + 1, int(self.zmax / self.pixel_size_z)]) - max_col_indx = np.min( - [max_col_indx + 1, int(self.ymax / self.pixel_size_y)]) + max_row_indx = np.min([max_row_indx + 1, int(self.zmax / self.pixel_size_z)]) + max_col_indx = np.min([max_col_indx + 1, int(self.ymax / self.pixel_size_y)]) return min_row_indx, max_row_indx, min_col_indx, max_col_indx diff --git a/xrd_simulator/laue.py b/xrd_simulator/laue.py index dbcd2ae..04512ac 100644 --- a/xrd_simulator/laue.py +++ b/xrd_simulator/laue.py @@ -2,8 +2,10 @@ This module is mainly used internally by the :class:`xrd_simulator.polycrystal.Polycrystal`. However, for the advanced user, access to these functions may be of interest. """ + import numpy as np + def get_G(U, B, G_hkl): """Compute the diffraction vector @@ -20,7 +22,8 @@ def get_G(U, B, G_hkl): G (:obj:`numpy array`): Sample coordinate system diffraction vector. (``shape=(3,n)``) """ - return np.dot(np.dot(U, B), G_hkl) + + return np.float32(np.matmul(np.matmul(U, B), G_hkl.T)) def get_bragg_angle(G, wavelength): @@ -46,53 +49,80 @@ def get_sin_theta_and_norm_G(G, wavelength): Returns: sin(Bragg angle) (:obj:`float`): in units of radians and ||G||. + norm_G (:obj:`float`): Norm of the diffraction vector. """ normG = np.linalg.norm(G, axis=0) return normG * wavelength / (4 * np.pi), normG -def find_solutions_to_tangens_half_angle_equation(rho_0, rho_1, rho_2, delta_omega): +def find_solutions_to_tangens_half_angle_equation( + G_0, rho_0_factor, rho_1_factor, rho_2_factor, delta_omega +): """Find all solutions, :obj:`t`, to the equation (maximum 2 solutions exists) - .. 
math:: - \\rho_0 \\cos(t \\Delta \\omega) + \\rho_1 \\sin(t \\Delta \\omega) + \\rho_2 = 0. \\quad\\quad (1) + .. math:: + \\rho_0 \\cos(t \\Delta \\omega) + \\rho_1 \\sin(t \\Delta \\omega) + \\rho_2 = 0. \\quad\\quad (1) - by rewriting as + by rewriting as - .. math:: - (\\rho_2 - \\rho_0) s^2 + 2 \\rho_1 s + (\\rho_0 + \\rho_2) = 0. \\quad\\quad (2) - - where + .. math:: + (\\rho_2 - \\rho_0) s^2 + 2 \\rho_1 s + (\\rho_0 + \\rho_2) = 0. \\quad\\quad (2) - .. math:: - s = \\tan(t \\Delta \\omega / 2). \\quad\\quad (3) + where - and + .. math:: + s = \\tan(t \\Delta \\omega / 2). \\quad\\quad (3) + #Computed in advance to be + G_0: The non-rotated scattering vectors for all tetrahedra of a given phase. dimensions --> (tetrahedra,coordinates,hkl_planes) + \\rho_0_factor,\\rho_1_factor,\\rho_2_factor (:obj:`float`): Factors to compute the \\rho_0,\\rho_1 and \\rho_2 of equation (1). + delta_omega (:obj:`float`): Radians of rotation. - .. math:: \\Delta \\omega + Returns: + (:obj:`tuple` of :obj:`numpy.array`): A tuple containing two numpy arrays: + - indices: 2D numpy array representing indices for diffraction computation. + - values: 1D numpy array representing values for diffraction computation. - is a rotation angle - - Args: - \\rho_0,\\rho_1,\\rho_2 (:obj:`float`): Coefficients \\rho_0,\\rho_1 and \\rho_2 of equation (1). - delta_omega (:obj:`float`): Radians of rotation. + """ - Returns: - (:obj:`tuple` of :obj:`numpy.array`): 2 Arrays of solutions of matching shape to input. if no solutions exist on - an interval the corresponding instances in the solution array holds np.nan values. + if ( + len(G_0.shape) == 2 + ): # We add an empty dimension first in case it's a single tet G_0 being passed. + G_0 = G_0[np.newaxis, :, :] - """ + rho_0 = np.matmul(rho_0_factor, G_0) + rho_2 = np.matmul(rho_2_factor, G_0) + np.sum((G_0 * G_0), axis=1) / 2.0 denominator = rho_2 - rho_0 - a = np.divide(rho_1, denominator, out=np.full_like(rho_0, np.nan), where=denominator!=0) - b = np.divide(rho_0 + rho_2, denominator, out=np.full_like(rho_0, np.nan), where=denominator!=0) + numerator = rho_2 + rho_0 + del rho_2 + a = np.divide( + np.matmul(rho_1_factor, G_0), + denominator, + out=np.full_like(rho_0, np.nan), + where=denominator != 0, + ) + b = np.divide( + numerator, denominator, out=np.full_like(rho_0, np.nan), where=denominator != 0 + ) + del denominator, numerator, rho_0 rootval = a**2 - b leadingterm = -a - rootval[rootval<0] = np.nan + del a, b + rootval[rootval < 0] = np.nan s1 = leadingterm + np.sqrt(rootval) s2 = leadingterm - np.sqrt(rootval) + del rootval, leadingterm t1 = 2 * np.arctan(s1) / delta_omega + del s1 + indices_t1 = np.array(np.where(np.logical_and(t1 >= 0, t1 <= 1))) + values_t1 = t1[indices_t1[0, :], indices_t1[1, :]] + + del t1 t2 = 2 * np.arctan(s2) / delta_omega - t1[(t1>1)|(t1<0)]=np.nan - t2[(t2>1)|(t2<0)]=np.nan - return t1, t2 + del s2, delta_omega + indices_t2 = np.array(np.where(np.logical_and(t2 >= 0, t2 <= 1))) + values_t2 = t2[indices_t2[0, :], indices_t2[1, :]] + del t2 + return np.concatenate((indices_t1, indices_t2), axis=1), np.concatenate( + (values_t1, values_t2), axis=0 + ) diff --git a/xrd_simulator/mesh.py b/xrd_simulator/mesh.py index 9a07be2..642fde7 100644 --- a/xrd_simulator/mesh.py +++ b/xrd_simulator/mesh.py @@ -15,6 +15,7 @@ Below follows a detailed description of the mesh class attributes and functions. 
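A scalar sketch of the tangent half-angle substitution that find_solutions_to_tangens_half_angle_equation applies above: equation (1) becomes a quadratic in s = tan(t * delta_omega / 2), and only roots that map back to t in [0, 1] correspond to reflections during the motion. The coefficients below are illustrative only:

    import numpy as np

    rho_0, rho_1, rho_2 = -1.121, 1.293, -0.121
    delta_omega = np.radians(90.0)

    # (rho_2 - rho_0) s^2 + 2 rho_1 s + (rho_0 + rho_2) = 0, with s = tan(t * delta_omega / 2)
    a = rho_1 / (rho_2 - rho_0)
    b = (rho_0 + rho_2) / (rho_2 - rho_0)
    discriminant = a**2 - b                  # a negative discriminant means no reflection
    roots = np.array([-a + np.sqrt(discriminant), -a - np.sqrt(discriminant)])

    t = 2.0 * np.arctan(roots) / delta_omega
    t = t[(t >= 0) & (t <= 1)]               # keep only times inside the rotation interval
    # Here t is approximately [0.5]: the reflection fires halfway through the rotation,
    # while the second root falls outside [0, 1] and is discarded.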
""" + import numpy as np import pygalmesh import meshio @@ -82,10 +83,8 @@ def generate_mesh_from_vertices(cls, coord, enod): @classmethod def generate_mesh_from_levelset( - cls, - level_set, - bounding_radius, - max_cell_circumradius): + cls, level_set, bounding_radius, max_cell_circumradius + ): """Generate a mesh from a level set using `the pygalmesh package`_: .. _the pygalmesh package: https://github.com/nschloe/pygalmesh @@ -105,9 +104,8 @@ def __init__(self): self.get_bounding_sphere_squared_radius = lambda: bounding_radius**2 mesh = pygalmesh.generate_mesh( - LevelSet(), - max_cell_circumradius=max_cell_circumradius, - verbose=False) + LevelSet(), max_cell_circumradius=max_cell_circumradius, verbose=False + ) return cls._build_tetramesh(mesh) @@ -144,17 +142,19 @@ def update(self, rigid_body_motion, time): time (:obj:`float`): Time between [0,1] at which to call the rigid body motion. """ - self._mesh.points = rigid_body_motion(self._mesh.points.T, time=time).T + self._mesh.points = rigid_body_motion(self._mesh.points, time=time) self.coord = np.array(self._mesh.points) - s1,s2,s3 = self.enormals.shape - self.enormals = self.enormals.reshape(s1*s2, 3) - self.enormals = rigid_body_motion.rotate( self.enormals.T, time=time).T + s1, s2, s3 = self.enormals.shape + self.enormals = self.enormals.reshape(s1 * s2, 3) + + self.enormals = rigid_body_motion.rotate(self.enormals, time=time) self.enormals = self.enormals.reshape(s1, s2, s3) - self.ecentroids = rigid_body_motion(self.ecentroids.T, time=time).T - self.espherecentroids = rigid_body_motion(self.espherecentroids.T, time=time).T - self.centroid = rigid_body_motion(self.centroid.reshape(3,1), time=time).reshape(3,) + self.ecentroids = rigid_body_motion(self.ecentroids, time=time) + self.espherecentroids = rigid_body_motion(self.espherecentroids, time=time) + + self.centroid = rigid_body_motion(self.centroid.reshape(1, 3), time=time)[0] def save(self, file, element_data=None): """Save the tetra mesh to .xdmf paraview readable format for visualization. @@ -174,8 +174,8 @@ def save(self, file, element_data=None): element_data[key] = [list(element_data[key])] meshio.write_points_cells( - save_path, self.coord, [ - ("tetra", self.enod)], cell_data=element_data) + save_path, self.coord, [("tetra", self.enod)], cell_data=element_data + ) @classmethod def load(cls, path): @@ -199,95 +199,143 @@ def _build_tetramesh(cls, mesh): return tetmesh def _compute_mesh_faces(self, enod): - """Compute all element faces nodal numbers. - """ - efaces = np.zeros((enod.shape[0], 4, 3), dtype=int) - for i in range(enod.shape[0]): - # nodal combinations defining 4 unique planes in a tet. - permutations = [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]] - for j, perm in enumerate(permutations): - efaces[i, j, :] = enod[i, perm] + """Compute all element faces nodal numbers. We create a matrix of all possible permutations and then we index the enod matrix.""" + permutations = np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]) + efaces = enod[:, permutations] return efaces def _compute_mesh_normals(self, coord, enod, efaces): - """Compute all element faces outwards unit vector normals. 
- """ - enormals = np.zeros((enod.shape[0], 4, 3)) - for i in range(enod.shape[0]): - ec = coord[enod[i, :], :] - ecentroid = np.mean(ec, axis=0) - for j in range(efaces.shape[1]): - ef = coord[efaces[i, j, :], :] - enormals[i, j, :] = self._compute_plane_normal(ef, ecentroid) - return enormals + """Compute all element faces outwards unit vector normals.""" + vertices = coord[efaces] + normals = np.cross( + vertices[:, :, 1, :] - vertices[:, :, 0, :], + vertices[:, :, 2, :] - vertices[:, :, 0, :], + ) + faces_centers = np.mean(vertices, axis=2) + centroids = np.mean(faces_centers, axis=1) + centroid_to_face = faces_centers - centroids[:, np.newaxis, :] + signs = np.sum(centroid_to_face * normals, axis=-1) + signs[signs >= 0] = 1 + signs[signs < 0] = -1 + enormals = ( + normals + / np.linalg.norm(normals, axis=2, keepdims=True) + * signs[:, :, np.newaxis] + ) - def _compute_plane_normal(self, points, centroid): - """Compute plane normal (outwards refering to centroid). - """ - v1 = points[1, :] - points[0, :] - v2 = points[2, :] - points[0, :] - # define a vector perpendicular to the plane. - n = np.cross(v1, v2) - # set vector direction outwards from centroid. - n = n * np.sign(n.dot(points[0, :] - centroid)) - # normalised vector and return. - return n / np.linalg.norm(n) + return enormals def _compute_mesh_centroids(self, coord, enod): - """Compute centroids of elements. - """ - ecentroids = np.zeros((enod.shape[0], 3)) - for i in range(enod.shape[0]): - ec = coord[enod[i, :], :] - ecentroids[i, :] = np.sum(ec, axis=0) / ec.shape[0] + """Compute centroids of elements.""" + ecentroids = np.mean(coord[enod], axis=1) return ecentroids def _compute_mesh_volumes(self, enod, coord): - """Compute per element enclosed volume. - """ - evolumes = np.zeros((enod.shape[0],)) - for i in range(enod.shape[0]): - ec = coord[enod[i, :], :] - a = ec[1] - ec[0] - b = ec[2] - ec[0] - c = ec[3] - ec[0] - evolumes[i] = (1 / 6.) * np.dot(np.cross(a, b), c) + """Compute per element enclosed volume.""" + vertices = coord[enod] + a = vertices[:, 1, :] - vertices[:, 0, :] + b = vertices[:, 2, :] - vertices[:, 0, :] + c = vertices[:, 3, :] - vertices[:, 0, :] + evolumes = (1 / 6.0) * np.sum((np.cross(a, b, axis=1) * c), axis=1) return evolumes def _compute_mesh_spheres(self, coord, enod): - """Compute per element minimal bounding spheres. + """Compute per tetrahedron minimal bounding spheres. The approach here described avoids any iterative process, + solving in a vectorized manner all the spheres at once. + + First the minimal sphere for the two most distant vertices of every tetrahedron is calculated. + Then the minimal sphere for the three most distant vertices is calculated. + Finally the sphere containing the 4 vertices in their surface is calculated. + + The first of the three spheres that satisfies all vertices of each tetrahedron being contained in it + is selected. 
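A per-tetrahedron sketch of the first candidate in that cascade, using nothing beyond numpy (the batched two-, three- and four-point cases live in the utils helpers called just below):

    import numpy as np
    from itertools import combinations

    tet = np.array([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0]])           # illustrative tetrahedron

    pairs = list(combinations(range(4), 2))
    lengths = [np.linalg.norm(tet[i] - tet[j]) for i, j in pairs]
    i, j = pairs[int(np.argmax(lengths))]        # the two most distant vertices

    center = (tet[i] + tet[j]) / 2.0             # circumsphere of that segment
    radius = np.linalg.norm(tet[i] - tet[j]) / 2.0
    others = [k for k in range(4) if k not in (i, j)]
    solved = bool(np.all(np.linalg.norm(tet[others] - center, axis=1) <= radius))
    # solved is False for this tetrahedron, so the three- and four-point spheres are tried next.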
+ + coord (coordinates) dimensions --> (triangle,vertices,xyz) + enod (ids of triangles) dimensions --> (tetrahedron,faces) """ - eradius = np.zeros((enod.shape[0],)) - espherecentroids = np.zeros((enod.shape[0], 3)) - for i in range(enod.shape[0]): - ec = coord[enod[i, :], :] - espherecentroids[i], eradius[i] = utils._get_bounding_ball(ec) + vertices = coord[enod] + n_tetra = enod.shape[0] + range_n_tetra = range(n_tetra) + pairs = np.array([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]) + all_pairs = np.tile([0, 1, 2, 3], (n_tetra, 1)) + + # We compute the length of every tetrahedron side + sides = utils._compute_sides(vertices) + + # We compute the vertices that are further apart + max_sides_index = np.argmax(sides, axis=1) + furthest2_indices = pairs[max_sides_index] + + # We compute the other 2 vertices of the tetrahedra + mask = np.zeros((n_tetra, 4), dtype=bool) + mask[range_n_tetra, furthest2_indices[:, 0]] = 1 + mask[range_n_tetra, furthest2_indices[:, 1]] = 1 + other2_indices = all_pairs[~mask].reshape(-1, 2) + + # We compute the smallest spheres that pass through the farthest-apart vertices and check if the other-2 vertices fall in + furthest2_vertices = vertices.transpose(1, 0, 2)[ + furthest2_indices.transpose(1, 0), range_n_tetra + ].transpose(1, 0, 2) + other2_vertices = vertices.transpose(1, 0, 2)[ + other2_indices.transpose(1, 0), range_n_tetra + ].transpose(1, 0, 2) + centers_1D, radii_1D = utils._circumsphere_of_segments(furthest2_vertices) + dist_centers_1D_to_vertices = np.linalg.norm( + other2_vertices - centers_1D[:, np.newaxis, :], axis=2 + ) + spheres_solved_with_1D = np.all( + (dist_centers_1D_to_vertices - radii_1D[:, np.newaxis]) <= 0, axis=1 + ) + + # We compute the smallest spheres that pass through the farthest 3 vertices and check if the remaining vertex falls in + next_furthest_index = np.argmax(dist_centers_1D_to_vertices, axis=1) + third_vertex = other2_vertices.transpose(1, 0, 2)[ + next_furthest_index, range_n_tetra + ] + largest_triangles = np.append( + furthest2_vertices, third_vertex[:, np.newaxis, :], axis=1 + ) + centers_2D, radii_2D = utils._circumsphere_of_triangles(largest_triangles) + dist_centers_2D_to_vertices = np.linalg.norm( + vertices - centers_2D[:, np.newaxis, :], axis=2 + ) + spheres_solved_with_2D = np.all( + (dist_centers_2D_to_vertices - radii_2D[:, np.newaxis]) <= 0, axis=1 + ) + + # Finally we compute the spheres that contain the 4 points on the surfaces for the remaining tetrahedrons + centers_3D, radii_3D = utils._circumsphere_of_tetrahedrons(vertices) + + # We select the 1D case if possible, otherwise the 2D case, and finally the 3D case + espherecentroids = np.where( + spheres_solved_with_1D[:, np.newaxis], + centers_1D, + np.where(spheres_solved_with_2D[:, np.newaxis], centers_2D, centers_3D), + ) + eradius = np.where( + spheres_solved_with_1D, + radii_1D, + np.where(spheres_solved_with_2D, radii_2D, radii_3D), + ) + return eradius, espherecentroids def _set_fem_matrices(self): - """Extract and set mesh FEM matrices from pygalmesh object. 
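For the final fallback, the sphere through all four vertices, one standard construction (not necessarily the exact formulation inside utils._circumsphere_of_tetrahedrons) equates the squared distances to the centre, which leaves a small linear system:

    import numpy as np

    tet = np.array([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0]])

    # 2 (v_i - v_0) . c = |v_i|^2 - |v_0|^2   for i = 1, 2, 3
    A = 2.0 * (tet[1:] - tet[0])
    rhs = np.sum(tet[1:] ** 2, axis=1) - np.sum(tet[0] ** 2)
    center = np.linalg.solve(A, rhs)
    radius = np.linalg.norm(tet[0] - center)
    # For this tetrahedron: center = (0.5, 0.5, 0.5), radius = sqrt(3) / 2.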
- """ + """Extract and set mesh FEM matrices from pygalmesh object.""" self.coord = np.array(self._mesh.points) - self.enod = np.array(self._mesh.cells_dict['tetra']) - self.dof = np.arange( - 0, - self.coord.shape[0] * - 3).reshape( - self.coord.shape[0], - 3) + self.enod = np.array(self._mesh.cells_dict["tetra"]) + self.dof = np.arange(0, self.coord.shape[0] * 3).reshape(self.coord.shape[0], 3) self.number_of_elements = self.enod.shape[0] def _expand_mesh_data(self): - """Compute extended mesh quantities such as element faces and normals. - """ + """Compute extended mesh quantities such as element faces and normals.""" self.efaces = self._compute_mesh_faces(self.enod) - self.enormals = self._compute_mesh_normals( - self.coord, self.enod, self.efaces) + self.enormals = self._compute_mesh_normals(self.coord, self.enod, self.efaces) self.ecentroids = self._compute_mesh_centroids(self.coord, self.enod) self.eradius, self.espherecentroids = self._compute_mesh_spheres( - self.coord, self.enod) + self.coord, self.enod + ) self.centroid = np.mean(self.ecentroids, axis=0) self.evolumes = self._compute_mesh_volumes(self.enod, self.coord) # TODO: considering leveraging this in beam.py for speed - # self.econvexhulls = [ ConvexHull( self.coord[nodes,:] ) for nodes in self.enod ] diff --git a/xrd_simulator/motion.py b/xrd_simulator/motion.py index 5829f54..02740cc 100644 --- a/xrd_simulator/motion.py +++ b/xrd_simulator/motion.py @@ -65,18 +65,33 @@ def __call__(self, vectors, time): Transformed vectors (:obj:`numpy array`) of ``shape=(3,N)``. """ - assert time <= 1 and time >= 0, "The rigid body motion is only valid on the interval time=[0,1]" - if len(vectors.shape) > 1: - translation = self.translation.reshape(3, 1) - origin = self.origin.reshape(3, 1) - else: + #assert time <= 1 and time >= 0, "The rigid body motion is only valid on the interval time=[0,1]" + + if len(vectors.shape) == 1: translation = self.translation origin = self.origin + centered_vectors = vectors - origin + centered_rotated_vectors = self.rotator(centered_vectors, self.rotation_angle * time) + rotated_vectors = centered_rotated_vectors + origin + return np.squeeze(rotated_vectors + translation * time) - centered_vectors = vectors - origin - centered_rotated_vectors = self.rotator(centered_vectors, self.rotation_angle * time) - rotated_vectors = centered_rotated_vectors + origin - return rotated_vectors + translation * time + elif len(vectors.shape) == 2: + translation = self.translation.reshape(1,3) + origin = self.origin.reshape(1,3) + centered_vectors = vectors - origin + centered_rotated_vectors = self.rotator(centered_vectors, self.rotation_angle * time) + rotated_vectors = centered_rotated_vectors + origin + if np.isscalar(time): + return rotated_vectors + translation * time + return np.squeeze(rotated_vectors + translation * np.array(time)[:,np.newaxis]) + + elif len(vectors.shape) == 3: + translation = self.translation.reshape(1,3) + origin = self.origin.reshape(1,3) + centered_vectors = vectors - origin + centered_rotated_vectors = self.rotator(centered_vectors.reshape(-1,3), self.rotation_angle * np.tile(time,(4,1)).T.reshape(-1)).reshape(-1,4,3) + rotated_vectors = centered_rotated_vectors + origin + return np.squeeze(rotated_vectors + translation * np.array(time)[:,np.newaxis,np.newaxis]) def rotate(self, vectors, time): """Find the rotational transformation of a set of vectors at a prescribed time. @@ -94,7 +109,7 @@ def rotate(self, vectors, time): Transformed vectors (:obj:`numpy array`) of ``shape=(3,N)``. 
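The multi-shape __call__ above applies the same map in every branch: v' = R(time * angle)(v - origin) + origin + time * translation. A standalone sketch of that map for an (N, 3) array, written with scipy's Rotation instead of the package rotator (axis, angle, translation and points are all illustrative):

    import numpy as np
    from scipy.spatial.transform import Rotation

    rotation_axis = np.array([0.0, 0.0, 1.0])
    rotation_angle = np.radians(90.0)
    translation = np.array([0.0, 0.1, 0.0])
    origin = np.zeros(3)
    time = 0.5

    points = np.array([[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]])         # (N, 3)

    R = Rotation.from_rotvec(rotation_axis * rotation_angle * time)
    moved = R.apply(points - origin) + origin + translation * time
    # The first point ends up near [cos(45 deg), sin(45 deg), 0] + [0, 0.05, 0].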
""" - assert time <= 1 and time >= 0, "The rigid body motion is only valid on the interval time=[0,1]" + #assert time <= 1 and time >= 0, "The rigid body motion is only valid on the interval time=[0,1]" rotated_vectors = self.rotator(vectors, self.rotation_angle * time) return rotated_vectors @@ -183,8 +198,7 @@ def __init__(self, rotation_axis): self.K2 = self.K.dot(self.K) def get_rotation_matrix(self, rotation_angle): - return np.eye(3, 3) + np.sin(rotation_angle) * self.K + \ - (1 - np.cos(rotation_angle)) * self.K2 + return np.squeeze((np.eye(3, 3)[:,:,np.newaxis] + np.sin(rotation_angle) * self.K[:,:,np.newaxis] + (1 - np.cos(rotation_angle)) * self.K2[:,:,np.newaxis]).transpose(2,0,1)) def __call__(self, vectors, rotation_angle): """Rotate a vector in the plane described by v1 and v2 towards v2 a fraction s=[0,1]. @@ -197,5 +211,10 @@ def __call__(self, vectors, rotation_angle): Rotated vectors (:obj:`numpy array`) of ``shape=(3,N)``. """ + R = self.get_rotation_matrix(rotation_angle) - return R.dot(vectors) + + if len(vectors.shape)==1: + vectors = vectors[np.newaxis,:] + + return np.matmul(R,vectors[:,:,np.newaxis])[:,:,0] # Syntax valid for the rotation fo the G vectors from the grains diff --git a/xrd_simulator/phase.py b/xrd_simulator/phase.py index 1c6e624..86323ce 100644 --- a/xrd_simulator/phase.py +++ b/xrd_simulator/phase.py @@ -74,6 +74,7 @@ def setup_diffracting_planes( NOTE: This function will skip Miller indices that have a zero intensity due to the unit cell structure factor vanishing, i.e forbidden reflections, such as a 100 in an fcc for instance, will not be included. """ + sintlmin = np.sin(min_bragg_angle) / wavelength sintlmax = np.sin(max_bragg_angle) / wavelength self.miller_indices = tools.genhkl_all( diff --git a/xrd_simulator/polycrystal.py b/xrd_simulator/polycrystal.py index 500355a..4034247 100644 --- a/xrd_simulator/polycrystal.py +++ b/xrd_simulator/polycrystal.py @@ -9,112 +9,225 @@ Below follows a detailed description of the polycrystal class attributes and functions. """ + +import copy +from multiprocessing import Pool import numpy as np from scipy.spatial import ConvexHull +import pandas as pd import dill -import copy from xfab import tools from xrd_simulator.scattering_unit import ScatteringUnit from xrd_simulator import utils, laue -from multiprocessing import Pool def _diffract(dict): + """ + Compute diffraction for a subset of the polycrystal. + Args: + args (dict): A dictionary containing the following keys: + - 'beam' (Beam): Object representing the incident X-ray beam. + - 'detector' (Detector): Object representing the X-ray detector. + - 'rigid_body_motion' (RigidBodyMotion): Object describing the polycrystal's transformation. + - 'phases' (list): List of Phase objects representing the phases present in the polycrystal. + - 'espherecentroids' (numpy.ndarray): Array containing the centroids of the scattering elements. + - 'eradius' (numpy.ndarray): Array containing the radii of the scattering elements. + - 'orientation_lab' (numpy.ndarray): Array containing orientation matrices in laboratory coordinates. + - 'eB' (numpy.ndarray): Array containing per-element 3x3 B matrices mapping hkl values to crystal coordinates. + - 'element_phase_map' (numpy.ndarray): Array mapping elements to phases. + - 'ecoord' (numpy.ndarray): Array containing coordinates of the scattering elements. + - 'verbose' (bool): Flag indicating whether to print progress. + - 'proximity' (bool): Flag indicating whether to remove grains unlikely to be hit by the beam. 
+ - 'BB_intersection' (bool): Flag indicating whether to use Bounding-Box intersection for speed. + + Returns: + list: A list of ScatteringUnit objects representing diffraction events. + """ - beam = dict['beam'] - detector = dict['detector'] - rigid_body_motion = dict['rigid_body_motion'] - phases = dict['phases'] - espherecentroids = dict['espherecentroids'] - eradius = dict['eradius'] - orientation_lab = dict['orientation_lab'] - eB = dict['eB'] - element_phase_map = dict['element_phase_map'] - ecoord = dict['ecoord'] - verbose = dict['verbose'] - number_of_elements = ecoord.shape[0] - - rho_0_factor = -beam.wave_vector.dot(rigid_body_motion.rotator.K2) - rho_1_factor = beam.wave_vector.dot(rigid_body_motion.rotator.K) - rho_2_factor = beam.wave_vector.dot( - np.eye(3, 3) + rigid_body_motion.rotator.K2) - + beam = dict["beam"] + detector = dict["detector"] + rigid_body_motion = dict["rigid_body_motion"] + phases = dict["phases"] + espherecentroids = dict["espherecentroids"] + eradius = dict["eradius"] + orientation_lab = dict["orientation_lab"] + eB = dict["eB"] + element_phase_map = dict["element_phase_map"] + ecoord = dict["ecoord"] + verbose = dict[ + "verbose" + ] # should be deprecated or repurposed since computation now takes place phase by phase not by individual scatterer + proximity = dict["proximity"] + BB_intersection = dict["BB_intersection"] + number_of_elements = ecoord.shape[0] + + rho_0_factor = np.float32(-beam.wave_vector.dot(rigid_body_motion.rotator.K2)) + rho_1_factor = np.float32(beam.wave_vector.dot(rigid_body_motion.rotator.K)) + rho_2_factor = np.float32( + beam.wave_vector.dot(np.eye(3, 3) + rigid_body_motion.rotator.K2) + ) + + if proximity: + # Grains with no chance to be hit by the beam are removed beforehand, if proximity is toggled as True + proximity_intervals = beam._get_proximity_intervals( + espherecentroids, eradius, rigid_body_motion + ) + possible_scatterers_mask = np.array( + [pi[0] is not None for pi in proximity_intervals] + ) + espherecentroids = np.float32(espherecentroids[possible_scatterers_mask]) + eradius = np.float32(eradius[possible_scatterers_mask]) + + orientation_lab = np.float32(orientation_lab[possible_scatterers_mask]) + eB = np.float32(eB[possible_scatterers_mask]) + element_phase_map = element_phase_map[possible_scatterers_mask] + ecoord = np.float32(ecoord[possible_scatterers_mask]) + + reflections_df = ( + pd.DataFrame() + ) # We create a dataframe to store all the relevant values for each individual reflection inr an organized manner + scattering_units = [] # The output + + # For each phase of the sample, we compute all reflections at once in a vectorized manner + for i, phase in enumerate(phases): + # Get all scatterers belonging to one phase at a time, and the corresponding miller indices. + grain_index = np.where(element_phase_map == i)[0] + miller_indices = np.float32(phase.miller_indices) + # Get all scattering vectors for all scatterers in a given phase + G_0 = laue.get_G(orientation_lab[grain_index], eB[grain_index], miller_indices) + # Now G_0 and rho_factors are sent before computation to save memory when diffracting many grains. 
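The G_0 block produced by laue.get_G for one phase has shape (elements, 3, hkl planes): the stacked per-element U and B matrices act on all Miller indices in a single matmul. A shape-level sketch with placeholder matrices (identity U and B carry no physical meaning here):

    import numpy as np

    n_elements = 4
    U = np.tile(np.eye(3), (n_elements, 1, 1))              # (n, 3, 3) orientations
    B = np.tile(np.eye(3), (n_elements, 1, 1))              # (n, 3, 3) B matrices
    miller_indices = np.array([[1, 0, 0], [1, 1, 0], [1, 1, 1]], dtype=np.float32)

    G_0 = np.float32(np.matmul(np.matmul(U, B), miller_indices.T))
    assert G_0.shape == (n_elements, 3, 3)                  # (tetrahedra, xyz, hkl planes)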
+ reflection_index, time_values = ( + laue.find_solutions_to_tangens_half_angle_equation( + G_0, + rho_0_factor, + rho_1_factor, + rho_2_factor, + rigid_body_motion.rotation_angle, + ) + ) + G_0_reflected = G_0.transpose(0, 2, 1)[ + reflection_index[0, :], reflection_index[1, :] + ] + + del G_0 + # We now assemble the dataframes with the valid reflections for each grain and phase including time, hkl plane and G vector + + table = pd.DataFrame( + { + "Grain": grain_index[reflection_index[0]], + "phase": i, + "hkl": reflection_index[1], + "time": time_values, + "G_0x": G_0_reflected[:, 0], + "G_0y": G_0_reflected[:, 1], + "G_0z": G_0_reflected[:, 2], + } + ) + + del G_0_reflected, reflection_index, time_values + reflections_df = pd.concat([reflections_df, table], axis=0).sort_values( + by="Grain" + ) + + reflections_df = reflections_df[ + (0 < reflections_df["time"]) & (reflections_df["time"] < 1) + ] # We filter out the times which exceed 0 or 1 + reflections_df[["Gx", "Gy", "Gz"]] = rigid_body_motion.rotate( + reflections_df[["G_0x", "G_0y", "G_0z"]].values, reflections_df["time"].values + ) + reflections_df[["k'x", "k'y", "k'z"]] = ( + reflections_df[["Gx", "Gy", "Gz"]] + beam.wave_vector + ) + reflections_df[["Source_x", "Source_y", "Source_z"]] = rigid_body_motion( + espherecentroids[reflections_df["Grain"]], reflections_df["time"].values + ) + reflections_df[["zd", "yd"]] = detector.get_intersection( + reflections_df[["k'x", "k'y", "k'z"]].values, + reflections_df[["Source_x", "Source_y", "Source_z"]].values, + ) + reflections_df = reflections_df[ + detector.contains(reflections_df["zd"], reflections_df["yd"]) + ] + + element_vertices_0 = ecoord[reflections_df["Grain"]] + element_vertices = rigid_body_motion( + element_vertices_0, reflections_df["time"].values + ) + + reflections_np = ( + reflections_df.values + ) # We move from pandas to numpy for enhanced speed scattering_units = [] - proximity_intervals = beam._get_proximity_intervals( - espherecentroids, - eradius, - rigid_body_motion) - - possible_scatterers = np.sum([pi[0] is not None for pi in proximity_intervals]) - - if verbose: - progress_update_rate = 10**int(len(str(int(number_of_elements/1000.)))-1) - - for ei in range(number_of_elements): - - if verbose and ei % progress_update_rate == 0: - progress_bar_message = "Found " + \ - str(possible_scatterers) + " scatterers (mesh has "+str(number_of_elements)+" elements)" - progress_fraction = float( - ei + 1) / number_of_elements - utils._print_progress( - progress_fraction, - message=progress_bar_message) - - # skip elements not illuminated - if proximity_intervals[ei][0] is None: - continue - - G_0 = laue.get_G(orientation_lab[ei], eB[ei], - phases[element_phase_map[ei]].miller_indices.T) - - rho_0s = rho_0_factor.dot(G_0) - rho_1s = rho_1_factor.dot(G_0) - rho_2s = rho_2_factor.dot(G_0) + np.sum((G_0 * G_0), axis=0) / 2. 
- - t1, t2 = laue.find_solutions_to_tangens_half_angle_equation(rho_0s, rho_1s, rho_2s, rigid_body_motion.rotation_angle) - - for hkl_indx in range(G_0.shape[1]): - for time in (t1[hkl_indx],t2[hkl_indx]): - if ~np.isnan(time): - - if utils._contained_by_intervals(time, proximity_intervals[ei]): - - G = rigid_body_motion.rotate( - G_0[:, hkl_indx], time) - scattered_wave_vector = G + beam.wave_vector - - source = rigid_body_motion(espherecentroids[ei], time) - zd, yd = detector.get_intersection(scattered_wave_vector, source) - - if detector.contains(zd, yd): - - element_vertices_0 = ecoord[ei] - - element_vertices = rigid_body_motion( - element_vertices_0.T, time).T - - scattering_region = beam.intersect(element_vertices) - - if scattering_region is not None: - scattering_unit = ScatteringUnit(scattering_region, - scattered_wave_vector, - beam.wave_vector, - beam.wavelength, - beam.polarization_vector, - rigid_body_motion.rotation_axis, - time, - phases[element_phase_map[ei]], - hkl_indx, - ei) + if BB_intersection: + # A Bounding-Box intersection is a simplified way of computing the grains that interact with the beam (to enhance speed), + # simply considering the beam as a prism and the tets that interact are those whose centroid is contained in the prism. + + reflections_np = reflections_np[ + reflections_np[:, 14] < (beam.vertices[:, 1].max()) + ] # + reflections_np = reflections_np[ + reflections_np[:, 14] > (beam.vertices[:, 1].min()) + ] # + reflections_np = reflections_np[ + reflections_np[:, 15] < (beam.vertices[:, 2].max()) + ] # + reflections_np = reflections_np[ + reflections_np[:, 15] > (beam.vertices[:, 2].min()) + ] # + + for ei in range(reflections_np.shape[0]): + scattering_unit = ScatteringUnit( + ConvexHull(element_vertices[ei]), + reflections_np[ei, 10:13], # outgoing wavevector + beam.wave_vector, + beam.wavelength, + beam.polarization_vector, + rigid_body_motion.rotation_axis, + reflections_np[ei, 3], # time + phases[reflections_np[ei, 1].astype(int)], # phase + reflections_np[ei, 2].astype(int), # hkl index + ei, + zd=reflections_np[ + ei, 16 + ], # zd saved to avoid recomputing during redering + yd=reflections_np[ei, 17], + ) # yd saved to avoid recomputing during redering) + + scattering_units.append(scattering_unit) + + else: + """Otherwise, compute the true intersection of each tet with the beam to get the true scattering volume.""" + for ei in range(element_vertices.shape[0]): + scattering_region = beam.intersect(element_vertices[ei]) + + if scattering_region is not None: + scattering_unit = ScatteringUnit( + scattering_region, + reflections_np[ei, 10:13], # outgoing wavevector + beam.wave_vector, + beam.wavelength, + beam.polarization_vector, + rigid_body_motion.rotation_axis, + reflections_np[ei, 3], # time + phases[reflections_np[ei, 1].astype(int)], # phase + reflections_np[ei, 2].astype(int), # hkl index + ei, + zd=reflections_np[ + ei, 16 + ], # zd saved to avoid recomputing during redering + yd=reflections_np[ + ei, 17 + ], # yd saved to avoid recomputing during redering + ) + + scattering_units.append(scattering_unit) - scattering_units.append(scattering_unit) return scattering_units -class Polycrystal(): +class Polycrystal: """Represents a multi-phase polycrystal as a tetrahedral mesh where each element can be a single crystal The polycrystal is created in laboratory coordinates. 
At instantiation it is assumed that the sample and @@ -139,7 +252,12 @@ class Polycrystal(): mesh_lab (:obj:`xrd_simulator.mesh.TetraMesh`): Object representing a tetrahedral mesh which defines the geometry of the sample in a fixed lab frame coordinate system. This quantity is updated when the sample transforms. mesh_sample (:obj:`xrd_simulator.mesh.TetraMesh`): Object representing a tetrahedral mesh which defines the - geometry of the sample in a sample coordinate system. This quantity is not updated when the sample transforms. + geometry of the sample in a sample metadata={'angle':angle, + 'angles':n_angles, + 'steps':n_scans, + 'beam':beam_file, + 'sample':polycrystal_file, + 'detector':detector_file},coordinate system. This quantity is not updated when the sample transforms. orientation_lab (:obj:`numpy array`): Per element orientation matrices mapping from the crystal to the lab coordinate system, this quantity is updated when the sample transforms. (``shape=(N,3,3)``). orientation_sample (:obj:`numpy array`): Per element orientation matrices mapping from the crystal to the sample @@ -154,18 +272,20 @@ class Polycrystal(): """ - def __init__( - self, - mesh, - orientation, - strain, - phases, - element_phase_map=None): + def __init__(self, mesh, orientation, strain, phases, element_phase_map=None): self.orientation_lab = self._instantiate_orientation(orientation, mesh) self.strain_lab = self._instantiate_strain(strain, mesh) - self.element_phase_map, self.phases = self._instantiate_phase(phases, element_phase_map, mesh) - self._eB = self._instantiate_eB(self.orientation_lab, self.strain_lab, self.phases, self.element_phase_map, mesh) + self.element_phase_map, self.phases = self._instantiate_phase( + phases, element_phase_map, mesh + ) + self._eB = self._instantiate_eB( + self.orientation_lab, + self.strain_lab, + self.phases, + self.element_phase_map, + mesh, + ) # Assuming sample and lab frames to be aligned at instantiation. self.mesh_lab = copy.deepcopy(mesh) @@ -174,16 +294,18 @@ def __init__( self.orientation_sample = np.copy(self.orientation_lab) def diffract( - self, - beam, - detector, - rigid_body_motion, - min_bragg_angle=0, - max_bragg_angle=None, - verbose=False, - number_of_processes=1, - number_of_frames=1 - ): + self, + beam, + detector, + rigid_body_motion, + min_bragg_angle=0, + max_bragg_angle=None, + verbose=False, + number_of_processes=1, + number_of_frames=1, + proximity=False, + BB_intersection=False, + ): """Compute diffraction from the rotating and translating polycrystal while illuminated by an xray beam. The xray beam interacts with the polycrystal producing scattering units which are stored in a detector frame. @@ -206,94 +328,114 @@ def diffract( computation. Defaults to 1, i.e a single processes. number_of_frames (:obj:`int`): Optional keyword specifying the number of desired temporally equidistantly spaced frames to be collected. Defaults to 1, which means that the detector reads diffraction during the full rigid body - motion and integrates out the signal to a single frame. The number_of_frames keyword primarily allows for single - rotation axis full 180 dgrs or 360 dgrs sample rotation data sets to be computed rapidly and convinently. + motion and integrates out the signal to a single frame. The number_of_frames keyword primarily allows for single + rotation axis full 180 dgrs or 360 dgrs sample rotation data sets to be computed rapidly and convinently. 
+ proximity (:obj:`bool`): Set to False if all or most grains from the sample are expected to diffract. + For instance, if the diffraction scan illuminates all grains from the sample at least once at a give angle/position. + BB_intersection (:obj:`bool`): Set to True in order to assume the beam as a square prism, the scattering volume for the tetrahedra + to be the whole tetrahedron and the scattering tetrahedra to be all those whose centroids are included in the square prism. + Greatly speeds up computation, valid approximation for powder-like samples. """ - if verbose and number_of_processes!=1: - raise NotImplemented('Verbose mode is not implemented for multiprocesses computations') + if verbose and number_of_processes != 1: + raise NotImplemented( + "Verbose mode is not implemented for multiprocesses computations" + ) min_bragg_angle, max_bragg_angle = self._get_bragg_angle_bounds( - detector, beam, min_bragg_angle, max_bragg_angle) + detector, beam, min_bragg_angle, max_bragg_angle + ) for phase in self.phases: with utils._verbose_manager(verbose): phase.setup_diffracting_planes( - beam.wavelength, - min_bragg_angle, - max_bragg_angle) - - espherecentroids = np.array_split(self.mesh_lab.espherecentroids, number_of_processes, axis=0) - eradius = np.array_split(self.mesh_lab.eradius, number_of_processes, axis=0) - orientation_lab = np.array_split(self.orientation_lab, number_of_processes, axis=0) - eB = np.array_split(self._eB, number_of_processes, axis=0) - element_phase_map = np.array_split(self.element_phase_map, number_of_processes, axis=0) - enod = np.array_split(self.mesh_lab.enod, number_of_processes, axis=0) + beam.wavelength, min_bragg_angle, max_bragg_angle + ) + + espherecentroids = np.array_split( + self.mesh_lab.espherecentroids, number_of_processes, axis=0 + ) + eradius = np.array_split(self.mesh_lab.eradius, number_of_processes, axis=0) + orientation_lab = np.array_split( + self.orientation_lab, number_of_processes, axis=0 + ) + eB = np.array_split(self._eB, number_of_processes, axis=0) + element_phase_map = np.array_split( + self.element_phase_map, number_of_processes, axis=0 + ) + enod = np.array_split(self.mesh_lab.enod, number_of_processes, axis=0) args = [] for i in range(number_of_processes): ecoord = np.zeros((enod[i].shape[0], 4, 3)) - for k,en in enumerate(enod[i]): - ecoord[k,:,:] = self.mesh_lab.coord[en] - args.append( { - 'beam': beam , - 'detector': detector, - 'rigid_body_motion': rigid_body_motion , - 'phases': self.phases, - 'espherecentroids': espherecentroids[i], - 'eradius': eradius[i], - 'orientation_lab': orientation_lab[i], - 'eB': eB[i], - 'element_phase_map': element_phase_map[i], - 'ecoord': ecoord, - 'verbose':verbose - } ) + for k, en in enumerate(enod[i]): + ecoord[k, :, :] = self.mesh_lab.coord[en] + args.append( + { + "beam": beam, + "detector": detector, + "rigid_body_motion": rigid_body_motion, + "phases": self.phases, + "espherecentroids": espherecentroids[i], + "eradius": eradius[i], + "orientation_lab": orientation_lab[i], + "eB": eB[i], + "element_phase_map": element_phase_map[i], + "ecoord": ecoord, + "verbose": verbose, + "proximity": proximity, + "BB_intersection": BB_intersection, + } + ) if number_of_processes == 1: all_scattering_units = _diffract(args[0]) + else: with Pool(number_of_processes) as p: - scattering_units = p.map( _diffract, args ) + scattering_units = p.map(_diffract, args) all_scattering_units = [] for su in scattering_units: all_scattering_units.extend(su) - - if number_of_frames==1: + + if number_of_frames == 1: 
detector.frames.append(all_scattering_units) else: - #TODO: unit test - all_scattering_units.sort(key=lambda scattering_unit: scattering_unit.time, reverse=True) - dt = 1./number_of_frames + # TODO: unit test + all_scattering_units.sort( + key=lambda scattering_unit: scattering_unit.time, reverse=True + ) + dt = 1.0 / number_of_frames start_time_of_current_frame = 0 - while(start_time_of_current_frame <= 1 - 1e-8): + while start_time_of_current_frame <= 1 - 1e-8: frame = [] - while( len(all_scattering_units)>0 and all_scattering_units[-1].time < start_time_of_current_frame + dt): - frame.append( all_scattering_units.pop() ) + while ( + len(all_scattering_units) > 0 + and all_scattering_units[-1].time < start_time_of_current_frame + dt + ): + frame.append(all_scattering_units.pop()) start_time_of_current_frame += dt - detector.frames.append( frame ) - assert len(all_scattering_units)==0 + detector.frames.append(frame) + assert len(all_scattering_units) == 0 def transform(self, rigid_body_motion, time): """Transform the polycrystal by performing a rigid body motion (translation + rotation) - This function will update the polycrystal mesh (update in lab frame) with any dependent quantities, - such as face normals etc. Likewise, it will update the per element crystal orientation - matrices (U) as well as the lab frame description of strain tensors. - - Args: - rigid_body_motion (:obj:`xrd_simulator.motion.RigidBodyMotion`): Rigid body motion object describing the - polycrystal transformation as a function of time on the domain time=[0,1]. - time (:obj:`float`): Time between [0,1] at which to call the rigid body motion. + This function will update the polycrystal mesh (update in lab frame) with any dependent quantities, + such as face normals etc. Likewise, it will update the per element crystal orientation + matrices (U) as well as the lab frame description of strain tensors. + tt`): Time between [0,1] at which to call the rigid body motion. """ - self.mesh_lab.update(rigid_body_motion, time) Rot_mat = rigid_body_motion.rotator.get_rotation_matrix( - rigid_body_motion.rotation_angle * time) + rigid_body_motion.rotation_angle * time + ) + + self.orientation_lab = np.matmul(Rot_mat, self.orientation_lab) - self.orientation_lab = np.dot(Rot_mat, self.orientation_lab).swapaxes(0,1) - self.strain_lab = np.dot( np.dot(Rot_mat, self.strain_lab), Rot_mat.T ).swapaxes(0,1) + self.strain_lab = np.matmul(np.matmul(Rot_mat, self.strain_lab), Rot_mat.T) def save(self, path, save_mesh_as_xdmf=True): """Save polycrystal to disc (via pickling). 
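The multi-frame branch above sorts the scattering units by time and pops them into number_of_frames bins of width dt. The same bookkeeping on bare floats (the event times are made up):

    event_times = [0.93, 0.08, 0.41, 0.67, 0.12]   # illustrative scattering times in [0, 1]
    number_of_frames = 4
    dt = 1.0 / number_of_frames

    events = sorted(event_times, reverse=True)      # latest first, so .pop() yields the earliest
    frames = []
    start = 0.0
    while start <= 1 - 1e-8:
        frame = []
        while events and events[-1] < start + dt:
            frame.append(events.pop())
        frames.append(frame)
        start += dt
    # frames == [[0.08, 0.12], [0.41], [0.67], [0.93]]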
@@ -314,33 +456,39 @@ def save(self, path, save_mesh_as_xdmf=True): xdmf_path = path + ".xdmf" else: pickle_path = path - xdmf_path = path.split('.')[0]+ ".xdmf" + xdmf_path = path.split(".")[0] + ".xdmf" with open(pickle_path, "wb") as f: dill.dump(self, f, dill.HIGHEST_PROTOCOL) if save_mesh_as_xdmf: element_data = {} - element_data['Strain Tensor Component xx'] = self.strain_sample[:, 0, 0] - element_data['Strain Tensor Component yy'] = self.strain_sample[:, 1, 1] - element_data['Strain Tensor Component zz'] = self.strain_sample[:, 2, 2] - element_data['Strain Tensor Component xy'] = self.strain_sample[:, 0, 1] - element_data['Strain Tensor Component xz'] = self.strain_sample[:, 0, 2] - element_data['Strain Tensor Component yz'] = self.strain_sample[:, 1, 2] - element_data['Bunge Euler Angle phi_1 [degrees]'] = [] - element_data['Bunge Euler Angle Phi [degrees]'] = [] - element_data['Bunge Euler Angle phi_2 [degrees]'] = [] - element_data['Misorientation from mean orientation [degrees]'] = [] - - misorientations = utils.get_misorientations(self.orientation_sample) + element_data["Strain Tensor Component xx"] = self.strain_sample[:, 0, 0] + element_data["Strain Tensor Component yy"] = self.strain_sample[:, 1, 1] + element_data["Strain Tensor Component zz"] = self.strain_sample[:, 2, 2] + element_data["Strain Tensor Component xy"] = self.strain_sample[:, 0, 1] + element_data["Strain Tensor Component xz"] = self.strain_sample[:, 0, 2] + element_data["Strain Tensor Component yz"] = self.strain_sample[:, 1, 2] + element_data["Bunge Euler Angle phi_1 [degrees]"] = [] + element_data["Bunge Euler Angle Phi [degrees]"] = [] + element_data["Bunge Euler Angle phi_2 [degrees]"] = [] + element_data["Misorientation from mean orientation [degrees]"] = [] + + misorientations = utils._get_misorientations(self.orientation_sample) for U, misorientation in zip(self.orientation_sample, misorientations): phi_1, PHI, phi_2 = tools.u_to_euler(U) - element_data['Bunge Euler Angle phi_1 [degrees]'].append(np.degrees(phi_1)) - element_data['Bunge Euler Angle Phi [degrees]'].append(np.degrees(PHI)) - element_data['Bunge Euler Angle phi_2 [degrees]'].append(np.degrees(phi_2)) - - element_data['Misorientation from mean orientation [degrees]'].append(np.degrees(misorientation)) - - element_data['Material Phase Index'] = self.element_phase_map + element_data["Bunge Euler Angle phi_1 [degrees]"].append( + np.degrees(phi_1) + ) + element_data["Bunge Euler Angle Phi [degrees]"].append(np.degrees(PHI)) + element_data["Bunge Euler Angle phi_2 [degrees]"].append( + np.degrees(phi_2) + ) + + element_data["Misorientation from mean orientation [degrees]"].append( + np.degrees(misorientation) + ) + + element_data["Material Phase Index"] = self.element_phase_map self.mesh_sample.save(xdmf_path, element_data=element_data) @classmethod @@ -358,80 +506,87 @@ def load(cls, path): """ if not path.endswith(".pc"): raise ValueError("The loaded polycrystal file must end with .pc") - with open(path, 'rb') as f: + with open(path, "rb") as f: return dill.load(f) def _instantiate_orientation(self, orientation, mesh): - """Instantiate the orientations using for smart multi shape handling. 
- - """ - if orientation.shape==(3,3): + """Instantiate the orientations using for smart multi shape handling.""" + if orientation.shape == (3, 3): orientation_lab = np.repeat( - orientation.reshape(1, 3, 3), mesh.number_of_elements, axis=0) - elif orientation.shape==(mesh.number_of_elements,3,3): + orientation.reshape(1, 3, 3), mesh.number_of_elements, axis=0 + ) + elif orientation.shape == (mesh.number_of_elements, 3, 3): orientation_lab = np.copy(orientation) else: raise ValueError("orientation input is of incompatible shape") return orientation_lab def _instantiate_strain(self, strain, mesh): - """Instantiate the strain using for smart multi shape handling. - - """ - if strain.shape==(3,3): - strain_lab = np.repeat(strain.reshape(1, 3, 3), mesh.number_of_elements, axis=0) - elif strain.shape==(mesh.number_of_elements,3,3): + """Instantiate the strain using for smart multi shape handling.""" + if strain.shape == (3, 3): + strain_lab = np.repeat( + strain.reshape(1, 3, 3), mesh.number_of_elements, axis=0 + ) + elif strain.shape == (mesh.number_of_elements, 3, 3): strain_lab = np.copy(strain) else: raise ValueError("strain input is of incompatible shape") return strain_lab def _instantiate_phase(self, phases, element_phase_map, mesh): - """Instantiate the phases using for smart multi shape handling. - - """ + """Instantiate the phases using for smart multi shape handling.""" if not isinstance(phases, list): phases = [phases] if element_phase_map is None: - if len(phases)>1: + if len(phases) > 1: raise ValueError("element_phase_map not set for multiphase polycrystal") element_phase_map = np.zeros((mesh.number_of_elements,), dtype=int) return element_phase_map, phases - def _instantiate_eB(self, orientation_lab, strain_lab, phases, element_phase_map, mesh): + def _instantiate_eB( + self, orientation_lab, strain_lab, phases, element_phase_map, mesh + ): """Compute per element 3x3 B matrices that map hkl (Miller) values to crystal coordinates. - (These are upper triangular matrices such that - G_s = U * B G_hkl - where G_hkl = [h,k,l] lattice plane miller indices and G_s is the sample frame diffraction vectors. - and U are the crystal element orientation matrices.) + (These are upper triangular matrices such that + G_s = U * B G_hkl + where G_hkl = [h,k,l] lattice plane miller indices and G_s is the sample frame diffraction vectors. + and U are the crystal element orientation matrices.) """ _eB = np.zeros((mesh.number_of_elements, 3, 3)) - for i in range(mesh.number_of_elements): - _eB[i,:,:] = utils.lab_strain_to_B_matrix(strain_lab[i,:,:], - orientation_lab[i,:,:], - phases[element_phase_map[i]].unit_cell) + B0s = np.zeros((len(phases), 3, 3)) + for i, phase in enumerate(phases): + B0s[i] = tools.form_b_mat(phase.unit_cell) + grain_indices = np.where(np.array(element_phase_map) == i)[0] + _eB[grain_indices] = utils.lab_strain_to_B_matrix( + strain_lab[grain_indices], orientation_lab[grain_indices], B0s[i] + ) + return _eB - def _get_bragg_angle_bounds( - self, - detector, - beam, - min_bragg_angle, - max_bragg_angle): + def _get_bragg_angle_bounds(self, detector, beam, min_bragg_angle, max_bragg_angle): """Compute a maximum Bragg angle cut of based on the beam sample interection region centroid and detector corners. If the beam graces or misses the sample, the sample centroid is used. 
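The min/max Bragg angles resolved here are handed to phase.setup_diffracting_planes, which turns them into a sin(theta)/lambda window for pre-selecting candidate Miller indices. A small sketch of that conversion and of the matching test on a diffraction vector G (wavelength, cut-off angle and G are illustrative):

    import numpy as np

    wavelength = 0.71                     # illustrative, in Angstrom
    min_bragg_angle = 0.0
    max_bragg_angle = np.radians(12.0)

    sintlmin = np.sin(min_bragg_angle) / wavelength
    sintlmax = np.sin(max_bragg_angle) / wavelength

    # A reflection with diffraction vector G has sin(theta)/lambda = |G| / (4 pi),
    # so it can only diffract if that value falls inside [sintlmin, sintlmax].
    G = np.array([1.5, 0.3, -0.2])
    sintl = np.linalg.norm(G) / (4.0 * np.pi)
    selected = sintlmin <= sintl <= sintlmax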
""" if max_bragg_angle is None: - mesh_nodes_contained_by_beam = self.mesh_lab.coord[ beam.contains(self.mesh_lab.coord.T), : ] + mesh_nodes_contained_by_beam = self.mesh_lab.coord[ + beam.contains(self.mesh_lab.coord.T), : + ] if mesh_nodes_contained_by_beam.shape[0] != 0: source_point = np.mean(mesh_nodes_contained_by_beam, axis=0) else: source_point = self.mesh_lab.centroid - max_bragg_angle = detector.get_wrapping_cone( - beam.wave_vector, source_point) - assert min_bragg_angle >= 0, "min_bragg_angle must be greater or equal than zero" - assert max_bragg_angle > min_bragg_angle, "max_bragg_angle "+str(np.degrees(max_bragg_angle))+"dgrs must be greater than min_bragg_angle "+str(np.degrees(min_bragg_angle))+"dgrs" + max_bragg_angle = detector.get_wrapping_cone(beam.wave_vector, source_point) + assert ( + min_bragg_angle >= 0 + ), "min_bragg_angle must be greater or equal than zero" + assert max_bragg_angle > min_bragg_angle, ( + "max_bragg_angle " + + str(np.degrees(max_bragg_angle)) + + "dgrs must be greater than min_bragg_angle " + + str(np.degrees(min_bragg_angle)) + + "dgrs" + ) return min_bragg_angle, max_bragg_angle diff --git a/xrd_simulator/scattering_unit.py b/xrd_simulator/scattering_unit.py index 7c9facc..83ccaa9 100644 --- a/xrd_simulator/scattering_unit.py +++ b/xrd_simulator/scattering_unit.py @@ -4,11 +4,11 @@ from the polycrystal. """ + import numpy as np class ScatteringUnit(object): - """Defines a scattering region in space as a single crystal as a convex polyhedra. The scattering unit is described in laboratory coordinates. @@ -38,21 +38,23 @@ class ScatteringUnit(object): hkl_indx (:obj:`int`): Index of Miller index in the `phase.miller_indices` list. element_index (:obj:`int`): Index of mesh tetrahedral element refering to a `xrd_simulator.polycrystal.Polycrystal` object from which the scattering unit originated. - """ def __init__( - self, - convex_hull, - scattered_wave_vector, - incident_wave_vector, - wavelength, - incident_polarization_vector, - rotation_axis, - time, - phase, - hkl_indx, - element_index): + self, + convex_hull, + scattered_wave_vector, + incident_wave_vector, + wavelength, + incident_polarization_vector, + rotation_axis, + time, + phase, + hkl_indx, + element_index, + zd=None, + yd=None, + ): self.convex_hull = convex_hull self.scattered_wave_vector = scattered_wave_vector self.incident_wave_vector = incident_wave_vector @@ -62,6 +64,8 @@ def __init__( self.time = time self.phase = phase self.hkl_indx = hkl_indx + self.zd = zd + self.yd = yd self.element_index = element_index @property @@ -87,38 +91,36 @@ def imaginary_structure_factor(self): @property def lorentz_factor(self): - """Compute the Lorentz intensity factor for a scattering_unit. - """ + """Compute the Lorentz intensity factor for a scattering_unit.""" k = self.incident_wave_vector kp = self.scattered_wave_vector - theta = np.arccos(k.dot(kp) / (np.linalg.norm(k)**2)) / 2. - korthogonal = kp - (k * kp.dot(k) / (np.linalg.norm(k)**2)) + theta = np.arccos(k.dot(kp) / (np.linalg.norm(k) ** 2)) / 2.0 + korthogonal = kp - (k * kp.dot(k) / (np.linalg.norm(k) ** 2)) eta = np.arccos( - self.rotation_axis.dot(korthogonal) / - np.linalg.norm(korthogonal)) + self.rotation_axis.dot(korthogonal) / np.linalg.norm(korthogonal) + ) tol = 0.5 - if np.abs(np.degrees(eta)) < tol or np.abs(np.degrees(eta)) > 180-tol or np.degrees(theta) < tol: + if ( + np.abs(np.degrees(eta)) < tol + or np.abs(np.degrees(eta)) > 180 - tol + or np.degrees(theta) < tol + ): return np.inf else: - return 1. 
/ (np.sin(2 * theta) * np.abs(np.sin(eta))) + return 1.0 / (np.sin(2 * theta) * np.abs(np.sin(eta))) @property def polarization_factor(self): - """Compute the Polarization intensity factor for a scattering_unit. - """ - khatp = self.scattered_wave_vector / \ - np.linalg.norm(self.scattered_wave_vector) - return 1 - np.dot(self.incident_polarization_vector, khatp)**2 + """Compute the Polarization intensity factor for a scattering_unit.""" + khatp = self.scattered_wave_vector / np.linalg.norm(self.scattered_wave_vector) + return 1 - np.dot(self.incident_polarization_vector, khatp) ** 2 @property def centroid(self): - """centroid (:obj:`numpy array`): centroid of the scattering region. ``shape=(3,)`` - """ - return np.mean( - self.convex_hull.points[self.convex_hull.vertices], axis=0) + """centroid (:obj:`numpy array`): centroid of the scattering region. ``shape=(3,)``""" + return np.mean(self.convex_hull.points[self.convex_hull.vertices], axis=0) @property def volume(self): - """volume (:obj:`float`): volume of the scattering region volume - """ + """volume (:obj:`float`): volume of the scattering region volume""" return self.convex_hull.volume diff --git a/xrd_simulator/templates.py b/xrd_simulator/templates.py index e813dd9..37268dd 100644 --- a/xrd_simulator/templates.py +++ b/xrd_simulator/templates.py @@ -2,6 +2,7 @@ worry about any of the "under the hood" scripting. """ + import numpy as np import pygalmesh from scipy.spatial.transform import Rotation @@ -25,7 +26,7 @@ "beam_side_length_z", "beam_side_length_y", "rotation_step", - "rotation_axis" + "rotation_axis", ] @@ -73,9 +74,8 @@ def s3dxrd(parameters): for key in PARAMETER_KEYS: if key not in list(parameters): raise ValueError( - "No keyword " + - key + - " found in the input parameters dictionary") + "No keyword " + key + " found in the input parameters dictionary" + ) detector = _get_detector_from_params(parameters) beam = _get_beam_from_params(parameters) @@ -83,15 +83,18 @@ def s3dxrd(parameters): return beam, detector, motion -def polycrystal_from_odf(orientation_density_function, - number_of_crystals, - sample_bounding_cylinder_height, - sample_bounding_cylinder_radius, - unit_cell, - sgname, - path_to_cif_file=None, - maximum_sampling_bin_seperation=np.radians(5.0), - strain_tensor=lambda x: np.zeros((3, 3))): + +def polycrystal_from_odf( + orientation_density_function, + number_of_crystals, + sample_bounding_cylinder_height, + sample_bounding_cylinder_radius, + unit_cell, + sgname, + path_to_cif_file=None, + maximum_sampling_bin_seperation=np.radians(5.0), + strain_tensor=lambda x: np.zeros((3, 3)), +): """Fill a cylinder with crystals from a given orientation density function. The ``orientation_density_function`` is sampled by discretizing orientation space over the unit @@ -142,22 +145,27 @@ def polycrystal_from_odf(orientation_density_function, """ # Sample topology - volume_per_crystal = np.pi * (sample_bounding_cylinder_radius**2) * \ - sample_bounding_cylinder_height / number_of_crystals - max_cell_circumradius = (3 * volume_per_crystal / (np.pi * 4.))**(1 / 3.) + volume_per_crystal = ( + np.pi + * (sample_bounding_cylinder_radius**2) + * sample_bounding_cylinder_height + / number_of_crystals + ) + max_cell_circumradius = (3 * volume_per_crystal / (np.pi * 4.0)) ** (1 / 3.0) # Fudge factor 2.6 gives approximately number_of_crystals elements in the # mesh max_cell_circumradius = 2.65 * max_cell_circumradius - dz = sample_bounding_cylinder_height / 2. 
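As a side note on the mesh-sizing heuristic just above: the cylinder volume is split evenly over the requested crystals, the radius of an equal-volume sphere gives a base circumradius, and the empirical 2.65 factor is there so that the generated mesh ends up with approximately number_of_crystals elements. A minimal numeric sketch of that calculation follows; the sample dimensions are illustrative, not values taken from the package.

    import numpy as np

    number_of_crystals = 500
    sample_bounding_cylinder_height = 100.0  # assumed, same length unit as the radius
    sample_bounding_cylinder_radius = 50.0   # assumed

    volume_per_crystal = (
        np.pi
        * sample_bounding_cylinder_radius**2
        * sample_bounding_cylinder_height
        / number_of_crystals
    )
    # Circumradius of a sphere holding one crystal's share of the cylinder volume.
    base_circumradius = (3 * volume_per_crystal / (4.0 * np.pi)) ** (1 / 3.0)
    # Empirical factor so the mesh holds roughly number_of_crystals tetrahedra.
    max_cell_circumradius = 2.65 * base_circumradius
    print(volume_per_crystal, max_cell_circumradius)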
+ dz = sample_bounding_cylinder_height / 2.0 R = float(sample_bounding_cylinder_radius) cylinder = pygalmesh.generate_mesh( pygalmesh.Cylinder(-dz, dz, R, max_cell_circumradius), max_cell_circumradius=max_cell_circumradius, max_edge_size_at_feature_edges=max_cell_circumradius, - verbose=False) + verbose=False, + ) mesh = TetraMesh._build_tetramesh(cylinder) @@ -167,9 +175,8 @@ def polycrystal_from_odf(orientation_density_function, # Sample spatial texture orientation = _sample_ODF( - orientation_density_function, - maximum_sampling_bin_seperation, - mesh.ecentroids) + orientation_density_function, maximum_sampling_bin_seperation, mesh.ecentroids + ) # Sample spatial strain strain_lab = np.zeros((mesh.number_of_elements, 3, 3)) @@ -182,15 +189,18 @@ def polycrystal_from_odf(orientation_density_function, orientation, strain=strain_lab, phases=phases, - element_phase_map=element_phase_map) + element_phase_map=element_phase_map, + ) + def get_uniform_powder_sample( - sample_bounding_radius, - number_of_grains, - unit_cell, - sgname, - strain_tensor=np.zeros((3, 3)), - path_to_cif_file=None): + sample_bounding_radius, + number_of_grains, + unit_cell, + sgname, + strain_tensor=np.zeros((3, 3)), + path_to_cif_file=None, +): """Generate a polycyrystal with grains overlayed at the origin and orientations drawn uniformly. Args: @@ -216,9 +226,9 @@ def get_uniform_powder_sample( coord, enod, node_number = [], [], 0 r = sample_bounding_radius for _ in range(number_of_grains): - coord.append([r / np.sqrt(3.), r / np.sqrt(3.), -r / np.sqrt(3.)]) - coord.append([r / np.sqrt(3.), -r / np.sqrt(3.), -r / np.sqrt(3.)]) - coord.append([-r / np.sqrt(2.), 0, -r / np.sqrt(2.)]) + coord.append([r / np.sqrt(3.0), r / np.sqrt(3.0), -r / np.sqrt(3.0)]) + coord.append([r / np.sqrt(3.0), -r / np.sqrt(3.0), -r / np.sqrt(3.0)]) + coord.append([-r / np.sqrt(2.0), 0, -r / np.sqrt(2.0)]) coord.append([0, 0, r]) enod.append(list(range(node_number, node_number + 4))) node_number += 3 @@ -232,53 +242,54 @@ def get_uniform_powder_sample( orientation, strain=strain_tensor, phases=phases, - element_phase_map=element_phase_map) + element_phase_map=element_phase_map, + ) + def _get_motion_from_params(parameters): - """Produce a ``xrd_simulator.motion.RigidBodyMotion`` from the s3dxrd params dictionary. - """ - translation = np.array([0., 0., 0.]) + """Produce a ``xrd_simulator.motion.RigidBodyMotion`` from the s3dxrd params dictionary.""" + translation = np.array([0.0, 0.0, 0.0]) return RigidBodyMotion( - parameters["rotation_axis"], - parameters["rotation_step"], - translation) + parameters["rotation_axis"], parameters["rotation_step"], translation + ) def _get_beam_from_params(parameters): - """Produce a ``xrd_simulator.beam.Beam`` from the s3dxrd params dictionary. - """ - dz = parameters['beam_side_length_z'] / 2. - dy = parameters['beam_side_length_y'] / 2. 
- beam_vertices = np.array([ - [-parameters['detector_distance'], -dy, -dz], - [-parameters['detector_distance'], dy, -dz], - [-parameters['detector_distance'], -dy, dz], - [-parameters['detector_distance'], dy, dz], - [parameters['detector_distance'], -dy, -dz], - [parameters['detector_distance'], dy, -dz], - [parameters['detector_distance'], -dy, dz], - [parameters['detector_distance'], dy, dz] - ]) + """Produce a ``xrd_simulator.beam.Beam`` from the s3dxrd params dictionary.""" + dz = parameters["beam_side_length_z"] / 2.0 + dy = parameters["beam_side_length_y"] / 2.0 + beam_vertices = np.array( + [ + [-parameters["detector_distance"], -dy, -dz], + [-parameters["detector_distance"], dy, -dz], + [-parameters["detector_distance"], -dy, dz], + [-parameters["detector_distance"], dy, dz], + [parameters["detector_distance"], -dy, -dz], + [parameters["detector_distance"], dy, -dz], + [parameters["detector_distance"], -dy, dz], + [parameters["detector_distance"], dy, dz], + ] + ) beam_direction = np.array([1.0, 0.0, 0.0]) polarization_vector = np.array([0.0, 1.0, 0.0]) return Beam( - beam_vertices, - beam_direction, - parameters['wavelength'], - polarization_vector) + beam_vertices, beam_direction, parameters["wavelength"], polarization_vector + ) def _get_detector_from_params(parameters): - """Produce a ``xrd_simulator.detector.Detector`` from the s3dxrd params dictionary. - """ + """Produce a ``xrd_simulator.detector.Detector`` from the s3dxrd params dictionary.""" - p_z, p_y = parameters['pixel_side_length_z'], parameters['pixel_side_length_y'] - det_dist = parameters['detector_distance'] - det_pix_y = parameters['number_of_detector_pixels_y'] - det_pix_z = parameters['number_of_detector_pixels_z'] - det_cz, det_cy = parameters['detector_center_pixel_z'], parameters['detector_center_pixel_y'] + p_z, p_y = parameters["pixel_side_length_z"], parameters["pixel_side_length_y"] + det_dist = parameters["detector_distance"] + det_pix_y = parameters["number_of_detector_pixels_y"] + det_pix_z = parameters["number_of_detector_pixels_z"] + det_cz, det_cy = ( + parameters["detector_center_pixel_z"], + parameters["detector_center_pixel_y"], + ) d_0 = np.array([det_dist, -det_cy * p_y, -det_cz * p_z]) d_1 = np.array([det_dist, (det_pix_y - det_cy) * p_y, -det_cz * p_z]) @@ -286,49 +297,23 @@ def _get_detector_from_params(parameters): return Detector(p_z, p_y, d_0, d_1, d_2) + def _sample_ODF(ODF, maximum_sampling_bin_seperation, coordinates): - """Draw orientation matrices form an ODF at spatial locations ``coordinates``. - """ + """Draw orientation matrices form an ODF at spatial locations ``coordinates``.""" - dalpha = maximum_sampling_bin_seperation / 2. # TODO: verify this analytically. - dalpha = np.pi / 2. / int(np.pi / (dalpha * 2.)) + dalpha = maximum_sampling_bin_seperation / 2.0 # TODO: verify this analytically. + dalpha = np.pi / 2.0 / int(np.pi / (dalpha * 2.0)) alpha_1 = np.arange( - 0 + - dalpha / - 2. + - 1e-8, - np.pi / - 2. - - dalpha / - 2. - - 1e-8 + - dalpha, - dalpha) + 0 + dalpha / 2.0 + 1e-8, np.pi / 2.0 - dalpha / 2.0 - 1e-8 + dalpha, dalpha + ) alpha_2 = np.arange( - 0 + - dalpha / - 2. + - 1e-8, - np.pi - - dalpha / - 2. - - 1e-8 + - dalpha, - dalpha) + 0 + dalpha / 2.0 + 1e-8, np.pi - dalpha / 2.0 - 1e-8 + dalpha, dalpha + ) alpha_3 = np.arange( - 0 + - dalpha / - 2. + - 1e-8, - 2 * - np.pi - - dalpha / - 2. 
- - 1e-8 + - dalpha, - dalpha) - - A1, A2, A3 = np.meshgrid(alpha_1, alpha_2, alpha_3, indexing='ij') + 0 + dalpha / 2.0 + 1e-8, 2 * np.pi - dalpha / 2.0 - 1e-8 + dalpha, dalpha + ) + + A1, A2, A3 = np.meshgrid(alpha_1, alpha_2, alpha_3, indexing="ij") A1, A2, A3 = A1.flatten(), A2.flatten(), A3.flatten() q = utils.alpha_to_quarternion(A1, A2, A3) @@ -337,31 +322,29 @@ def _sample_ODF(ODF, maximum_sampling_bin_seperation, coordinates): # volume_element = (np.sin(A1)**2) * np.sin(A2) * (dalpha**3) # Actual volume per bin requires integration as below: - da = dalpha / 2. - a = ((1 / 2.) * (A1 + da - np.sin(A1 + da) * np.cos(A1 + da)) - - (1 / 2.) * (A1 - da - np.sin(A1 - da) * np.cos(A1 - da))) - b = (-np.cos(A2 + da) + np.cos(A2 - da)) - c = ((A3 + da) - (A3 - da)) + da = dalpha / 2.0 + a = (1 / 2.0) * (A1 + da - np.sin(A1 + da) * np.cos(A1 + da)) - (1 / 2.0) * ( + A1 - da - np.sin(A1 - da) * np.cos(A1 - da) + ) + b = -np.cos(A2 + da) + np.cos(A2 - da) + c = (A3 + da) - (A3 - da) volume_element = a * b * c assert np.abs(np.sum(volume_element) - (np.pi**2)) < 1e-5 rotations = [] for x in coordinates: probability = volume_element * ODF(x, q) - assert np.abs(np.sum(probability) - - 1.0) < 0.05, "Orientation density function must be be normalised." + assert ( + np.abs(np.sum(probability) - 1.0) < 0.05 + ), "Orientation density function must be be normalised." # Normalisation is not exact due to the discretization. probability = probability / np.sum(probability) - indices = np.linspace( - 0, - len(probability) - 1, - len(probability)).astype(int) + indices = np.linspace(0, len(probability) - 1, len(probability)).astype(int) draw = np.random.choice(indices, size=1, replace=True, p=probability) a1 = A1[draw] + dalpha * (np.random.rand() - 0.5) a2 = A2[draw] + dalpha * (np.random.rand() - 0.5) a3 = A3[draw] + dalpha * (np.random.rand() - 0.5) q_pertubated = utils.alpha_to_quarternion(a1, a2, a3) - rotations.append(Rotation.from_quat( - q_pertubated).as_matrix().reshape(3, 3)) + rotations.append(Rotation.from_quat(q_pertubated).as_matrix().reshape(3, 3)) return np.array(rotations) diff --git a/xrd_simulator/utils.py b/xrd_simulator/utils.py index f0cfe76..4c09e6b 100644 --- a/xrd_simulator/utils.py +++ b/xrd_simulator/utils.py @@ -1,20 +1,42 @@ -"""General internal package utility functions. +""" +General internal package utility functions. + +This module provides various utility functions used for internal package operations. +The functions include mathematical computations, geometric transformations, file handling, +and progress tracking. + +Functions: + _diffractogram: Compute diffractogram from pixelated diffraction pattern. + _contained_by_intervals: Check if a value is contained within specified intervals. + _cif_open: Open a CIF file using the ReadCif function from the CifFile module. + _print_progress: Print a progress bar in the executing shell terminal. + _clip_line_with_convex_polyhedron: Compute lengths of parallel lines clipped by a convex polyhedron. + alpha_to_quarternion: Generate a unit quaternion from spherical angle coordinates on the S3 ball. + lab_strain_to_B_matrix: Convert strain tensors in lab coordinates to lattice matrices (B matrices). + _get_circumscribed_sphere_centroid: Compute the centroid of a circumscribed sphere for a given set of points. + _get_bounding_ball: Compute a minimal bounding ball for a set of Euclidean points. + _set_xfab_logging: Enable or disable logging for the xfab module. 
+ _verbose_manager: Manage global verbose options for logging within with statements. + _strain_as_tensor: Convert a strain vector to a strain tensor. + _strain_as_vector: Convert a strain tensor to a strain vector. + _b_to_epsilon: Compute strain tensor from B matrix for large deformations. + _epsilon_to_b: Compute B matrix from strain tensor for large deformations. + _get_misorientations: Compute minimal angles required to rotate SO3 elements to their mean orientation. + _compute_sides: Compute the lengths of the sides of tetrahedrons. + _circumsphere_of_segments: Compute the minimum circumsphere of line segments. + _circumsphere_of_triangles: Compute the minimum circumsphere of triangles. + _circumsphere_of_tetrahedrons: Compute the circumcenter of tetrahedrons. +""" -""" # TODO: Move some of these back into their classes where they are used. -import numpy as np import logging -from numba import njit -from xfab import tools -from CifFile import ReadCif from itertools import combinations +from CifFile import ReadCif from scipy.spatial.transform import Rotation +import numpy as np +from numba import njit -def _diffractogram( - diffraction_pattern, - det_centre_z, - det_centre_y, - binsize=1.0): +def _diffractogram(diffraction_pattern, det_centre_z, det_centre_y, binsize=1.0): """Compute diffractogram from pixelated diffraction pattern. Args: @@ -33,10 +55,10 @@ def _diffractogram( m, n = diffraction_pattern.shape max_radius = np.max([m, n]) bin_centres = np.arange(0, int(max_radius + 1), binsize) - histogram = np.zeros((len(bin_centres), )) + histogram = np.zeros((len(bin_centres),)) for i in range(m): for j in range(n): - radius = np.sqrt((i - det_centre_z)**2 + (j - det_centre_y)**2) + radius = np.sqrt((i - det_centre_z) ** 2 + (j - det_centre_y) ** 2) bin_index = np.argmin(np.abs(bin_centres - radius)) histogram[bin_index] += diffraction_pattern[i, j] clip_index = len(histogram) - 1 @@ -48,9 +70,9 @@ def _diffractogram( clip_index = np.min([clip_index + m // 10, len(histogram) - 1]) return bin_centres[0:clip_index], histogram[0:clip_index] + def _contained_by_intervals(value, intervals): - """Assert if a float ``value`` is contained by any of a number of ``intervals``. - """ + """Assert if a float ``value`` is contained by any of a number of ``intervals``.""" for bracket in intervals: if value >= bracket[0] and value <= bracket[1]: return True @@ -58,8 +80,7 @@ def _contained_by_intervals(value, intervals): def _cif_open(cif_file): - """Helper function to be able to use the ``.CIFread`` of ``xfab``. - """ + """Helper function to be able to use the ``.CIFread`` of ``xfab``.""" cif_dict = ReadCif(cif_file) return cif_dict[list(cif_dict.keys())[0]] @@ -72,46 +93,48 @@ def _print_progress(progress_fraction, message): message (:obj:`str`): Optional message prepend the loading bar with. (max 55 characters) """ - assert len( - message) <= 55., "Message to print is too long, max 55 characters allowed." + assert ( + len(message) <= 55.0 + ), "Message to print is too long, max 55 characters allowed." 
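For orientation, the radial binning that _diffractogram above performs pixel by pixel can be sketched in a vectorized form; the synthetic pattern and beam-centre values below are made up for illustration and are not the package's defaults.

    import numpy as np

    diffraction_pattern = np.random.poisson(1.0, size=(256, 256)).astype(float)
    det_centre_z, det_centre_y = 128.0, 128.0  # assumed beam centre, in pixels
    binsize = 1.0

    zz, yy = np.indices(diffraction_pattern.shape)
    radius = np.sqrt((zz - det_centre_z) ** 2 + (yy - det_centre_y) ** 2)
    bin_index = np.round(radius / binsize).astype(int)  # nearest bin centre

    # Sum the pixel intensities falling into each radial bin.
    histogram = np.bincount(bin_index.ravel(), weights=diffraction_pattern.ravel())
    bin_centres = np.arange(len(histogram)) * binsize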
progress_in_precent = np.round(100 * progress_fraction, 1) progress_bar_length = int(progress_fraction * 40) - print("\r{0}{1} |{2}{3}|".format(message, " " * - (55 - - len(message)), "█" * - progress_bar_length, " " * - (40 - - progress_bar_length)) + - " " + - str(progress_in_precent) + - "%", end = '') + print( + "\r{0}{1} |{2}{3}|".format( + message, + " " * (55 - len(message)), + "█" * progress_bar_length, + " " * (40 - progress_bar_length), + ) + + " " + + str(progress_in_precent) + + "%", + end="", + ) if progress_fraction == 1.0: print("") @njit def _clip_line_with_convex_polyhedron( - line_points, - line_direction, - plane_points, - plane_normals): + line_points, line_direction, plane_points, plane_normals +): """Compute lengths of parallel lines clipped by a convex polyhedron defined by 2d planes. - For algorithm description see: Mike Cyrus and Jay Beck. “Generalized two- and three- - dimensional clipping”. (1978) The algorithms is based on solving orthogonal equations and - sorting the resulting plane line interestion points to find which are entry and which are - exit points through the convex polyhedron. + For algorithm description see: Mike Cyrus and Jay Beck. “Generalized two- and three- + dimensional clipping”. (1978) The algorithms is based on solving orthogonal equations and + sorting the resulting plane line interestion points to find which are entry and which are + exit points through the convex polyhedron. - Args: - line_points (:obj:`numpy array`): base points of rays - (exterior to polyhedron), ``shape=(n,3)`` - line_direction (:obj:`numpy array`): normalized ray direction - (all rays have the same direction), ``shape=(3,)`` - plane_points (:obj:`numpy array`): point in each polyhedron face plane, ``shape=(m,3)`` - plane_normals (:obj:`numpy array`): outwards element face normals. ``shape=(m,3)`` + Args: + line_points (:obj:`numpy array`): base points of rays + (exterior to polyhedron), ``shape=(n,3)`` + line_direction (:obj:`numpy array`): normalized ray direction + (all rays have the same direction), ``shape=(3,)`` + plane_points (:obj:`numpy array`): point in each polyhedron face plane, ``shape=(m,3)`` + plane_normals (:obj:`numpy array`): outwards element face normals. ``shape=(m,3)`` - Returns: - clip_lengths (:obj:`numpy array`) : intersection lengths. ``shape=(n,)`` + Returns: + clip_lengths (:obj:`numpy array`) : intersection lengths. ``shape=(n,)`` """ clip_lengths = np.zeros((line_points.shape[0],)) @@ -121,12 +144,7 @@ def _clip_line_with_convex_polyhedron( for i, line_point in enumerate(line_points): # find parametric line-plane intersection based on orthogonal equations - t_1 = np.sum( - np.multiply( - plane_points - - line_point, - plane_normals), - axis=1) + t_1 = np.sum(np.multiply(plane_points - line_point, plane_normals), axis=1) # Zero division for a ray parallel to plane, numpy gives np.inf so it # is ok! @@ -153,43 +171,42 @@ def alpha_to_quarternion(alpha_1, alpha_2, alpha_3): """ sin_alpha_1, sin_alpha_2 = np.sin(alpha_1), np.sin(alpha_2) - return np.array([np.cos(alpha_1), - sin_alpha_1 * sin_alpha_2 * np.cos(alpha_3), - sin_alpha_1 * sin_alpha_2 * np.sin(alpha_3), - sin_alpha_1 * np.cos(alpha_2)]).T + return np.array( + [ + np.cos(alpha_1), + sin_alpha_1 * sin_alpha_2 * np.cos(alpha_3), + sin_alpha_1 * sin_alpha_2 * np.sin(alpha_3), + sin_alpha_1 * np.cos(alpha_2), + ] + ).T -def lab_strain_to_B_matrix( - strain_tensor, - crystal_orientation, - unit_cell): - """Take a strain tensor in lab coordinates and produce the lattice matrix (B matrix). 
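The batched lab_strain_to_B_matrix introduced below relies on the same large-deformation relation as the removed single-tensor version, only with the undeformed B0 passed in rather than recomputed from the unit cell for every element. A single-element sketch of that math, using an assumed cubic unit cell, might look like:

    import numpy as np
    from xfab import tools

    unit_cell = [4.05, 4.05, 4.05, 90.0, 90.0, 90.0]  # assumed: a,b,c in angstrom, angles in degrees
    B0 = tools.form_b_mat(unit_cell)                  # undeformed reciprocal lattice matrix

    U = np.eye(3)                                     # crystal orientation, maps crystal -> lab
    strain_lab = 1e-3 * np.array([[1.0, 0.2, 0.0],
                                  [0.2, -0.5, 0.0],
                                  [0.0, 0.0, 0.1]])   # symmetric lab-frame strain

    strain_crystal = U.T @ strain_lab @ U             # rotate the strain to the crystal frame
    C = 2 * strain_crystal + np.eye(3)                # right Cauchy-Green tensor
    F = np.linalg.cholesky(C).T                       # upper-triangular factor, F.T @ F = C
    B = np.linalg.inv(F) @ B0                         # deformed B matrix

Stacking U and strain_lab along a leading axis and swapping the matrix products for np.matmul gives the batched form used in the new helper.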
+def lab_strain_to_B_matrix(strain_tensor, crystal_orientation, B0): + """Take n strain tensors in lab coordinates and produce the lattice matrix (B matrix). Args: strain_tensor (:obj:`numpy array`): Symmetric strain tensor in lab - coordinates. ``shape=(3,3)`` + coordinates. ``shape=(n,3,3)`` crystal_orientation (:obj:`numpy array`): Unitary crystal orientation matrix. - ``crystal_orientation`` maps from crystal to lab coordinates. ``shape=(3,3)`` - unit_cell (:obj:`list` of :obj:`float`): Crystal unit cell representation of the form - [a,b,c,alpha,beta,gamma], where alpha,beta and gamma are in units of degrees while - a,b and c are in units of anstrom. + ``crystal_orientation`` maps from crystal to lab coordinates. ``shape=(n,3,3)`` + B0 matrix (:obj:`numpy array`): Matrix containing the reciprocal underformed lattice parameters.``shape=(3,3)`` Returns: - (:obj:`numpy array`) B matrix, mapping from hkl Miller indices to realspace crystal - coordinates, ``shape=(3,3)``. + (:obj:`numpy array`) B matrix mapping from hkl Miller indices to realspace crystal + coordinates. ``shape=(n,3,3)`` """ - crystal_strain = np.dot( - crystal_orientation.T, np.dot( - strain_tensor, crystal_orientation)) - lattice_matrix = _epsilon_to_b([crystal_strain[0, 0], - crystal_strain[0, 1], - crystal_strain[0, 2], - crystal_strain[1, 1], - crystal_strain[1, 2], - crystal_strain[2, 2]], - unit_cell) - return lattice_matrix + if strain_tensor.ndim == 2: + strain_tensor = strain_tensor[np.newaxis, :, :] + if crystal_orientation.ndim == 2: + crystal_orientation = crystal_orientation[np.newaxis, :, :] + + crystal_strain = np.matmul( + crystal_orientation.transpose(0, 2, 1), + np.matmul(strain_tensor, crystal_orientation), + ) + B = _epsilon_to_b(crystal_strain, B0) + return np.squeeze(B) def _get_circumscribed_sphere_centroid(subset_of_points): @@ -207,10 +224,10 @@ def _get_circumscribed_sphere_centroid(subset_of_points): (:obj:`numpy array`) with ``centroid`` of``shape=(3,)`` """ - A = 2*(subset_of_points[0]-subset_of_points[1:]) - pp = np.sum(subset_of_points*subset_of_points,axis=1) + A = 2 * (subset_of_points[0] - subset_of_points[1:]) + pp = np.sum(subset_of_points * subset_of_points, axis=1) b = pp[0] - pp[1:] - B = (subset_of_points[0]-subset_of_points[1:]).T + B = (subset_of_points[0] - subset_of_points[1:]).T x = np.linalg.solve(A.dot(B), b - A.dot(subset_of_points[0])) return subset_of_points[0] + B.dot(x) @@ -232,27 +249,32 @@ def _get_bounding_ball(points): (:obj:`tuple` of :obj:`numpy array` and :obj:`float`) ``centroid`` and ``radius``. """ - radius, centroids = [],[] - for k in range(2,5): + radius, centroids = [], [] + for k in range(2, 5): for subset_of_points in combinations(points, k): - centroids.append(_get_circumscribed_sphere_centroid(np.array(subset_of_points))) + centroids.append( + _get_circumscribed_sphere_centroid(np.array(subset_of_points)) + ) radius.append(np.max(np.linalg.norm(points - centroids[-1], axis=1))) index = np.argmin(radius) return centroids[index], radius[index] + def _set_xfab_logging(disabled): - """Disable/Enable all loging of xfab; it is very verbose! - """ - xfab_modules = ['tools', - 'structure', - 'atomlib', - 'detector', - 'checks', - 'sg', - 'sglib', - 'symmetry'] + """Disable/Enable all loging of xfab; it is very verbose!""" + xfab_modules = [ + "tools", + "structure", + "atomlib", + "detector", + "checks", + "sg", + "sglib", + "symmetry", + ] for sub_module in xfab_modules: - logging.getLogger('xfab.'+sub_module).disabled = disabled + logging.getLogger("xfab." 
+ sub_module).disabled = disabled + class _verbose_manager(object): """Manage global verbose options in with statements; to turn of @@ -270,46 +292,53 @@ def __exit__(self, type, value, traceback): if self.verbose: _set_xfab_logging(disabled=True) + def _strain_as_tensor(strain_vector): e11, e12, e13, e22, e23, e33 = strain_vector - return np.asarray([ [e11, e12, e13], - [e12, e22, e23], - [e13, e23, e33] ], np.float64) + return np.asarray([[e11, e12, e13], [e12, e22, e23], [e13, e23, e33]], np.float64) + def _strain_as_vector(strain_tensor): - return list(strain_tensor[0, :]) + list(strain_tensor[1, 1:]) + [strain_tensor[2, 2]] + return ( + list(strain_tensor[0, :]) + list(strain_tensor[1, 1:]) + [strain_tensor[2, 2]] + ) -def _b_to_epsilon(B_matrix, unit_cell): - """Handle large deformations as opposed to current xfab.tools.b_to_epsilon - """ + +def _b_to_epsilon(B_matrix, B0): + """Handle large deformations as opposed to current xfab.tools.b_to_epsilon""" B = np.asarray(B_matrix, np.float64) - B0 = tools.form_b_mat(unit_cell) F = np.dot(B0, np.linalg.inv(B)) - strain_tensor = 0.5*(F.T.dot(F) - np.eye(3)) # large deformations + strain_tensor = 0.5 * (F.T.dot(F) - np.eye(3)) # large deformations return _strain_as_vector(strain_tensor) -def _epsilon_to_b(epsilon, unit_cell): - """Handle large deformations as opposed to current xfab.tools.epsilon_to_b - """ - strain_tensor = _strain_as_tensor(epsilon) - C = 2*strain_tensor + np.eye(3, dtype=np.float64) + +def _epsilon_to_b(crystal_strain, B0): + """Handle large deformations as opposed to current xfab.tools.epsilon_to_b""" + C = 2 * crystal_strain + np.eye(3, dtype=np.float32) eigen_vals = np.linalg.eigvalsh(C) - if np.any( np.array(eigen_vals) < 0 ): - raise ValueError("Unfeasible strain tensor with value: "+str(_strain_as_vector(strain_tensor))+ \ - ", will invert the unit cell with negative deformation gradient tensor determinant" ) - F = np.linalg.cholesky(C).T - B0 = tools.form_b_mat(unit_cell) + if np.any(np.array(eigen_vals) < 0): + raise ValueError( + "Unfeasible strain tensor with value: " + + str(_strain_as_vector(crystal_strain)) + + ", will invert the unit cell with negative deformation gradient tensor determinant" + ) + if C.ndim == 3: + F = np.linalg.cholesky(C).transpose(0, 2, 1) + else: + F = np.linalg.cholesky(C).T B = np.linalg.inv(F).dot(B0) return B -def get_misorientations(orientations): - """Compute the minimal angles neccessary to rotate a series of SO3 elements back into their mean orientation. + +def _get_misorientations(orientations): + """ + Compute the minimal angles necessary to rotate a series of SO3 elements back into their mean orientation. Args: orientations (:obj: `numpy.array`): Orientation matrices, shape=(N,3,3) Returns: - (:obj: `numpy.array`): misorientations in units of radians, shape=(N,) + :obj: `numpy.array`: misorientations in units of radians, shape=(N,) """ mean_orientation = Rotation.mean(Rotation.from_matrix(orientations)).as_matrix() misorientations = np.zeros((orientations.shape[0],)) @@ -317,3 +346,130 @@ def get_misorientations(orientations): difference_rotation = Rotation.from_matrix(U.dot(mean_orientation.T)) misorientations[i] = Rotation.magnitude(difference_rotation) return misorientations + + +def _compute_sides(points): + """ + Computes the lengths of the sides of multiple tetrahedrons. + + Args: + points (:obj: `numpy.array`): An array of shape (n, 4, 3), where `n` is the number of tetrahedrons. + Each tetrahedron is defined by 4 vertices in 3D space. 
+ + Returns: + :obj: `numpy.array`: An array of shape (n, 6) containing the lengths of the sides of the tetrahedrons. + Each row corresponds to a tetrahedron and contains the lengths of its 6 sides. + """ + # Reshape the points array to have shape (n, 1, 4, 3) + reshaped_points = points[:, np.newaxis, :, :] + + # Compute the differences between each pair of points + differences = reshaped_points - reshaped_points.transpose(0, 2, 1, 3) + + # Compute the squared distances along the last axis + squared_distances = np.sum(differences**2, axis=-1) + + # Compute the distances by taking the square root of the squared distances + dist_mat = np.sqrt(squared_distances) + + # Extract the 1-to-1 values from the distance matrix + distances = np.hstack( + (dist_mat[:, 0, 1:], dist_mat[:, 1, 2:], dist_mat[:, 2, 3][:, np.newaxis]) + ) + + return distances + + +def _circumsphere_of_segments(segments): + """ + Computes the circumcenters and circumradii of multiple line segments. + + Args: + segments (:obj: `numpy.array`): An array of shape (n, 2, 3), where `n` is the number of line segments. + Each line segment is defined by 2 vertices in 3D space. + + Returns: + tuple(:obj: `numpy.array`, :obj: `numpy.array`): A tuple containing: + - centers (:obj: `numpy.array`): An array of shape (n, 3) containing the circumcenters of the line segments. + - radii (:obj: `numpy.array`): An array of shape (n,) containing the circumradii of the line segments. + """ + centers = np.mean(segments, axis=1) + radii = np.linalg.norm(centers - segments[:, 0, :], axis=1) + return centers, radii * 1.0001 # because loss of floating point precision + + +def _circumsphere_of_triangles(triangles): + """ + Computes the circumcenters and circumradii of multiple triangles. + + Args: + triangles (:obj: `numpy.array`): An array of shape (n, 3, 3), where `n` is the number of triangles. + Each triangle is defined by 3 vertices in 3D space. + + Returns: + tuple(:obj: `numpy.array`, :obj: `numpy.array`): A tuple containing: + - centers (:obj: `numpy.array`): An array of shape (n, 3) containing the circumcenters of the triangles. + - radii (:obj: `numpy.array`): An array of shape (n,) containing the circumradii of the triangles. + """ + ab = triangles[:, 1, :] - triangles[:, 0, :] + ac = triangles[:, 2, :] - triangles[:, 0, :] + + abXac = np.cross(ab, ac) + acXab = np.cross(ac, ab) + + a_to_centre = ( + np.cross(abXac, ab) * ((np.linalg.norm(ac, axis=1) ** 2)[:, np.newaxis]) + + np.cross(acXab, ac) * ((np.linalg.norm(ab, axis=1) ** 2)[:, np.newaxis]) + ) / (2 * (np.linalg.norm(abXac, axis=1) ** 2)[:, np.newaxis]) + + centers = triangles[:, 0, :] + a_to_centre + radii = np.linalg.norm(a_to_centre, axis=1) + + return centers, radii * 1.0001 # because loss of floating point precision + + +def _circumsphere_of_tetrahedrons(tetrahedra): + """ + Computes the circumcenters and circumradii of multiple tetrahedrons. + + Args: + tetrahedra (:obj: `numpy.array`): An array of shape (n, 4, 3), where `n` is the number of tetrahedrons. + Each tetrahedron is defined by 4 vertices in 3D space. + + Returns: + tuple(:obj: `numpy.array`, :obj: `numpy.array`): A tuple containing: + - centers (:obj: `numpy.array`): An array of shape (n, 3) containing the circumcenters of the tetrahedrons. + - radii (:obj: `numpy.array`): An array of shape (n,) containing the circumradii of the tetrahedrons. 
+ """ + v0 = tetrahedra[:, 0, :] + v1 = tetrahedra[:, 1, :] + v2 = tetrahedra[:, 2, :] + v3 = tetrahedra[:, 3, :] + + A = np.vstack( + ( + (v1 - v0).T[np.newaxis, :], + (v2 - v0).T[np.newaxis, :], + (v3 - v0).T[np.newaxis, :], + ) + ).transpose(2, 0, 1) + B = ( + 0.5 + * np.vstack( + ( + np.linalg.norm(v1, axis=1) ** 2 - np.linalg.norm(v0, axis=1) ** 2, + np.linalg.norm(v2, axis=1) ** 2 - np.linalg.norm(v0, axis=1) ** 2, + np.linalg.norm(v3, axis=1) ** 2 - np.linalg.norm(v0, axis=1) ** 2, + ) + ).T + ) + + centers = np.matmul(np.linalg.inv(A), B[:, :, np.newaxis])[:, :, 0] + radii = np.linalg.norm( + (tetrahedra.transpose(2, 0, 1) - centers.transpose(1, 0)[:, :, np.newaxis])[ + :, :, 0 + ], + axis=0, + ) + + return centers, radii * 1.0001 # because loss of floating point precision