From 3c19deb1c181bb3100e60184d044ff274ecf4cb2 Mon Sep 17 00:00:00 2001 From: kbonney Date: Wed, 2 Aug 2023 17:45:47 -0400 Subject: [PATCH] Apply black formatting and renaming of some files --- src/pynumad/__init__.py | 4 +- src/pynumad/analysis/__init__.py | 2 +- src/pynumad/analysis/ansys/beamforce.py | 293 +- .../analysis/ansys/mainAnsysAnalysis.py | 429 ++- src/pynumad/analysis/ansys/read.py | 396 +-- src/pynumad/analysis/ansys/utility.py | 477 +-- src/pynumad/analysis/ansys/write.py | 1753 ++++++----- src/pynumad/analysis/beamUtils.py | 458 +-- src/pynumad/analysis/cubit/cubitBlade.py | 328 +- src/pynumad/analysis/cubit/cubitUtils.py | 2766 ++++++++++------- src/pynumad/analysis/cubit/solidModelUtils.py | 483 +-- src/pynumad/analysis/makeModels.py | 154 +- src/pynumad/io/excel_to_blade.py | 396 ++- src/pynumad/io/mesh_to_yaml.py | 69 +- src/pynumad/io/xml_to_airfoil.py | 29 +- src/pynumad/io/yaml_to_blade.py | 703 +++-- src/pynumad/objects/Airfoil.py | 448 +-- src/pynumad/objects/Blade.py | 1533 +++++---- src/pynumad/objects/Component.py | 56 +- src/pynumad/objects/Material.py | 131 +- src/pynumad/objects/Stack.py | 26 +- src/pynumad/objects/Station.py | 78 +- src/pynumad/objects/Subobjects.py | 134 +- src/pynumad/shell/Boundary2DClass.py | 32 - src/pynumad/shell/Mesh2DClass.py | 981 ------ src/pynumad/shell/Mesh3DClass.py | 283 -- src/pynumad/shell/Segment2DClass.py | 100 - src/pynumad/shell/ShellRegionClass.py | 348 --- src/pynumad/shell/SpatialGridList3DClass.py | 78 - src/pynumad/shell/__init__.py | 2 +- src/pynumad/shell/boundary2d.py | 32 + src/pynumad/shell/mesh2d.py | 1043 +++++++ src/pynumad/shell/mesh3d.py | 325 ++ .../shell/{MeshTools.py => mesh_tools.py} | 244 +- src/pynumad/shell/segment2d.py | 114 + src/pynumad/shell/shell.py | 779 +++-- src/pynumad/shell/shell_region.py | 623 ++++ ...dList2DClass.py => spatial_grid_list2d.py} | 56 +- src/pynumad/shell/spatial_grid_list3d.py | 89 + .../shell/{SurfaceClass.py => surface.py} | 75 +- src/pynumad/tests/test_affinetrans.py | 4 +- src/pynumad/tests/test_airfoil.py | 12 +- src/pynumad/tests/test_blade_io.py | 10 +- src/pynumad/tests/test_interpolator.py | 99 +- src/pynumad/tests/test_mesh.py | 8 +- src/pynumad/tests/test_misc.py | 16 +- src/pynumad/utils/__init__.py | 2 - src/pynumad/utils/affinetrans.py | 12 +- src/pynumad/utils/fatigue.py | 346 ++- src/pynumad/utils/interpolation.py | 65 +- src/pynumad/utils/misc_utils.py | 66 +- 51 files changed, 9655 insertions(+), 7335 deletions(-) delete mode 100644 src/pynumad/shell/Boundary2DClass.py delete mode 100644 src/pynumad/shell/Mesh2DClass.py delete mode 100644 src/pynumad/shell/Mesh3DClass.py delete mode 100644 src/pynumad/shell/Segment2DClass.py delete mode 100644 src/pynumad/shell/ShellRegionClass.py delete mode 100644 src/pynumad/shell/SpatialGridList3DClass.py create mode 100644 src/pynumad/shell/boundary2d.py create mode 100644 src/pynumad/shell/mesh2d.py create mode 100644 src/pynumad/shell/mesh3d.py rename src/pynumad/shell/{MeshTools.py => mesh_tools.py} (51%) create mode 100644 src/pynumad/shell/segment2d.py create mode 100644 src/pynumad/shell/shell_region.py rename src/pynumad/shell/{SpatialGridList2DClass.py => spatial_grid_list2d.py} (55%) create mode 100644 src/pynumad/shell/spatial_grid_list3d.py rename src/pynumad/shell/{SurfaceClass.py => surface.py} (53%) diff --git a/src/pynumad/__init__.py b/src/pynumad/__init__.py index c1bc68e..7cde8b4 100644 --- a/src/pynumad/__init__.py +++ b/src/pynumad/__init__.py @@ -7,14 +7,12 @@ from pynumad.objects.Subobjects 
import MatDBentry, BOM, Ply, Layer, Shearweb from pynumad.io.mesh_to_yaml import mesh_to_yaml -from pynumad import io -from pynumad import objects from pynumad import shell from pynumad import utils from pynumad import analysis -__version__ = '0.0.1' +__version__ = "0.0.1" __copyright__ = """Copyright 2023 National Technology & Engineering Solutions of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 diff --git a/src/pynumad/analysis/__init__.py b/src/pynumad/analysis/__init__.py index be3b513..56e8b0e 100644 --- a/src/pynumad/analysis/__init__.py +++ b/src/pynumad/analysis/__init__.py @@ -1,2 +1,2 @@ from pynumad.analysis import ansys -from pynumad.analysis import cubit \ No newline at end of file +from pynumad.analysis import cubit diff --git a/src/pynumad/analysis/ansys/beamforce.py b/src/pynumad/analysis/ansys/beamforce.py index 1d5a987..f410618 100644 --- a/src/pynumad/analysis/ansys/beamforce.py +++ b/src/pynumad/analysis/ansys/beamforce.py @@ -11,7 +11,7 @@ from pynumad.analysis.ansys.write import * -def beamForceToAnsysShell(nodeData, loads, maptype = 'map3D_fxM0'): +def beamForceToAnsysShell(nodeData, loads, maptype="map3D_fxM0"): """ #beamForceToAnsysShell: Maps AeroDyn forces to an ANSYS blade FE model # ********************************************************************** @@ -20,7 +20,7 @@ def beamForceToAnsysShell(nodeData, loads, maptype = 'map3D_fxM0'): # * See license.txt for disclaimer information * # ********************************************************************** beamForceToAnsysShell(maptype,nodeData,forcesfile,outfile) - + Note: you may omit any or all of the filename arguments and the script will provide a file selection box. @@ -42,7 +42,7 @@ def beamForceToAnsysShell(nodeData, loads, maptype = 'map3D_fxM0'): fx forces produce zero Mz moment 'map3D_fxM0' (default) Maintains Fx, Fy, Mz, and root moments fx forces produce zero Mz moment - + Returns ------- forcemap @@ -52,51 +52,53 @@ def beamForceToAnsysShell(nodeData, loads, maptype = 'map3D_fxM0'): # Transform loads from the FAST blade coordinate system the # ANYS Blade Coordinate System - beta=[[0, -1, 0], #Direction cosine matrix for a 90 degree clockwise rotation - [1, 0, 0], #about the z axis. - [0, 0, 1]] + beta = [ + [0, -1, 0], # Direction cosine matrix for a 90 degree clockwise rotation + [1, 0, 0], # about the z axis. 
+ [0, 0, 1], + ] for i in range(len(loads["rBlade"])): - F = beta @ np.array([loads["Fxb"][i],loads["Fyb"][i],loads["Fzb"][i]]) - M = beta @ np.array([loads["Mxb"][i],loads["Myb"][i],loads["Mzb"][i]]) - #Overwrite loads in the FAST CSYS with the ANSYS CSYS + F = beta @ np.array([loads["Fxb"][i], loads["Fyb"][i], loads["Fzb"][i]]) + M = beta @ np.array([loads["Mxb"][i], loads["Myb"][i], loads["Mzb"][i]]) + # Overwrite loads in the FAST CSYS with the ANSYS CSYS loads["Fxb"][i] = F[0] loads["Fyb"][i] = F[1] loads["Fzb"][i] = F[2] loads["Mxb"][i] = M[0] loads["Myb"][i] = M[1] loads["Mzb"][i] = M[2] - - if 'map2D_fxM0' == maptype: - forcemap = map2D_fxM0(nodeData,loads) - elif 'map3D_fxM0' == maptype: - forcemap = map3D_fxM0(nodeData,loads) - - forcesums = check_sums(nodeData,loads,forcemap) - + + if "map2D_fxM0" == maptype: + forcemap = map2D_fxM0(nodeData, loads) + elif "map3D_fxM0" == maptype: + forcemap = map3D_fxM0(nodeData, loads) + + forcesums = check_sums(nodeData, loads, forcemap) + # write the ansys commands which apply the calculated forces return forcemap, forcesums - -def beamForceToAnsysShellFollower(nodeData, loads, maptype = 'map3D_fxM0'): - #AD2ANSYS Maps AeroDyn forces to an ANSYS blade FE model + +def beamForceToAnsysShellFollower(nodeData, loads, maptype="map3D_fxM0"): + # AD2ANSYS Maps AeroDyn forces to an ANSYS blade FE model # ********************************************************************** # * Part of the SNL NuMAD Toolbox * # * Developed by Sandia National Laboratories Wind Energy Technologies * # * See license.txt for disclaimer information * # ********************************************************************** # ad2ansys(maptype,nodeData,forcesfile,outfile) - + # Note: you may omit any or all of the filename arguments and the script # will provide a file selection box. - + # maptype = 'map2D_fxM0' Maintains Fx, Fy, and Mz # fx forces produce zero Mz moment # 'map3D_fxM0' (default) Maintains Fx, Fy, Mz, and root moments # fx forces produce zero Mz moment - + # nodeData = node numbers and coordinates for each node - + # forcesfile = name of file containing the load definition with # columns: # Z - spanwise location of forces (center of aero element) @@ -106,122 +108,137 @@ def beamForceToAnsysShellFollower(nodeData, loads, maptype = 'map3D_fxM0'): # Alpha - CURRENTLY UNUSED # x_off - # y_off - - + # outfile = name of the output file to be /INPUT in ANSYS - - #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> + + # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> # Transform loads from the FAST blade coordinate system the # ANYS Blade Coordinate System - - beta=[[0, -1, 0], #Direction cosine matrix for a 90 degree clockwise rotation - [1, 0, 0], #about the z axis. - [0, 0, 1]] + + beta = [ + [0, -1, 0], # Direction cosine matrix for a 90 degree clockwise rotation + [1, 0, 0], # about the z axis. 
+ [0, 0, 1], + ] for i in range(len(loads["rBlade"])): - F = beta * np.array([[loads["Fxb"](i)],[loads["Fyb"](i)],[loads["Fzb"](i)]]) - M = beta * np.array([[loads["Mxb"](i)],[loads["Myb"](i)],[loads["Mzb"](i)]]) - #Overwrite loads in the FAST CSYS with the ANSYS CSYS + F = beta * np.array([[loads["Fxb"](i)], [loads["Fyb"](i)], [loads["Fzb"](i)]]) + M = beta * np.array([[loads["Mxb"](i)], [loads["Myb"](i)], [loads["Mzb"](i)]]) + # Overwrite loads in the FAST CSYS with the ANSYS CSYS loads["Fxb"][i] = F[0] loads["Fyb"][i] = F[1] loads["Fzb"][i] = F[2] loads["Mxb"][i] = M[0] loads["Myb"][i] = M[1] loads["Mzb"][i] = M[2] - - #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + + # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< # set the default mapping algorithm - if not ('maptype' is not None) or len(maptype)==0: - maptype = 'map3D_fxM0' - - #Look through nodeData, remove root nodes + if not ("maptype" is not None) or len(maptype) == 0: + maptype = "map3D_fxM0" + + # Look through nodeData, remove root nodes nodeData_new = [] - for l in range(len(nodeData[:,2])): - if nodeData[l,2] == 0: - continue + for l in range(len(nodeData[:, 2])): + if nodeData[l, 2] == 0: + continue # nodeData_new(l,:) = []; else: - nodeData_new.append(nodeData[l,:]) - + nodeData_new.append(nodeData[l, :]) + nodeData = np.array(nodeData_new) - if 'map2D_fxM0' == maptype: - forcemap = map2D_fxM0(nodeData,loads) + if "map2D_fxM0" == maptype: + forcemap = map2D_fxM0(nodeData, loads) else: - if 'map3D_fxM0' == maptype: - forcemap = map3D_fxM0(nodeData,loads) - + if "map3D_fxM0" == maptype: + forcemap = map3D_fxM0(nodeData, loads) + forcesums = check_sums(nodeData, loads, forcemap) - + # write the ansys commands which apply the calculated forces # writeforcefile(outfile,forcemap,forcesums,maptype) - + return forcemap, forcesums - - + + def check_sums(nodeData, loads, forcemap): forcesums = dict() forcesums["Z"] = np.asarray(loads["rBlade"]) forcesums["Fx"] = np.zeros((len(loads["Fxb"]), 2)) - forcesums["Fx"][:,0] = np.asarray(loads["Fxb"]) + forcesums["Fx"][:, 0] = np.asarray(loads["Fxb"]) forcesums["Fy"] = np.zeros((len(loads["Fyb"]), 2)) - forcesums["Fy"][:,0] = np.asarray(loads["Fyb"]) + forcesums["Fy"][:, 0] = np.asarray(loads["Fyb"]) forcesums["M"] = np.zeros((len(loads["Mzb"]), 2)) - forcesums["M"][:,0] = np.asarray(loads["Mzb"]) + forcesums["M"][:, 0] = np.asarray(loads["Mzb"]) ## EMA original: # forcesums["RootMx"][:,0] = loads["rBlade"] .* loads["Fyb"]; # forcesums["RootMy"][:,0] = loads["rBlade"] .* loads["Fxb"]; ## changed to: forcesums["RootMx"] = np.zeros((len(loads["rBlade"]), 2)) - forcesums["RootMx"][:,0] = np.multiply(- np.asarray(loads["rBlade"]),np.asarray(loads["Fyb"])) + np.multiply(loads["prebend"],loads["Fzb"]) + forcesums["RootMx"][:, 0] = np.multiply( + -np.asarray(loads["rBlade"]), np.asarray(loads["Fyb"]) + ) + np.multiply(loads["prebend"], loads["Fzb"]) forcesums["RootMy"] = np.zeros((len(loads["rBlade"]), 2)) - forcesums["RootMy"][:,0] = np.multiply(np.asarray(loads["rBlade"]), np.asarray(loads["Fxb"])) - np.multiply(loads["presweep"],loads["Fzb"]) + forcesums["RootMy"][:, 0] = np.multiply( + np.asarray(loads["rBlade"]), np.asarray(loads["Fxb"]) + ) - np.multiply(loads["presweep"], loads["Fzb"]) ## END - + for bk in range(len(loads["Fxb"])): i = np.where(forcemap["bin"] == bk) - x = nodeData[i,1] - loads["presweep"][bk] - y = nodeData[i,2] - loads["prebend"][bk] - z = nodeData[i,3] - forcesums["Fx"][bk,1] = sum(forcemap["fx"][i]) - forcesums["Fy"][bk,1] = sum(forcemap["fy"][i]) - forcesums["M"][bk,1] = 
sum(np.multiply(- y.reshape(-1),forcemap["fx"][i]) + np.multiply(x.reshape(-1),forcemap["fy"][i])) + x = nodeData[i, 1] - loads["presweep"][bk] + y = nodeData[i, 2] - loads["prebend"][bk] + z = nodeData[i, 3] + forcesums["Fx"][bk, 1] = sum(forcemap["fx"][i]) + forcesums["Fy"][bk, 1] = sum(forcemap["fy"][i]) + forcesums["M"][bk, 1] = sum( + np.multiply(-y.reshape(-1), forcemap["fx"][i]) + + np.multiply(x.reshape(-1), forcemap["fy"][i]) + ) ## EMA original: # forcesums["RootMx"](bk,2) = sum(z.*forcemap["fy"][i]); # forcesums["RootMy"](bk,2) = sum(z.*forcemap["fx"][i]); ## changed to: - x = nodeData[i,0] - y = nodeData[i,1] - forcesums["RootMx"][bk,0] = sum(np.multiply(- z.reshape(-1),forcemap["fy"][i]) + np.multiply(y.reshape(-1),forcemap["fz"][i])) - forcesums["RootMy"][bk,0] = sum(np.multiply(z.reshape(-1),forcemap["fx"][i]) - np.multiply(x.reshape(-1),forcemap["fz"][i])) + x = nodeData[i, 0] + y = nodeData[i, 1] + forcesums["RootMx"][bk, 0] = sum( + np.multiply(-z.reshape(-1), forcemap["fy"][i]) + + np.multiply(y.reshape(-1), forcemap["fz"][i]) + ) + forcesums["RootMy"][bk, 0] = sum( + np.multiply(z.reshape(-1), forcemap["fx"][i]) + - np.multiply(x.reshape(-1), forcemap["fz"][i]) + ) ## END - + return forcesums - - -def map2D_fxM0(nodeData, loads): + + +def map2D_fxM0(nodeData, loads): # Map forces such that the equivalent Fx Fy and M are maintained for each -# section. It is assumed that only the fy forces contribute to M and fx -# are balanced to produce no moment. + # section. It is assumed that only the fy forces contribute to M and fx + # are balanced to produce no moment. mesh = dict() - mesh["n"] = nodeData[:,0] - mesh["x"] = nodeData[:,1] - mesh["y"] = nodeData[:,2] - mesh["z"] = nodeData[:,3] + mesh["n"] = nodeData[:, 0] + mesh["x"] = nodeData[:, 1] + mesh["y"] = nodeData[:, 2] + mesh["z"] = nodeData[:, 3] # divide nodes into spanwise groups - bin = np.zeros((mesh["n"].shape,mesh["n"].shape)) - + bin = np.zeros((mesh["n"].shape, mesh["n"].shape)) + for nk in range(len(mesh["n"])): halfDZ = loads["rBlade"][1] - for bk in np.arange(1,np.asarray(loads["rBlade"]).size+1).reshape(-1): + for bk in np.arange(1, np.asarray(loads["rBlade"]).size + 1).reshape(-1): OBedge = loads["rBlade"][bk] + halfDZ if (mesh["z"](nk) <= OBedge) or (bk == np.asarray(loads["rBlade"]).size): bin[nk] = bk break halfDZ = loads["rBlade"](bk + 1) - OBedge - + forcemap = dict() - forcemap["n"] = nodeData[:,0] + forcemap["n"] = nodeData[:, 0] forcemap["bin"] = bin - forcemap["fx"] = np.zeros((mesh["n"].shape,mesh["n"].shape)) - forcemap["fy"] = np.zeros((mesh["n"].shape,mesh["n"].shape)) + forcemap["fx"] = np.zeros((mesh["n"].shape, mesh["n"].shape)) + forcemap["fy"] = np.zeros((mesh["n"].shape, mesh["n"].shape)) for bk in range(len(loads["Fxb"])): i = np.where(bin == bk) N = len(i) @@ -229,31 +246,27 @@ def map2D_fxM0(nodeData, loads): y = mesh["y"][i] - loads["prebend"][bk] mx = np.mean(x) my = np.mean(y) - mxx = np.mean(x ** 2) - myy = np.mean(y ** 2) - #mxy = np.mean(x.*y); - A = np.array([ - [my,1,0,0], - [0,0,mx,1], - [0,0,mxx,mx], - [- myy,- my,0,0]]) - F = 1 / N * np.array([loads["Fxb"][bk],loads["Fyb"][bk],loads["Mzb"][bk],0]) - ab = np.linalg.solve(A,F) + mxx = np.mean(x**2) + myy = np.mean(y**2) + # mxy = np.mean(x.*y); + A = np.array([[my, 1, 0, 0], [0, 0, mx, 1], [0, 0, mxx, mx], [-myy, -my, 0, 0]]) + F = 1 / N * np.array([loads["Fxb"][bk], loads["Fyb"][bk], loads["Mzb"][bk], 0]) + ab = np.linalg.solve(A, F) forcemap["fx"][i] = ab[0] * y + ab[1] forcemap["fy"][i] = ab[2] * x + ab[3] - + return 
forcemap - - -def map3D_fxM0(nodeData, loads): + + +def map3D_fxM0(nodeData, loads): # Map forces such that the equivalent Fx Fy and M are maintained for each # section. It is assumed that only the fy forces contribute to M and fx # are balanced to produce no moment. mesh = dict() - mesh["n"] = nodeData[:,0] - mesh["x"] = nodeData[:,1] - mesh["y"] = nodeData[:,2] - mesh["z"] = nodeData[:,3] + mesh["n"] = nodeData[:, 0] + mesh["x"] = nodeData[:, 1] + mesh["y"] = nodeData[:, 2] + mesh["z"] = nodeData[:, 3] # aero.Z = forces{1}; # aero.Fx = forces{2}; # aero.Fy = forces{3}; @@ -261,21 +274,21 @@ def map3D_fxM0(nodeData, loads): # aero.alpha = forces{5}; # aero.xoff = forces{6}; # aero.yoff = forces{7}; - + # divide nodes into spanwise groups bin = np.zeros(mesh["n"].shape) - + for nk in range(len(mesh["n"])): halfDZ = loads["rBlade"][0] for bk in range(len(loads["rBlade"])): OBedge = loads["rBlade"][bk] + halfDZ - if (mesh["z"][nk] <= OBedge) or (bk == len(loads["rBlade"])-1): + if (mesh["z"][nk] <= OBedge) or (bk == len(loads["rBlade"]) - 1): bin[nk] = bk break halfDZ = loads["rBlade"][bk + 1] - OBedge - + forcemap = dict() - forcemap["n"] = nodeData[:,0] + forcemap["n"] = nodeData[:, 0] forcemap["bin"] = bin forcemap["fx"] = np.zeros(mesh["n"].shape) forcemap["fy"] = np.zeros(mesh["n"].shape) @@ -291,12 +304,12 @@ def map3D_fxM0(nodeData, loads): mx = np.mean(x) my = np.mean(y) mz = np.mean(z) - mxx = np.mean(x ** 2) - myy = np.mean(y ** 2) - mzz = np.mean(z ** 2) - #mxy = np.mean(x.*y); - mzx = np.mean(np.multiply(z,x)) - mzy = np.mean(np.multiply(z,y)) + mxx = np.mean(x**2) + myy = np.mean(y**2) + mzz = np.mean(z**2) + # mxy = np.mean(x.*y); + mzx = np.mean(np.multiply(z, x)) + mzy = np.mean(np.multiply(z, y)) ## Original # A = [mz, my, 1, 0, 0, 0; # 0, 0, 0, mz, mx, 1; @@ -309,30 +322,38 @@ def map3D_fxM0(nodeData, loads): # abc = A\F; ## Changed to: ## Add/modify equations to include forces in the Z-direction. 
- A = np.array([[mz,my,1,0,0,0,0], - [0,0,0,mz,mx,1,0], - [0,0,0,0,0,0,1], - [0,0,0,mzx,mxx,mx,0], - [- mzy,- myy,- my,0,0,0,0], - [0,0,0,mzz,mzx,mz,- my], - [mzz,mzy,mz,0,0,0,- mx]]) - F = 1 / N * np.array([ - loads["Fxb"][bk], - loads["Fyb"][bk], - loads["Fzb"][bk], - loads["Mzb"][bk], - 0, - loads["rBlade"][bk] * loads["Fyb"][bk], - loads["rBlade"][bk] * loads["Fxb"][bk] - ]) - abc = np.linalg.solve(A,F) + A = np.array( + [ + [mz, my, 1, 0, 0, 0, 0], + [0, 0, 0, mz, mx, 1, 0], + [0, 0, 0, 0, 0, 0, 1], + [0, 0, 0, mzx, mxx, mx, 0], + [-mzy, -myy, -my, 0, 0, 0, 0], + [0, 0, 0, mzz, mzx, mz, -my], + [mzz, mzy, mz, 0, 0, 0, -mx], + ] + ) + F = ( + 1 + / N + * np.array( + [ + loads["Fxb"][bk], + loads["Fyb"][bk], + loads["Fzb"][bk], + loads["Mzb"][bk], + 0, + loads["rBlade"][bk] * loads["Fyb"][bk], + loads["rBlade"][bk] * loads["Fxb"][bk], + ] + ) + ) + abc = np.linalg.solve(A, F) ## END forcemap["fx"][i] = abc[0] * z + abc[1] * y + abc[2] forcemap["fy"][i] = abc[3] * z + abc[4] * x + abc[5] ## EMA added: forcemap["fz"][i] = abc[6] ## END - + return forcemap - - \ No newline at end of file diff --git a/src/pynumad/analysis/ansys/mainAnsysAnalysis.py b/src/pynumad/analysis/ansys/mainAnsysAnalysis.py index 5cf02f0..e7445ef 100644 --- a/src/pynumad/analysis/ansys/mainAnsysAnalysis.py +++ b/src/pynumad/analysis/ansys/mainAnsysAnalysis.py @@ -13,13 +13,14 @@ from pynumad.analysis.ansys.read import * from pynumad.analysis.ansys.write import * from pynumad.analysis.ansys.beamforce import * - + + def mainAnsysAnalysis( - blade: Blade, - meshData: dict, - loadsTable: list, - analysisConfig: dict, - ): + blade: Blade, + meshData: dict, + loadsTable: list, + analysisConfig: dict, +): """ Parameters ---------- @@ -37,159 +38,201 @@ def mainAnsysAnalysis( ------- """ anFlagNames = analysisConfig["analysisFlags"].keys() - ansysPath = path_data['ansysPath'] - ansys_product = 'ANSYS' - if ('imperfection' in analysisConfig["analysisFlags"]) and \ - analysisConfig["analysisFlags"]["imperfection"] and \ - not analysisConfig["analysisFlags"]["globalBuckling"]: - raise Exception('Specify number of buckling modes when performing nonlinear buckling') - + ansysPath = path_data["ansysPath"] + ansys_product = "ANSYS" + if ( + ("imperfection" in analysisConfig["analysisFlags"]) + and analysisConfig["analysisFlags"]["imperfection"] + and not analysisConfig["analysisFlags"]["globalBuckling"] + ): + raise Exception( + "Specify number of buckling modes when performing nonlinear buckling" + ) + # Original mesh file to analize - if ('meshFile' not in analysisConfig): - analysisConfig["meshFile"] = 'master' - + if "meshFile" not in analysisConfig: + analysisConfig["meshFile"] = "master" + # File name base name for ansys analysis files - if ('analysisFileName' in analysisConfig): + if "analysisFileName" in analysisConfig: ansysFilename = analysisConfig["analysisFileName"] else: - ansysFilename = 'FEmodel' - + ansysFilename = "FEmodel" + # Number of CPUs to use - if 'np' in analysisConfig and analysisConfig["np"] > 0: + if "np" in analysisConfig and analysisConfig["np"] > 0: ncpus = analysisConfig["np"] else: ncpus = 1 - - #Initialize + + # Initialize designvar = dict() for key in anFlagNames: - if key in ['globalBuckling', 'resultantVSspan', 'deflection', 'mass']: + if key in ["globalBuckling", "resultantVSspan", "deflection", "mass"]: if analysisConfig["analysisFlags"][key] != 0: - designvar[key] = [None]*len(loadsTable) - elif key in ['localBuckling', 'failure', 'fatigue', 'imperfection', 'mass']: + designvar[key] = [None] 
* len(loadsTable) + elif key in ["localBuckling", "failure", "fatigue", "imperfection", "mass"]: if not len(analysisConfig["analysisFlags"][key]) == 0: - designvar[key] = [None]*len(loadsTable) - + designvar[key] = [None] * len(loadsTable) + if not designvar: - raise Exception('no analyses are configured in configuration st.') - + raise Exception("no analyses are configured in configuration st.") + for iLoad in range(len(loadsTable)): ## ************************************************************************ # ================= APPLY LOADS TO FEA MESH ================= #NOTE: Priority - forcefilename = 'forces' + forcefilename = "forces" # only want outershell VVV - nodeData = np.concatenate([np.arange(meshData['nodes'].shape[0]).reshape((-1,1)),meshData['nodes']], axis=1) + nodeData = np.concatenate( + [np.arange(meshData["nodes"].shape[0]).reshape((-1, 1)), meshData["nodes"]], + axis=1, + ) loads = loadsTable[iLoad] write_ansys_loads(nodeData, loads, forcefilename, analysisConfig) ## ************************************************************************ # ================= PERFORM LINEAR STATIC ANALYSIS ================= #NOTE: Priority # run buckling computations in ansys - print(' ') - print('Running ANSYS analysis...') - script_name = 'ansysAnalysis.mac' - script_out = 'ansysAnalysisEcho.out' - fid = open(script_name,'w+') - fid.write('/NERR,,99999999\n' % ()) - fid.write('/CWD, %s\n' % (os.getcwd())) - fid.write('resume,master,db\n' % ()) + print(" ") + print("Running ANSYS analysis...") + script_name = "ansysAnalysis.mac" + script_out = "ansysAnalysisEcho.out" + fid = open(script_name, "w+") + fid.write("/NERR,,99999999\n" % ()) + fid.write("/CWD, %s\n" % (os.getcwd())) + fid.write("resume,master,db\n" % ()) # fprintf(fid,'/FILNAME,''#s'',1\n',ansysFilename); #From master, change the jobname - fid.write('/FILNAME,%s,1\n' % (ansysFilename+'-Load'+str(iLoad))) - #fprintf(fid,'resume\n'); - fid.write('! BEGIN LINEAR STATIC SCRIPT\n' % ()) - fid.write('esel,all\n' % ()) - fid.write('/prep7\n' % ()) - fid.write('fdel,all\n' % ()) - fid.write('/input,%s,src\n' % (forcefilename)) - #Linear Static Analysis - fid.write('/solu\n' % ()) - fid.write('antype,static\n' % ()) - if 'StaticNonlinear' in analysisConfig["analysisFlags"] and not \ - len(analysisConfig["analysisFlags"].StaticNonlinear)==0 and \ - analysisConfig["analysisFlags"].StaticNonlinear != 0: - fid.write('nlgeom,1\n' % ()) - fid.write('OUTRES,all,ALL\n' % ()) + fid.write("/FILNAME,%s,1\n" % (ansysFilename + "-Load" + str(iLoad))) + # fprintf(fid,'resume\n'); + fid.write("! 
BEGIN LINEAR STATIC SCRIPT\n" % ()) + fid.write("esel,all\n" % ()) + fid.write("/prep7\n" % ()) + fid.write("fdel,all\n" % ()) + fid.write("/input,%s,src\n" % (forcefilename)) + # Linear Static Analysis + fid.write("/solu\n" % ()) + fid.write("antype,static\n" % ()) + if ( + "StaticNonlinear" in analysisConfig["analysisFlags"] + and not len(analysisConfig["analysisFlags"].StaticNonlinear) == 0 + and analysisConfig["analysisFlags"].StaticNonlinear != 0 + ): + fid.write("nlgeom,1\n" % ()) + fid.write("OUTRES,all,ALL\n" % ()) # else # fprintf(fid,'pstres,on\n'); - fid.write('irlf,-1\n' % ()) - fid.write('bcsoption,,incore\n' % ()) - fid.write('solve\n' % ()) - fid.write('finish\n' % ()) - #Only compute mass on the first load case - if iLoad == 0 and 'mass' in analysisConfig["analysisFlags"] and \ - analysisConfig["analysisFlags"]['mass'] != 0: - #Get Mass Here - fid.write('*GET, Z_mass, ELEM, 0, MTOT, X\n' % ()) - fid.write('/output, mass,txt\n' % ()) - fid.write('*status,Z_mass\n' % ()) - fid.write('/output\n' % ()) - fid.write('finish\n' % ()) + fid.write("irlf,-1\n" % ()) + fid.write("bcsoption,,incore\n" % ()) + fid.write("solve\n" % ()) + fid.write("finish\n" % ()) + # Only compute mass on the first load case + if ( + iLoad == 0 + and "mass" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"]["mass"] != 0 + ): + # Get Mass Here + fid.write("*GET, Z_mass, ELEM, 0, MTOT, X\n" % ()) + fid.write("/output, mass,txt\n" % ()) + fid.write("*status,Z_mass\n" % ()) + fid.write("/output\n" % ()) + fid.write("finish\n" % ()) ## ************************************************************************ - #================= PERFORM Deflection ANALYSIS ================= #NOTE: Priority - if 'deflection' in analysisConfig["analysisFlags"] and \ - analysisConfig["analysisFlags"]['deflection'] != 0: - deflectionFilename = 'results_deflection' - writeAnsysDeflections(blade,analysisConfig,iLoad,fid,deflectionFilename) + # ================= PERFORM Deflection ANALYSIS ================= #NOTE: Priority + if ( + "deflection" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"]["deflection"] != 0 + ): + deflectionFilename = "results_deflection" + writeAnsysDeflections(blade, analysisConfig, iLoad, fid, deflectionFilename) # calculate face stresses for wrinkling NOTE: Skip - if 'localBuckling' in analysisConfig["analysisFlags"] and not \ - 'imperfection' in analysisConfig["analysisFlags"] and not \ - len(analysisConfig["analysisFlags"].imperfection)==0: - #Check for wrinkling here in a linear analysis - app,SkinAreas,compsInModel = writeAnsysGetFaceStresses(blade,fid,analysisConfig["analysisFlags"].localBuckling) + if ( + "localBuckling" in analysisConfig["analysisFlags"] + and not "imperfection" in analysisConfig["analysisFlags"] + and not len(analysisConfig["analysisFlags"].imperfection) == 0 + ): + # Check for wrinkling here in a linear analysis + app, SkinAreas, compsInModel = writeAnsysGetFaceStresses( + blade, fid, analysisConfig["analysisFlags"].localBuckling + ) ### Output resultant force and moments to file NOTE: Skip - if 'resultantVSspan' in analysisConfig["analysisFlags"] and \ - analysisConfig["analysisFlags"].resultantVSspan != 0: - writeAnsysResultantVSSpan(blade,analysisConfig,iLoad,fid) + if ( + "resultantVSspan" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"].resultantVSspan != 0 + ): + writeAnsysResultantVSSpan(blade, analysisConfig, iLoad, fid) ## ************************************************************************ - # 
================= PERFORM FATIGUE ANALYSIS ================= - if 'fatigue' in analysisConfig["analysisFlags"]: - writeAnsysFatigue(fid,iLoad) + # ================= PERFORM FATIGUE ANALYSIS ================= + if "fatigue" in analysisConfig["analysisFlags"]: + writeAnsysFatigue(fid, iLoad) ## ************************************************************************ # ================= CREAT LOCAL FIELD RESULTS FOR MATLAB ================= #NOTE: Priority - if 'localFields' in analysisConfig["analysisFlags"]: - writeAnsysLocalFields(blade,analysisConfig,iLoad,fid) + if "localFields" in analysisConfig["analysisFlags"]: + writeAnsysLocalFields(blade, analysisConfig, iLoad, fid) ## ************************************************************************ # ================= PERFORM FAILURE ANALYSIS ================= #NOTE: Priority # Initialize GUI commands from batch operation to identify maxima - if 'failure' in analysisConfig["analysisFlags"]: - failureFilename = 'results_failure' - writeAnsysRupture(analysisConfig,iLoad,fid,failureFilename) + if "failure" in analysisConfig["analysisFlags"]: + failureFilename = "results_failure" + writeAnsysRupture(analysisConfig, iLoad, fid, failureFilename) ## ************************************************************************ # ================= PERFORM BUCKLING ANALYSIS ================= #NOTE: Priority - #Linear Buckling Analysis - if 'globalBuckling' in analysisConfig["analysisFlags"] and \ - analysisConfig["analysisFlags"]['globalBuckling'] > 0: - bucklingFilename = 'results_buckling' - writeAnsysLinearBuckling(blade,analysisConfig,iLoad,fid,bucklingFilename) + # Linear Buckling Analysis + if ( + "globalBuckling" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"]["globalBuckling"] > 0 + ): + bucklingFilename = "results_buckling" + writeAnsysLinearBuckling( + blade, analysisConfig, iLoad, fid, bucklingFilename + ) else: - if 'globalBuckling' in analysisConfig["analysisFlags"] and analysisConfig["analysisFlags"]['globalBuckling'] < 0: - raise Exception('analysisConfig["analysisFlags"].globalBuckling must be greater than or equal to zero') + if ( + "globalBuckling" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"]["globalBuckling"] < 0 + ): + raise Exception( + 'analysisConfig["analysisFlags"].globalBuckling must be greater than or equal to zero' + ) fid.close() ## ************************************************************************ # ================= SEND COMMANDS TO ANSYS ================= - args = (ansysPath,ansys_product,script_name,script_out,str(ncpus)) - ansys_call = 'export KMP_STACKSIZE=2048k & %s -b -p %s -I %s -o %s -np %s' % args + args = (ansysPath, ansys_product, script_name, script_out, str(ncpus)) + ansys_call = ( + "export KMP_STACKSIZE=2048k & %s -b -p %s -I %s -o %s -np %s" % args + ) # KMP_STACKSIZE is 512k by default. This is not enough therefore SET # KMP_STACKSIZE=2048k has been specifed. 2048k may not be enough for other # simulations. 
EC - ansys_ps = subprocess.run(ansys_call, shell=True) + ansys_ps = subprocess.run(ansys_call, shell=True) # MATLAB POST PROCESS ########################################## ## ************************************************************************ # ================= READ MASS RESULTS INTO MATLAB ================= - if iLoad == 0 and 'mass' in analysisConfig["analysisFlags"] and \ - analysisConfig["analysisFlags"]['mass'] != 0: - designvar.mass = read_1_ANSYSoutput('mass.txt') + if ( + iLoad == 0 + and "mass" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"]["mass"] != 0 + ): + designvar.mass = read_1_ANSYSoutput("mass.txt") # delete mass.txt ## ************************************************************************ # ================= READ DEFLECTION RESULTS INTO MATLAB ================= - if 'deflection' in analysisConfig["analysisFlags"] and analysisConfig["analysisFlags"]['deflection'] != 0: - designvar['deflection'] = readAnsysDeflections(blade,analysisConfig,iLoad,deflectionFilename) + if ( + "deflection" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"]["deflection"] != 0 + ): + designvar["deflection"] = readAnsysDeflections( + blade, analysisConfig, iLoad, deflectionFilename + ) ## ************************************************************************ # ================= READ STRESS RESULTANTS INTO MATLAB ================= - if 'resultantVSspan' in analysisConfig["analysisFlags"] and analysisConfig["analysisFlags"]['resultantVSspan'] != 0: - fileName = 'resultantVSspan.txt' + if ( + "resultantVSspan" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"]["resultantVSspan"] != 0 + ): + fileName = "resultantVSspan.txt" designvar.resultantVSspan[iLoad] = txt2mat(fileName) os.delete(fileName) # fileName='resultantVSspan2.txt'; @@ -198,81 +241,157 @@ def mainAnsysAnalysis( ## ************************************************************************ # ================= READ LINEAR BUCKLING RESULTS ================= # read buckling results - if 'globalBuckling' in analysisConfig["analysisFlags"] and analysisConfig["analysisFlags"]['globalBuckling'] > 0: - linearLoadFactors = readAnsysLinearBuckling(blade,analysisConfig,iLoad,fid,bucklingFilename) + if ( + "globalBuckling" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"]["globalBuckling"] > 0 + ): + linearLoadFactors = readAnsysLinearBuckling( + blade, analysisConfig, iLoad, fid, bucklingFilename + ) ## ************************************************************************ # ================= PERFORM NON-LINEAR BUCKLING/WRINKLING ANALYSIS ================= # Perform nonlinear buckling here if required (and writeANSYSgetFaceStresses # at the end of the nonlinear analysis for wrikling check - if 'imperfection' in analysisConfig["analysisFlags"] and not len(analysisConfig["analysisFlags"]['imperfection'])==0 : - warnings.warn('output designvar. Currently does not work for nonlinear cases') + if ( + "imperfection" in analysisConfig["analysisFlags"] + and not len(analysisConfig["analysisFlags"]["imperfection"]) == 0 + ): + warnings.warn( + "output designvar. 
Currently does not work for nonlinear cases" + ) imperfection = analysisConfig["analysisFlags"].imperfection / 1000 - nonlinearLoadFactors = np.zeros((len(linearLoadFactors),len(imperfection))) - critDesignvar = np.zeros((len(imperfection),1)) - wrinklingLimitingElementData = np.zeros((len(linearLoadFactors),4,len(imperfection))) - marker = np.array(['-ok','-sk','-dk','-*k','-^k','-k','-pk','-hk']) - #SF=max(LLF); #Use one loads file for all buckling modes + nonlinearLoadFactors = np.zeros((len(linearLoadFactors), len(imperfection))) + critDesignvar = np.zeros((len(imperfection), 1)) + wrinklingLimitingElementData = np.zeros( + (len(linearLoadFactors), 4, len(imperfection)) + ) + marker = np.array( + ["-ok", "-sk", "-dk", "-*k", "-^k", "-k", "-pk", "-hk"] + ) + # SF=max(LLF); #Use one loads file for all buckling modes for jj in range(len(imperfection)): for ii in range(len(linearLoadFactors)): # For each load factor, create a new jobname and database and run a nonlinear static analysis - nonlinearLoadFactors[ii,jj] = writeAnsysNonLinearBuckling(ansysFilename,ansysPath,ansys_product,analysisConfig,ii,jj,ncpus,iLoad) - wrinklingLimitingElementData[ii,:,jj] = wrinklingForNonlinearBuckling(blade,analysisConfig["analysisFlags"].localBuckling,settings,ncpus,ansysFilename,ii,jj) - minnLLF,minnLLFMode = np.amin(nonlinearLoadFactors[:,jj]) - minWLF,minWLFMode = np.amin(wrinklingLimitingElementData[:,2,jj]) - critDesignvar[jj] = np.amin(minnLLF,minWLF) + nonlinearLoadFactors[ii, jj] = writeAnsysNonLinearBuckling( + ansysFilename, + ansysPath, + ansys_product, + analysisConfig, + ii, + jj, + ncpus, + iLoad, + ) + wrinklingLimitingElementData[ + ii, :, jj + ] = wrinklingForNonlinearBuckling( + blade, + analysisConfig["analysisFlags"].localBuckling, + settings, + ncpus, + ansysFilename, + ii, + jj, + ) + minnLLF, minnLLFMode = np.amin(nonlinearLoadFactors[:, jj]) + minWLF, minWLFMode = np.amin(wrinklingLimitingElementData[:, 2, jj]) + critDesignvar[jj] = np.amin(minnLLF, minWLF) plt.figure(5) for k in range(len(linearLoadFactors)): - #disp(strcat('-',marker(j),'k')) - plt.plot(imperfection * 1000,nonlinearLoadFactors[k,:],marker[k]) - hold('on') - plt.legend('Mode-' + str(np.arange(len(linearLoadFactors)))) - plt.title('Imperfection Study (Linear Elements) SNL3p0-148-mk0p2-s1-fiberglass') - plt.xlabel('Max Imperfection Size [mm]') - plt.ylabel('Buckling Load Factors [ ]') - #wrinklingLimitingElementData - [ansysSecNumber elno lf phicr] + # disp(strcat('-',marker(j),'k')) + plt.plot(imperfection * 1000, nonlinearLoadFactors[k, :], marker[k]) + hold("on") + plt.legend("Mode-" + str(np.arange(len(linearLoadFactors)))) + plt.title( + "Imperfection Study (Linear Elements) SNL3p0-148-mk0p2-s1-fiberglass" + ) + plt.xlabel("Max Imperfection Size [mm]") + plt.ylabel("Buckling Load Factors [ ]") + # wrinklingLimitingElementData - [ansysSecNumber elno lf phicr] designvar.globalBuckling[iLoad] = np.amin(np.amin(critDesignvar)) else: - if 'globalBuckling' in analysisConfig["analysisFlags"] and analysisConfig["analysisFlags"].globalBuckling > 0: + if ( + "globalBuckling" in analysisConfig["analysisFlags"] + and analysisConfig["analysisFlags"].globalBuckling > 0 + ): designvar.globalBuckling[iLoad] = linearLoadFactors(1) ## ************************************************************************ # ================= POST-PROCESS PANEL WRINKLING FACTORS ================= - if 'localBuckling' in analysisConfig["analysisFlags"] and not \ - len(analysisConfig["analysisFlags"].localBuckling)==0: - if 'imperfection' in 
analysisConfig["analysisFlags"] and not \ - len(analysisConfig["analysisFlags"].imperfection)==0 : - #UNSUPPORTED AT THIS TIME - writeAnsysNonLinearLocalBuckling(blade,analysisConfig,iLoad,fid,ansysFilename,ii,jj) + if ( + "localBuckling" in analysisConfig["analysisFlags"] + and not len(analysisConfig["analysisFlags"].localBuckling) == 0 + ): + if ( + "imperfection" in analysisConfig["analysisFlags"] + and not len(analysisConfig["analysisFlags"].imperfection) == 0 + ): + # UNSUPPORTED AT THIS TIME + writeAnsysNonLinearLocalBuckling( + blade, analysisConfig, iLoad, fid, ansysFilename, ii, jj + ) # perform wrinkling check - wrinklingLimitingElementData = writeAnsysFagerberWrinkling(app,SkinAreas,compsInModel,analysisConfig["analysisFlags"].localBuckling) + wrinklingLimitingElementData = writeAnsysFagerberWrinkling( + app, + SkinAreas, + compsInModel, + analysisConfig["analysisFlags"].localBuckling, + ) designvar.localBuckling[iLoad] = wrinklingLimitingElementData[2] - os.path.delete('*faceAvgStresses.txt') # NOTE: I believe * is supposed to glob here, but I am not sure it is doing that -kb + os.path.delete( + "*faceAvgStresses.txt" + ) # NOTE: I believe * is supposed to glob here, but I am not sure it is doing that -kb ## ************************************************************************ # ================= READ FAILURE RESULTS INTO MATLAB ================= - if 'failure' in analysisConfig["analysisFlags"] and not len(analysisConfig["analysisFlags"]['failure'])==0 : - fileName = np.array([failureFilename,'.out']) + if ( + "failure" in analysisConfig["analysisFlags"] + and not len(analysisConfig["analysisFlags"]["failure"]) == 0 + ): + fileName = np.array([failureFilename, ".out"]) designvar.failure[iLoad] = read_1_ANSYSoutput(fileName) os.delete(fileName) - + ## ************************************************************************ - + # ================= RUN FATIGUE POST PROCESSOR ================= - #After all load directions are solved compute fatige damage if needed - if 'fatigue' in analysisConfig["analysisFlags"] and not len(analysisConfig["analysisFlags"]['fatigue'])==0 : - if not len(varargin)==0 and class_(varargin[0])=='IECDef': + # After all load directions are solved compute fatige damage if needed + if ( + "fatigue" in analysisConfig["analysisFlags"] + and not len(analysisConfig["analysisFlags"]["fatigue"]) == 0 + ): + if not len(varargin) == 0 and class_(varargin[0]) == "IECDef": # cd .. 
IEC = varargin[0] - wt,rccdata = getWindSpeedDistribution(IEC.avgws) + wt, rccdata = getWindSpeedDistribution(IEC.avgws) # cd 'NuMAD' - designvar.fatigue = postprocessANSYSfatigue(blade,meshData,wt,rccdata,IEC,loadsTable,analysisConfig) + designvar.fatigue = postprocessANSYSfatigue( + blade, meshData, wt, rccdata, IEC, loadsTable, analysisConfig + ) else: - raise Exception('IECDef required to run fatigue analysis in mainAnsysAnalysis') - + raise Exception( + "IECDef required to run fatigue analysis in mainAnsysAnalysis" + ) + + return designvar + + +def saveData( + designvar=None, + iLoad=None, + airfoilSegmentName=None, + iSpan=None, + nodes=None, + midNodei=None, +): + getattr[designvar.localFields[iLoad], [airfoilSegmentName]].x[iSpan] = nodes[ + midNodei, 1 + ] + getattr[designvar.localFields[iLoad], [airfoilSegmentName]].y[iSpan] = nodes[ + midNodei, 2 + ] + getattr[designvar.localFields[iLoad], [airfoilSegmentName]].z[iSpan] = nodes[ + midNodei, 3 + ] + getattr[designvar.localFields[iLoad], [airfoilSegmentName]].data[iSpan] = nodes[ + midNodei, 4 + ] return designvar - - -def saveData(designvar = None,iLoad = None,airfoilSegmentName = None,iSpan = None,nodes = None,midNodei = None): - getattr[designvar.localFields[iLoad],[airfoilSegmentName]].x[iSpan] = nodes[midNodei,1] - getattr[designvar.localFields[iLoad],[airfoilSegmentName]].y[iSpan] = nodes[midNodei,2] - getattr[designvar.localFields[iLoad],[airfoilSegmentName]].z[iSpan] = nodes[midNodei,3] - getattr[designvar.localFields[iLoad],[airfoilSegmentName]].data[iSpan] = nodes[midNodei,4] - return designvar \ No newline at end of file diff --git a/src/pynumad/analysis/ansys/read.py b/src/pynumad/analysis/ansys/read.py index b9c9574..6677b72 100644 --- a/src/pynumad/analysis/ansys/read.py +++ b/src/pynumad/analysis/ansys/read.py @@ -2,127 +2,130 @@ import os from pynumad.analysis.ansys.utility import txt2mat + def read_1_ANSYSoutput(filename): - with open(filename, 'r') as fid: - format = '%s %f %s' + with open(filename, "r") as fid: + format = "%s %f %s" lines = fid.readlines() imax = len(lines) data = np.nan ct = 0 - + for i in range(imax): tline = lines[i] try: - formatted_line = np.loadtxt(tline,format) + formatted_line = np.loadtxt(tline, format) # will only work if "format" is readable # in that line. - if not len(formatted_line[2])==0: + if not len(formatted_line[2]) == 0: data = formatted_line[2] # w/ a numerical argument is reported. 
finally: pass - + fid.close() return data -def read_nlist(filename): +def read_nlist(filename): if filename is None: - filename = 'NLIST.lis' - + filename = "NLIST.lis" + # Open the file and read the entire contents - with open(filename, 'rb') as fid: + with open(filename, "rb") as fid: # filecontents = np.transpose(fread(fid,inf,'uint8=>char')) filecontents = fid.read() nlist = [] - tbl_hdrs = regexp(filecontents,'NODE\s*X\s*Y\s*Z\s*') - for kTbl in range(tbl_hdrs.size-1): - tbl = filecontents(range(tbl_hdrs(kTbl),tbl_hdrs(kTbl + 1))) - data = regexprep(tbl,'\s*NODE\s*X\s*Y\s*Z\s*','') - nlist.appen(np.loadtxt(data,'%f %f %f %f')) - + tbl_hdrs = regexp(filecontents, "NODE\s*X\s*Y\s*Z\s*") + for kTbl in range(tbl_hdrs.size - 1): + tbl = filecontents(range(tbl_hdrs(kTbl), tbl_hdrs(kTbl + 1))) + data = regexprep(tbl, "\s*NODE\s*X\s*Y\s*Z\s*", "") + nlist.appen(np.loadtxt(data, "%f %f %f %f")) + # get the last table kTbl = 0 - tbl = filecontents(np.arange(tbl_hdrs(kTbl + 1),end()+1)) - data = regexprep(tbl,'\s*NODE\s*X\s*Y\s*Z\s*','') - - nlist.append(np.loadtxt(data,'%f %f %f %f')) + tbl = filecontents(np.arange(tbl_hdrs(kTbl + 1), end() + 1)) + data = regexprep(tbl, "\s*NODE\s*X\s*Y\s*Z\s*", "") + + nlist.append(np.loadtxt(data, "%f %f %f %f")) nlist = np.array(nlist) return nlist -def readAnsysDeflections(blade, config, iLoad, deflectionFilename): + +def readAnsysDeflections(blade, config, iLoad, deflectionFilename): nSpan = len(blade.ispan) - data = np.zeros((nSpan,6)) - + data = np.zeros((nSpan, 6)) + for iSpan in range(nSpan): - fileName = deflectionFilename+'-'+str(iSpan)+'.out' + fileName = deflectionFilename + "-" + str(iSpan) + ".out" temp_results = txt2mat(fileName) os.remove(fileName) - #Displacement + # Displacement for k in range(3): - data[iSpan,k] = np.mean(temp_results[:,k + 4]) - nNode,__ = temp_results.shape - xmax = np.amax(temp_results[:,1]) - LE = np.argmax(temp_results[:,1]) - xmin = np.amin(temp_results[:,1]) - TE = np.argmin(temp_results[:,1]) - ymax = np.amax(temp_results[:,2]) - LP = np.argmax(temp_results[:,2]) - ymin = np.amin(temp_results[:,2]) - HP = np.argmin(temp_results[:,2]) - #close all; - #plot(temp(:,2),temp(:,3),'ok') - #hold on; - P = temp_results[LE,1:5] - Q = temp_results[TE,1:5] + data[iSpan, k] = np.mean(temp_results[:, k + 4]) + nNode, __ = temp_results.shape + xmax = np.amax(temp_results[:, 1]) + LE = np.argmax(temp_results[:, 1]) + xmin = np.amin(temp_results[:, 1]) + TE = np.argmin(temp_results[:, 1]) + ymax = np.amax(temp_results[:, 2]) + LP = np.argmax(temp_results[:, 2]) + ymin = np.amin(temp_results[:, 2]) + HP = np.argmin(temp_results[:, 2]) + # close all; + # plot(temp(:,2),temp(:,3),'ok') + # hold on; + P = temp_results[LE, 1:5] + Q = temp_results[TE, 1:5] PQ = P - Q - #quiver(Q(1),Q(2),PQ(1),PQ(2)); - #plot(temp(:,2)+temp(LE,2),temp(:,3)+temp(LE,3),'xb') - #axis equal; - R = P + temp_results[LE,1:5] - S = Q + temp_results[TE,1:5] + # quiver(Q(1),Q(2),PQ(1),PQ(2)); + # plot(temp(:,2)+temp(LE,2),temp(:,3)+temp(LE,3),'xb') + # axis equal; + R = P + temp_results[LE, 1:5] + S = Q + temp_results[TE, 1:5] RS = R - S - #quiver(Q(1),Q(2),RS(1),RS(2)); - #data(iSpan, 5) = 180/pi* acos(dot(RS(1:2:3),PQ(1:2:3))/(vecnorm(RS(1:2:3))*vecnorm(PQ(1:2:3)))); - #data(iSpan, 6) = 180/pi* acos(dot(RS(1:2),PQ(1:2))/(vecnorm(RS(1:2))*vecnorm(PQ(1:2)))); - index = [0,2] + # quiver(Q(1),Q(2),RS(1),RS(2)); + # data(iSpan, 5) = 180/pi* acos(dot(RS(1:2:3),PQ(1:2:3))/(vecnorm(RS(1:2:3))*vecnorm(PQ(1:2:3)))); + # data(iSpan, 6) = 180/pi* 
acos(dot(RS(1:2),PQ(1:2))/(vecnorm(RS(1:2))*vecnorm(PQ(1:2)))); + index = [0, 2] a = RS[index[0]] * PQ[index[0]] b = RS[index[1]] * PQ[index[1]] c = np.sqrt(PQ[index[0]] ** 2 + PQ[index[1]] ** 2) d = np.sqrt(RS[index[0]] ** 2 + RS[index[1]] ** 2) - data[iSpan,5] = 180 / np.pi * np.arccos((a + b) / (c * d)) - index = [0,1] + data[iSpan, 5] = 180 / np.pi * np.arccos((a + b) / (c * d)) + index = [0, 1] a = RS[index[0]] * PQ[index[0]] b = RS[index[1]] * PQ[index[1]] c = np.sqrt(PQ[index[0]] ** 2 + PQ[index[1]] ** 2) d = np.sqrt(RS[index[0]] ** 2 + RS[index[1]] ** 2) arg = (a + b) / (c * d) if arg > 1: - if np.round(arg,6) == 1: - data[iSpan,5] = 180 / np.pi * np.arccos(np.round(arg,6)) + if np.round(arg, 6) == 1: + data[iSpan, 5] = 180 / np.pi * np.arccos(np.round(arg, 6)) else: - data[iSpan,5] = 180 / np.pi * np.arccos(arg) - T = temp_results[LP,1:5] - U = temp_results[HP,1:5] + data[iSpan, 5] = 180 / np.pi * np.arccos(arg) + T = temp_results[LP, 1:5] + U = temp_results[HP, 1:5] TU = T - U - V = T + temp_results[LP,1:5] - W = U + temp_results[HP,1:5] + V = T + temp_results[LP, 1:5] + W = U + temp_results[HP, 1:5] VW = V - W - index = [1,2] + index = [1, 2] a = VW[index[0]] * TU[index[0]] b = VW[index[1]] * TU[index[1]] c = np.sqrt(TU[index[0]] ** 2 + TU[index[1]] ** 2) d = np.sqrt(VW[index[0]] ** 2 + VW[index[1]] ** 2) - data[iSpan,3] = 180 / np.pi * np.arccos((a + b) / (c * d)) - #title(['ispan:' int2str(iSpan) ' theta:' num2str(data(iSpan, 6))]) - + data[iSpan, 3] = 180 / np.pi * np.arccos((a + b) / (c * d)) + # title(['ispan:' int2str(iSpan) ' theta:' num2str(data(iSpan, 6))]) + deflections = [] for jj in range(6): - deflections.append(data[:,jj]) - + deflections.append(data[:, jj]) + return deflections -def readANSYSElementTable(filename, pat, NCOLS): + +def readANSYSElementTable(filename, pat, NCOLS): # readANSYSElementTable Read an ANSYS POST1 element table listing. # ********************************************************************** # * Part of the SNL NuMAD Toolbox * @@ -136,63 +139,62 @@ def readANSYSElementTable(filename, pat, NCOLS): # pat - pattern that repeated in the table e.g pat = 'ELEM\s*EPELX\s*EPELY\s*EPELZ\s*EPELXY\s*EPELYZ\s*EPELXZ\s*'; # NCOL - number of columns in the data table # DATA is 7-column matrix [e.g.ELEM, EPELX, EPELY, EPELZ, EPELXY, EPELYZ, EPELXZ] - - defaultfn = 'Strains.txt' + + defaultfn = "Strains.txt" # hard-code filename if not specified if filename is None: filename = defaultfn - pat = 'ELEM\s*EPELX\s*EPELY\s*EPELZ\s*EPELXY\s*EPELYZ\s*EPELXZ\s*' + pat = "ELEM\s*EPELX\s*EPELY\s*EPELZ\s*EPELXY\s*EPELYZ\s*EPELXZ\s*" NCOLS = 7 - + # # user select filename if not specified -# if ~exist('filename','var') || isempty(filename) -# [fn,pn] = uigetfile( ... -# {'*.txt','Text files(*.txt)'; ... -# '*.*','All files (*.*)'},... -# 'Select ANSYS element list',defaultfn); -# if isequal(fn,0) || isequal(pn,0) -# disp('Operation canceled by user.') -# return; -# end -# filename = fullfile(pn,fn); -# end - + # if ~exist('filename','var') || isempty(filename) + # [fn,pn] = uigetfile( ... + # {'*.txt','Text files(*.txt)'; ... + # '*.*','All files (*.*)'},... 
+ # 'Select ANSYS element list',defaultfn); + # if isequal(fn,0) || isequal(pn,0) + # disp('Operation canceled by user.') + # return; + # end + # filename = fullfile(pn,fn); + # end + # Open the file and read the entire contents - with open(filename, 'rb') as fid: + with open(filename, "rb") as fid: filecontents = fid.read() - #assignin('base','filecontents',filecontents); #debugging - + # assignin('base','filecontents',filecontents); #debugging + # process the tables - data = cell(1,NCOLS) - tbl_hdrs = regexp(filecontents,pat) - + data = cell(1, NCOLS) + tbl_hdrs = regexp(filecontents, pat) + tbl_hdrs[end() + 1] = np.asarray(filecontents).size - - #assignin('base','tbl_hdrs',tbl_hdrs); #debugging - for kTbl in range(tbl_hdrs.size-1): - tbl = filecontents(np.arange(tbl_hdrs(kTbl),tbl_hdrs(kTbl + 1) - 1+1)) - tbl = regexprep(tbl,pat,'') - data = np.array([[data],[np.loadtxt(tbl,np.matlib.repmat(' %f',1,NCOLS))]]) - + + # assignin('base','tbl_hdrs',tbl_hdrs); #debugging + for kTbl in range(tbl_hdrs.size - 1): + tbl = filecontents(np.arange(tbl_hdrs(kTbl), tbl_hdrs(kTbl + 1) - 1 + 1)) + tbl = regexprep(tbl, pat, "") + data = np.array([[data], [np.loadtxt(tbl, np.matlib.repmat(" %f", 1, NCOLS))]]) + data = cell2mat(data) return data -def readAnsysFailure(fileName = None): + +def readAnsysFailure(fileName=None): fid = open(fileName) terminate = 0 elemFailure = [] - while (terminate == 0): - + while terminate == 0: fLine = fgetl(fid) - if (fLine == - 1): + if fLine == -1: terminate = 1 else: fArray = str2num(fLine) - if (len(fArray) > 2): - elemFailure = np.array([[elemFailure],[fArray[1]]]) - #elemFailure = elemFailure + 60000*fArray(2)^2; + if len(fArray) > 2: + elemFailure = np.array([[elemFailure], [fArray[1]]]) + # elemFailure = elemFailure + 60000*fArray(2)^2; - fid.close() # fid = fopen(fileName); # terminate = 0; @@ -212,56 +214,55 @@ def readAnsysFailure(fileName = None): # fid.close(); return elemFailure -def readAnsysFreq(fileName = None): + +def readAnsysFreq(fileName=None): fid = open(fileName) fLine = fgetl(fid) - while (not contains(fLine,'1') ): - + while not contains(fLine, "1"): fLine = fgetl(fid) - terminate = 0 modeNum = 0 Freq = [] - while (terminate == 0): - - if (fLine == - 1): + while terminate == 0: + if fLine == -1: terminate = 1 else: lnLst = str2num(fLine) - if (len(lnLst) >= 2): - if (lnLst[1] > 0.0): + if len(lnLst) >= 2: + if lnLst[1] > 0.0: modeNum = modeNum + 1 - Freq = np.array([Freq,lnLst[1]]) + Freq = np.array([Freq, lnLst[1]]) fLine = fgetl(fid) - fid.close() return Freq -def readAnsysLinearBuckling(blade = None,config = None,iLoad = None,fid = None,bucklingFilename = None): - fid = open(np.array([bucklingFilename,'.out'])) - for jj in np.arange(1,5+1).reshape(-1): + +def readAnsysLinearBuckling( + blade=None, config=None, iLoad=None, fid=None, bucklingFilename=None +): + fid = open(np.array([bucklingFilename, ".out"])) + for jj in np.arange(1, 5 + 1).reshape(-1): tline = fgetl(fid) - - data = cell(1,5) - while 1: + data = cell(1, 5) + while 1: tline = fgetl(fid) - if not ischar(tline) : + if not ischar(tline): break - data = np.array([[data],[np.loadtxt(tline,'%f %f %f %f %f')]]) + data = np.array([[data], [np.loadtxt(tline, "%f %f %f %f %f")]]) - fid.close() - print(' ') + print(" ") data = cell2mat(data) - linearLoadFactors = data(np.arange(1,config.analysisFlags.globalBuckling+1),2) - - os.delete(np.array([bucklingFilename,'.out'])) + linearLoadFactors = data(np.arange(1, config.analysisFlags.globalBuckling + 1), 2) + + 
os.delete(np.array([bucklingFilename, ".out"])) return linearLoadFactors -def readANSYSnforce(filename = None): + +def readANSYSnforce(filename=None): # readANSYSnforce Read an ANSYS list of Elements. # ********************************************************************** # * Part of the SNL NuMAD Toolbox * @@ -272,12 +273,12 @@ def readANSYSnforce(filename = None): # Usage: data = readANSYSnforce(FILENAME) # where FILENAME is file name string, default 'Elements.txt' # DATA is 3-column matrix [ELEM, MAT, SEC] - - defaultfn = 'nforce.txt' + + defaultfn = "nforce.txt" # hard-code filename if not specified - if not ('filename' is not None) : + if not ("filename" is not None): filename = defaultfn - + # # user select filename if not specified # if ~exist('filename','var') || isempty(filename) # [fn,pn] = uigetfile( ... @@ -290,83 +291,84 @@ def readANSYSnforce(filename = None): # end # filename = fullfile(pn,fn); # end - + # Open the file and read the entire contents fid = open(filename) - if (fid == - 1): - raise Exception('Could not open file "%s"',filename) - - filecontents = np.transpose(fread(fid,inf,'uint8=>char')) + if fid == -1: + raise Exception('Could not open file "%s"', filename) + + filecontents = np.transpose(fread(fid, inf, "uint8=>char")) fid.close() - #assignin('base','filecontents',filecontents); #debugging - + # assignin('base','filecontents',filecontents); #debugging + # process the tables NCOLS = 7 - data = cell(1,NCOLS) - pat = 'NODE\s*FX\s*FY\s*FZ\s*MX\s*MY\s*MZ\s*' - #pat='*** NOTE ***\s*CP =\s*\d*.\d*\s*TIME=\s*\d*:\d*:\d*\r\s*Use the SLIST command to list section data for element\s*\d*.\s*Section\s*\r\s*data overrides the real constant data.|*** NOTE ***\s*CP =\s*\d*.\d*\s*TIME=\s*\d*:\d*:\d*\r\s*Use the SLIST command to list section data for element\s*\d*.\s*Section data\s*\r\s*overrides the real constant data. '; - tbl_hdrs = regexp(filecontents,pat) - + data = cell(1, NCOLS) + pat = "NODE\s*FX\s*FY\s*FZ\s*MX\s*MY\s*MZ\s*" + # pat='*** NOTE ***\s*CP =\s*\d*.\d*\s*TIME=\s*\d*:\d*:\d*\r\s*Use the SLIST command to list section data for element\s*\d*.\s*Section\s*\r\s*data overrides the real constant data.|*** NOTE ***\s*CP =\s*\d*.\d*\s*TIME=\s*\d*:\d*:\d*\r\s*Use the SLIST command to list section data for element\s*\d*.\s*Section data\s*\r\s*overrides the real constant data. 
'; + tbl_hdrs = regexp(filecontents, pat) + tbl_hdrs[end() + 1] = np.asarray(filecontents).size - - #assignin('base','tbl_hdrs',tbl_hdrs); #debugging - for kTbl in np.arange(1,np.asarray(tbl_hdrs).size - 1+1).reshape(-1): - tbl = filecontents(np.arange(tbl_hdrs(kTbl),tbl_hdrs(kTbl + 1) - 1+1)) - tbl = regexprep(tbl,pat,'') - data = np.array([[data],[np.loadtxt(tbl,np.matlib.repmat(' %f',1,NCOLS))]]) - + + # assignin('base','tbl_hdrs',tbl_hdrs); #debugging + for kTbl in np.arange(1, np.asarray(tbl_hdrs).size - 1 + 1).reshape(-1): + tbl = filecontents(np.arange(tbl_hdrs(kTbl), tbl_hdrs(kTbl + 1) - 1 + 1)) + tbl = regexprep(tbl, pat, "") + data = np.array([[data], [np.loadtxt(tbl, np.matlib.repmat(" %f", 1, NCOLS))]]) + data = cell2mat(data) # only columns 1,2,6 are needed now # Note: np.loadtxt can skip fields by using #*f in place of #f on the # columns to skip - #data = data(:,[1 2 6]); - + # data = data(:,[1 2 6]); + return data -def readANSYSoutputs(filename = None,ncol = None): - fid = open(filename,'r') - if (fid == - 1): - raise Exception('Could not open file "%s"',filename) - - format = '%f' * ncol - +def readANSYSoutputs(filename=None, ncol=None): + fid = open(filename, "r") + if fid == -1: + raise Exception('Could not open file "%s"', filename) + + format = "%f" * ncol + file = fileread(filename) - - lines = strsplit(file,'\n') - + + lines = strsplit(file, "\n") + imax = len(lines) - - tempdata = np.zeros((imax,ncol)) - - #have been read, then the number of lines with numeric data will be known. - #The remaining zeros will be truncated. - + + tempdata = np.zeros((imax, ncol)) + + # have been read, then the number of lines with numeric data will be known. + # The remaining zeros will be truncated. + ct = 0 - + for i in range(imax): tline = fgetl(fid) try: - a = np.loadtxt(tline,format) + a = np.loadtxt(tline, format) b = cell2mat(a) # in that line. - if not len(b)==0 : + if not len(b) == 0: ct = ct + 1 - tempdata[ct,:] = b + tempdata[ct, :] = b finally: pass # disp(tline) # if ischar(tline) # ct=ct+1 # end - #data=[data; np.loadtxt(tline,'#f #f #f #f')]; - + # data=[data; np.loadtxt(tline,'#f #f #f #f')]; + fid.close() - data = tempdata[0:ct+1,:] - + data = tempdata[0 : ct + 1, :] + return data -def readANSYSStrains(filename, flag): + +def readANSYSStrains(filename, flag): # readANSYSStrains Read an ANSYS POST1 element table listing. # ********************************************************************** # * Part of the SNL NuMAD Toolbox * @@ -379,11 +381,11 @@ def readANSYSStrains(filename, flag): # where FILENAME is file name string, default 'Strains.txt' # and flag is either NODE or ELEM # DATA is 7-column matrix [ELEM, EPELX, EPELY, EPELZ, EPELXY, EPELYZ, EPELXZ] - + # hard-code filename if not specified if filename is None: - filename = 'Strains.txt' - + filename = "Strains.txt" + # # user select filename if not specified # if ~exist('filename','var') || isempty(filename) # [fn,pn] = uigetfile( ... 
@@ -396,29 +398,29 @@ def readANSYSStrains(filename, flag): # end # filename = fullfile(pn,fn); # end - + # Open the file and read the entire contents - with open(filename, 'rb') as fid: - if (fid == - 1): - raise Exception('Could not open file "%s"',filename) + with open(filename, "rb") as fid: + if fid == -1: + raise Exception('Could not open file "%s"', filename) # filecontents = np.transpose(fread(fid,inf,'uint8=>char')) filecontents = fid.read() - #assignin('base','filecontents',filecontents); #debugging - + # assignin('base','filecontents',filecontents); #debugging + # process the tables NCOLS = 7 data = [] - pat = flag+'\s*EPELX\s*EPELY\s*EPELZ\s*EPELXY\s*EPELYZ\s*EPELXZ\s*' - tbl_hdrs = regexp(filecontents,pat) - + pat = flag + "\s*EPELX\s*EPELY\s*EPELZ\s*EPELXY\s*EPELYZ\s*EPELXZ\s*" + tbl_hdrs = regexp(filecontents, pat) + tbl_hdrs[end() + 1] = np.asarray(filecontents).size - - #assignin('base','tbl_hdrs',tbl_hdrs); #debugging - for kTbl in range(tbl_hdrs.size-1): - tbl = filecontents(np.arange(tbl_hdrs(kTbl),tbl_hdrs(kTbl + 1) - 1+1)) - tbl = regexprep(tbl,pat,'') - data.append([np.loadtxt(tbl,np.matlib.repmat(' %f',1,NCOLS))]) - + + # assignin('base','tbl_hdrs',tbl_hdrs); #debugging + for kTbl in range(tbl_hdrs.size - 1): + tbl = filecontents(np.arange(tbl_hdrs(kTbl), tbl_hdrs(kTbl + 1) - 1 + 1)) + tbl = regexprep(tbl, pat, "") + data.append([np.loadtxt(tbl, np.matlib.repmat(" %f", 1, NCOLS))]) + data = cell2mat(data) - return data \ No newline at end of file + return data diff --git a/src/pynumad/analysis/ansys/utility.py b/src/pynumad/analysis/ansys/utility.py index eff0a97..2afbee7 100644 --- a/src/pynumad/analysis/ansys/utility.py +++ b/src/pynumad/analysis/ansys/utility.py @@ -9,9 +9,9 @@ from pynumad.utils.interpolation import * -def getMatrialLayerInfoWithOutGUI(blade): - #Temparary workaround to extract data needed for: - #From write_shell7.m +def getMatrialLayerInfoWithOutGUI(blade): + # Temparary workaround to extract data needed for: + # From write_shell7.m TotalStations = blade.station.size TotalShearwebs = blade.shearweb.size skin_areas = [] @@ -19,76 +19,86 @@ def getMatrialLayerInfoWithOutGUI(blade): stationIB = blade.station(kStation) stationOB = blade.station(kStation + 1) for kdp in range(stationIB.size - 1): - if np.array(['single','double']) == stationIB.dptype[kdp]: + if np.array(["single", "double"]) == stationIB.dptype[kdp]: # single and double are equivalent on the area inboard edge # start and end are current and next DP skin_areas[kStation].startIB[-1] = kdp skin_areas[kStation].endIB[-1] = kdp + 1 skin_areas[kStation].Material[-1] = stationIB.sm[kdp] else: - if np.array(['flare','hourglass']) == stationIB.dptype[kdp]: + if np.array(["flare", "hourglass"]) == stationIB.dptype[kdp]: # flare and hourglass are equivalent on the area inboard edge # start and end of first area is current DP # start and end of next area is current and next DP - skin_areas[kStation].startIB[end() + [np.arange[1,2+1]]] = np.array([kdp,kdp]) - skin_areas[kStation].endIB[end() + [np.arange[1,2+1]]] = np.array([kdp,kdp + 1]) + skin_areas[kStation].startIB[ + end() + [np.arange[1, 2 + 1]] + ] = np.array([kdp, kdp]) + skin_areas[kStation].endIB[ + end() + [np.arange[1, 2 + 1]] + ] = np.array([kdp, kdp + 1]) skin_areas[kStation].Material[end() + 1] = stationIB.dpmaterial[kdp] skin_areas[kStation].Material[end() + 1] = stationIB.sm[kdp] - for kdp in range(stationOB.dp.size-1): - if np.array(['single','flare']) == stationOB.dptype[kdp]: + for kdp in range(stationOB.dp.size - 1): + if 
np.array(["single", "flare"]) == stationOB.dptype[kdp]: # single and flare are equivalent on the area outboard edge # start and end are current and next DP skin_areas[kStation].startOB[end() + 1] = kdp skin_areas[kStation].endOB[end() + 1] = kdp + 1 else: - if np.array(['double','hourglass']) == stationOB.dptype[kdp]: + if np.array(["double", "hourglass"]) == stationOB.dptype[kdp]: # double and hourglass are equivalent on the area outboard edge # start and end of first area is current DP # start and end of next area is current and next DP - skin_areas[kStation].startOB[end() + [np.arange[1,2+1]]] = np.array([kdp,kdp]) - skin_areas[kStation].endOB[end() + [np.arange[1,2+1]]] = np.array([kdp,kdp + 1]) - - #tcl: Determine which composite materials are used in the model + skin_areas[kStation].startOB[ + end() + [np.arange[1, 2 + 1]] + ] = np.array([kdp, kdp]) + skin_areas[kStation].endOB[ + end() + [np.arange[1, 2 + 1]] + ] = np.array([kdp, kdp + 1]) + + # tcl: Determine which composite materials are used in the model compsInModel = np.array([]) - #tcl: search shear web materials + # tcl: search shear web materials for k in range(TotalShearwebs): - compsInModel = np.array([[compsInModel],[np.array([blade.shearweb[k].Material])]]) - - #tcl: search skin materials + compsInModel = np.array( + [[compsInModel], [np.array([blade.shearweb[k].Material])]] + ) + + # tcl: search skin materials for k in range(skin_areas.size): - compsInModel = np.array([[compsInModel],[transpose(skin_areas[k].Material)]]) - + compsInModel = np.array([[compsInModel], [transpose(skin_areas[k].Material)]]) + compsInModel = np.unique(compsInModel) # load the material database and create searchable list - if not len(blade.settings.job_name)==0 : + if not len(blade.settings.job_name) == 0: # job name exists, use the local material database - blade.matdb_path = fullfile(blade.settings.job_path,'MatDBsi.txt') + blade.matdb_path = fullfile(blade.settings.job_path, "MatDBsi.txt") else: # if no job name, use the master material database # jcb: we shouldn't arrive here because a file save is required first - blade.matdb_path = fullfile(blade.numadpath,'MatDBsi.txt') - + blade.matdb_path = fullfile(blade.numadpath, "MatDBsi.txt") + blade.matdb = readMatDB(blade.matdb_path) for k in range(blade.matdb.size): blade.matlist[k] = blade.matdb[k].name mattype[k] = blade.matdb[k].type - - blade.isotropic = str('isotropic') == str(mattype) - blade.orthotropic = str('orthotropic') == str(mattype) - blade.composite = str('composite') == str(mattype) + + blade.isotropic = str("isotropic") == str(mattype) + blade.orthotropic = str("orthotropic") == str(mattype) + blade.composite = str("composite") == str(mattype) # Determine which isotropic and orthotropic materials are used in the model isoorthoInModel = np.array([]) for kcomp in range(compsInModel.size): n = str(compsInModel(kcomp)) == str(blade.matlist) - if not np.any(n) : - raise Exception('Material "%s" not found in database.',compsInModel[kcomp]) + if not np.any(n): + raise Exception('Material "%s" not found in database.', compsInModel[kcomp]) mat = blade.matdb[n] layerNames = [] for klay in range(mat.layer.size): layerNames.bladeend(np.array([mat.layer(klay).layerName])) - isoorthoInModel = np.unique(np.array([[isoorthoInModel],[layerNames]])) - - return isoorthoInModel,compsInModel,skin_areas,blade + isoorthoInModel = np.unique(np.array([[isoorthoInModel], [layerNames]])) + + return isoorthoInModel, compsInModel, skin_areas, blade def txt2mat(filename): @@ -109,267 +119,346 @@ def 
txt2mat(filename): rows = [] for line in lines: line = line.strip() - nums = line.split(' ') - nums = list(filter(''.__ne__,nums)) - nums = list(map(float,nums)) + nums = line.split(" ") + nums = list(filter("".__ne__, nums)) + nums = list(map(float, nums)) row = np.array(nums) rows.append(row) mat = np.array(rows) return mat -def postprocessANSYSfatigue(blade, meshData, wt, rccdata, IEC, loadsTable, config): - if np.any('all' in config.analysisFlags.fatigue.lower()): # NOTE probably need to workshop this -kb +def postprocessANSYSfatigue(blade, meshData, wt, rccdata, IEC, loadsTable, config): + if np.any( + "all" in config.analysisFlags.fatigue.lower() + ): # NOTE probably need to workshop this -kb nSegments = 1 else: nSegments = np.asarray(config.analysisFlags.fatigue).size - + # Order of the segment names in segmentNamesReference # is very important. config.analysisFlags.fatigue can # be any order - segmentNamesReference = ['HP_TE_FLAT','HP_TE_ReINF','HP_TE_PANEL','HP_SPAR','HP_LE_PANEL','HP_LE','LP_LE','LP_LE_PANEL','LP_SPAR','LP_TE_PANEL','LP_TE_REINF','LP_TE_FLAT'] + segmentNamesReference = [ + "HP_TE_FLAT", + "HP_TE_ReINF", + "HP_TE_PANEL", + "HP_SPAR", + "HP_LE_PANEL", + "HP_LE", + "LP_LE", + "LP_LE_PANEL", + "LP_SPAR", + "LP_TE_PANEL", + "LP_TE_REINF", + "LP_TE_FLAT", + ] nsegmentNamesReference = len(segmentNamesReference) markovSize = 16 designVar = np.array([]) - + Yr = IEC.designLife - #fst=readFastMain(['IEC_' IEC.fstfn '.fst']); - #simtime=IEC.numSeeds*(fst.SimCtrl.TMax-IEC.delay); # simulated and rainflow counted time, seconds + # fst=readFastMain(['IEC_' IEC.fstfn '.fst']); + # simtime=IEC.numSeeds*(fst.SimCtrl.TMax-IEC.delay); # simulated and rainflow counted time, seconds simtime = IEC.numSeeds * (IEC.SimTime - IEC.delay) nSpace = 90 / loadsTable[2].theta - + nDirections = len(loadsTable) - - for kTheta in range(nDirections / 2+1): - #since blade movements along single direction constitues - #two directions (e.g positive flap deflections and negative - #ones are two directions; both wich make - #the flap cycles) + + for kTheta in range(nDirections / 2 + 1): + # since blade movements along single direction constitues + # two directions (e.g positive flap deflections and negative + # ones are two directions; both wich make + # the flap cycles) loadsTableTheta = loadsTable[kTheta] theta = loadsTableTheta.theta loadsTableThetaPlus90 = loadsTable[kTheta + nSpace] nGage = loadsTableTheta.input.rGagesize gageNumber = np.transpose((np.arange(nGage))) - criticalElement,fatigueDamage,criticalLayerNo,criticalMatNo = deal(np.zeros((nGage,1))) - criticalMat = cell(nGage,1) + criticalElement, fatigueDamage, criticalLayerNo, criticalMatNo = deal( + np.zeros((nGage, 1)) + ) + criticalMat = cell(nGage, 1) rGage = loadsTableTheta.input.rGage MrTheta = loadsTableTheta.input.Mrb MrThetaPlus90 = loadsTableThetaPlus90.input.Mrb plotFatigue = [] - fileNameTheta = 'plateStrains-all-'+str(kTheta)+'.txt' - fileNameThetaPlus90 = 'plateStrains-all-'+str(kTheta + nSpace)+'.txt' + fileNameTheta = "plateStrains-all-" + str(kTheta) + ".txt" + fileNameThetaPlus90 = "plateStrains-all-" + str(kTheta + nSpace) + ".txt" print(fileNameTheta) print(fileNameThetaPlus90) - #Used for reading element stresses - pat = 'ELEM\s*ZCENT\s*EPS11\s*EPS22\s*EPS12\s*KAPA11\s*KAPA22\s*KAPA12\s*GAMMA13\s*GAMMA23' + # Used for reading element stresses + pat = "ELEM\s*ZCENT\s*EPS11\s*EPS22\s*EPS12\s*KAPA11\s*KAPA22\s*KAPA12\s*GAMMA13\s*GAMMA23" NCOLS = 10 - plateStrainsTheta = readANSYSElementTable(fileNameTheta,pat,NCOLS) - 
plateStrainsThetaPlus90 = readANSYSElementTable(fileNameThetaPlus90,pat,NCOLS) + plateStrainsTheta = readANSYSElementTable(fileNameTheta, pat, NCOLS) + plateStrainsThetaPlus90 = readANSYSElementTable(fileNameThetaPlus90, pat, NCOLS) for i in range(nSegments): - iSegment = np.where(segmentNamesReference == config.analysisFlags.fatigue[i]) - if np.any('all' in config.analysisFlags.fatigue.lower()): - title = 'All segments' + iSegment = np.where( + segmentNamesReference == config.analysisFlags.fatigue[i] + ) + if np.any("all" in config.analysisFlags.fatigue.lower()): + title = "All segments" else: - if not 'webs' == config.analysisFlags.fatigue[i] : + if not "webs" == config.analysisFlags.fatigue[i]: title = config.analysisFlags.fatigue[i] - __,nSpanRegions = meshData.outerShellElSets.shape + __, nSpanRegions = meshData.outerShellElSets.shape elementList = [] for iSpan in range(nSpanRegions): - elementList = [elementList,meshData.outerShellElSets[iSegment,iSpan].elementList] + elementList = [ + elementList, + meshData.outerShellElSets[iSegment, iSpan].elementList, + ] else: - title = 'Webs' - __,nWebs = meshData.shearWebElSets.shape + title = "Webs" + __, nWebs = meshData.shearWebElSets.shape elementList = [] for iWeb in range(nWebs): - __,nSpanRegions = meshData.shearWebElSets[iWeb].shape + __, nSpanRegions = meshData.shearWebElSets[iWeb].shape for iSpan in range(nSpanRegions): - elementList = np.array([elementList,meshData.shearWebElSets[iWeb][iSpan].elementList]) - plateStrainsThetaSet = plateStrainsTheta[elementList,:] - plateStrainsThetaPlus90Set = plateStrainsThetaPlus90[elementList,:] + elementList = np.array( + [ + elementList, + meshData.shearWebElSets[iWeb][iSpan].elementList, + ] + ) + plateStrainsThetaSet = plateStrainsTheta[elementList, :] + plateStrainsThetaPlus90Set = plateStrainsThetaPlus90[elementList, :] for chSpan in range(nGage): direction = str(theta) - Ltheta = getMomentMarkov(rccdata,wt,Yr,simtime,markovSize,chSpan,direction) + Ltheta = getMomentMarkov( + rccdata, wt, Yr, simtime, markovSize, chSpan, direction + ) if theta + 90 < 180: direction = str(theta + 90) else: direction = str(theta - 90) - LthetaPlus90 = getMomentMarkov(rccdata,wt,Yr,simtime,markovSize,chSpan,direction) - Mtheta = interpolator_wrap(rGage,MrTheta,rGage[chSpan]) - MthetaPlus90 = interpolator_wrap(rGage,MrThetaPlus90,rGage[chSpan]) + LthetaPlus90 = getMomentMarkov( + rccdata, wt, Yr, simtime, markovSize, chSpan, direction + ) + Mtheta = interpolator_wrap(rGage, MrTheta, rGage[chSpan]) + MthetaPlus90 = interpolator_wrap(rGage, MrThetaPlus90, rGage[chSpan]) zwidth = 0.75 # at a blade gage location. 
z1 = rGage[chSpan] - zwidth / 2 z2 = rGage[chSpan] + zwidth / 2 - binnedElements = np.intersect(np.where(plateStrainsThetaSet[:,1] < z2),np.where(plateStrainsThetaSet[:,1] > z1)) - fdData,plotFatigueChSpan = calcFatigue(blade,meshData,IEC,Ltheta,LthetaPlus90,Mtheta,MthetaPlus90,binnedElements,plateStrainsThetaSet,plateStrainsThetaPlus90Set,iSegment) - plotFatigue = np.array([[plotFatigue],[plotFatigueChSpan]]) + binnedElements = np.intersect( + np.where(plateStrainsThetaSet[:, 1] < z2), + np.where(plateStrainsThetaSet[:, 1] > z1), + ) + fdData, plotFatigueChSpan = calcFatigue( + blade, + meshData, + IEC, + Ltheta, + LthetaPlus90, + Mtheta, + MthetaPlus90, + binnedElements, + plateStrainsThetaSet, + plateStrainsThetaPlus90Set, + iSegment, + ) + plotFatigue = np.array([[plotFatigue], [plotFatigueChSpan]]) criticalElement[chSpan] = fdData[0] fatigueDamage[chSpan] = fdData[1] criticalLayerNo[chSpan] = fdData[4] criticalMatNo[chSpan] = fdData[7] criticalMat[chSpan] = blade.materials(fdData(8)).name # plotFatigueFileName=['plotFatigue-' str(kTheta)]; -# writePlotFatigue(plotFatigueFileName,plotFatigue) - print('\n\n\n ************************ Segment No-%i: %s ************************\n' % (i,title)) - table(gageNumber,criticalElement,fatigueDamage,criticalLayerNo,criticalMatNo,criticalMat) + # writePlotFatigue(plotFatigueFileName,plotFatigue) + print( + "\n\n\n ************************ Segment No-%i: %s ************************\n" + % (i, title) + ) + table( + gageNumber, + criticalElement, + fatigueDamage, + criticalLayerNo, + criticalMatNo, + criticalMat, + ) # designVar{end+1}=max(fatigueDamage); - designVar[kTheta].fatigueDamage[i,:] = fatigueDamage - designVar[kTheta].criticalElement[i,:] = criticalElement - designVar[kTheta].criticalLayerNo[i,:] = criticalLayerNo - designVar[kTheta].criticalMatNo[i,:] = criticalMatNo - - #delete stresses-*-*.txt; + designVar[kTheta].fatigueDamage[i, :] = fatigueDamage + designVar[kTheta].criticalElement[i, :] = criticalElement + designVar[kTheta].criticalLayerNo[i, :] = criticalLayerNo + designVar[kTheta].criticalMatNo[i, :] = criticalMatNo + + # delete stresses-*-*.txt; return designVar - -def getWindSpeedDistribution(avgws): - scipy.io.loadmat('rccdata.mat','rccdata') + +def getWindSpeedDistribution(avgws): + scipy.io.loadmat("rccdata.mat", "rccdata") # determine the wind speeds that are saved in rccdata for w in range(rccdata.shape[1]): - ws[w] = rccdata[1,w].windspeed - + ws[w] = rccdata[1, w].windspeed + # check to make sure wind speeds are spaced evenly if std(np.diff(ws)) != 0: print(ws) - raise Exception('Your windspeeds are not spaced evenly') - + raise Exception("Your windspeeds are not spaced evenly") + # define bin edges based on wind speeds in rccdata binwidth = ws[1] - ws[0] - binedges = np.array([ws[0] - binwidth / 2,ws + binwidth / 2]) + binedges = np.array([ws[0] - binwidth / 2, ws + binwidth / 2]) # define wind bins - windbins = np.zeros((len(binedges),2)) - for jj in range(len(binedges)-1): - windbins[jj,:] = np.array([binedges[jj],binedges[jj + 1]]) - + windbins = np.zeros((len(binedges), 2)) + for jj in range(len(binedges) - 1): + windbins[jj, :] = np.array([binedges[jj], binedges[jj + 1]]) + # Calculate weights for each bin according to Rayleigh distribution sig = avgws / np.sqrt(np.pi / 2) # find PDF and CDF of Rayleigh distribution # pdf=windbins.*exp(-windbins.^2/(2*sig^2))/sig^2; - cdf = 1 - np.exp(- windbins ** 2 / (2 * sig ** 2)) + cdf = 1 - np.exp(-(windbins**2) / (2 * sig**2)) # calculate weights - wt = np.diff(cdf,1,2) + wt 
= np.diff(cdf, 1, 2) # show sum of weights (should be close to one, and not greater than one) - print(' ') - print('Sum of the Rayleigh weights is ', sum(wt)) - print(' ') + print(" ") + print("Sum of the Rayleigh weights is ", sum(wt)) + print(" ") # check to make sure that the number of weights is equal to the number of # simulated wind speeds in rccdata if len(wt) != rccdata.shape[1]: - raise Exception('The number of Rayleigh weights is not equal to the number of wind speeds contained in the rccdata.mat file') - - return wt,rccdata - - -def getLoadFactorsForElementsWithSameSection(LF, ansysSecNumber, avgFaceStress,app, mat, coreMatName): - #This is a recursive function for LF. It appends to the list of LF for each - #element that has a positive LF. EC - m,__ = avgFaceStress.shape - for i in np.arange(1,m+1).reshape(-1): - elno = avgFaceStress(i,1) - S11a = avgFaceStress(i,2) - S22a = avgFaceStress(i,3) - S12a = avgFaceStress(i,7) - #Ignoring other stresses for the time being. - #if elno==305 - #disp('press pause') - #pause(10) - lf,phicr = checkWrinkle(np.array([[S11a],[S22a],[S12a]]),mat,app,coreMatName) + raise Exception( + "The number of Rayleigh weights is not equal to the number of wind speeds contained in the rccdata.mat file" + ) + + return wt, rccdata + + +def getLoadFactorsForElementsWithSameSection( + LF, ansysSecNumber, avgFaceStress, app, mat, coreMatName +): + # This is a recursive function for LF. It appends to the list of LF for each + # element that has a positive LF. EC + m, __ = avgFaceStress.shape + for i in np.arange(1, m + 1).reshape(-1): + elno = avgFaceStress(i, 1) + S11a = avgFaceStress(i, 2) + S22a = avgFaceStress(i, 3) + S12a = avgFaceStress(i, 7) + # Ignoring other stresses for the time being. + # if elno==305 + # disp('press pause') + # pause(10) + lf, phicr = checkWrinkle( + np.array([[S11a], [S22a], [S12a]]), mat, app, coreMatName + ) if lf >= 0: - LF = np.array([[LF],[ansysSecNumber,elno,lf,phicr]]) - #end - - - def checkWrinkle(S_alphaBeta, mat, app, coreMatName): + LF = np.array([[LF], [ansysSecNumber, elno, lf, phicr]]) + # end + + def checkWrinkle(S_alphaBeta, mat, app, coreMatName): # For a single finite element, given the average in-plane stresses # in a face-sheet of that element, compute the load factor for that # element. EC - + # lf - scalar load factor for the element # phicr - an angle, degrees. 
The direction of wrinkling # S11a,S22a,S12a - respective average face sheet stress # mat - material object # app - blade data - - #Locate the face sheet + + # Locate the face sheet cellMat = np.array([]) for i in range(len(mat.layer)): - cellMat = np.array([[cellMat],[np.array([mat.layer[i].layerName])]]) - + cellMat = np.array([[cellMat], [np.array([mat.layer[i].layerName])]]) + kbalsa = np.find(str(coreMatName) == str(cellMat)) - iLayer = np.arange(1,(kbalsa - 1)+1) - - #ilayer=(kbalsa+1):numel(cellMat)); #Number of distinct materials in the bottom face + iLayer = np.arange(1, (kbalsa - 1) + 1) + + # ilayer=(kbalsa+1):numel(cellMat)); #Number of distinct materials in the bottom face matCore = app.matdb(np.find(str(coreMatName) == str(app.matlist))) - if str(matCore.type) == str('orthotropic'): + if str(matCore.type) == str("orthotropic"): Ec = matCore.ez else: - if str(matCore.type) == str('isotropic'): + if str(matCore.type) == str("isotropic"): Ec = matCore.ex Gc = Ec / (2 * (1 + matCore.nuxy)) else: - print('Material "%s" not found in database.',matCore.type) - raise Exception('Material type "%s" not found in database.',matCore.type) - - #ilayer=(kbalsa+1):numel(cellMat)); #Number of distinct materials in the bottom face + print('Material "%s" not found in database.', matCore.type) + raise Exception( + 'Material type "%s" not found in database.', matCore.type + ) + + # ilayer=(kbalsa+1):numel(cellMat)); #Number of distinct materials in the bottom face dangle = 2 - + N = 180 / dangle + 1 - + angle = 0 - invLF = np.zeros((N,1)) - #Apt=zeros(3,3); - #Bpt=zeros(3,3); - Dpt = np.zeros((3,3)) - #Find total height of facesheet + invLF = np.zeros((N, 1)) + # Apt=zeros(3,3); + # Bpt=zeros(3,3); + Dpt = np.zeros((3, 3)) + # Find total height of facesheet h = 0 for klay in range(iLayer.size): - h = h + mat.layer(iLayer[klay]).thicknessA * mat.layer(iLayer[klay]).quantity - + h = ( + h + + mat.layer(iLayer[klay]).thicknessA * mat.layer(iLayer[klay]).quantity + ) + for kang in range(N): - if str(matCore.type) == str('orthotropic'): - Gc = 1 / (np.sin(np.pi/180*angle) ** 2 * (1 / matCore.gyz) + np.cos(np.pi/180*angle) ** 2 * (1 / matCore.gxz)) - #Asssuming all ply angles are zero + if str(matCore.type) == str("orthotropic"): + Gc = 1 / ( + np.sin(np.pi / 180 * angle) ** 2 * (1 / matCore.gyz) + + np.cos(np.pi / 180 * angle) ** 2 * (1 / matCore.gxz) + ) + # Asssuming all ply angles are zero # R_sig=[cosd(angle)^2, sind(angle)^2, -2*sind(angle)*cosd(angle); #In-plate clockwise rotation of: angle # sind(angle)^2, cosd(angle)^2, 2*sind(angle)*cosd(angle) # sind(angle)*cosd(angle), -sind(angle)*cosd(angle), cosd(angle)^2-sind(angle)^2]; - z1 = - h / 2 + z1 = -h / 2 for klay in range(iLayer.size): - z2 = z1 + mat.layer(iLayer(klay)).thicknessA * mat.layer(iLayer(klay)).quantity - matklay = app.matdb(np.find(str(mat.layer(iLayer(klay)).layerName) == str(app.matlist))) + z2 = ( + z1 + + mat.layer(iLayer(klay)).thicknessA + * mat.layer(iLayer(klay)).quantity + ) + matklay = app.matdb( + np.find(str(mat.layer(iLayer(klay)).layerName) == str(app.matlist)) + ) # Bulid Plane Stress reduced compliance matrix for each # layer - #fprintf('z1 = #f z2 = #f mat = #f #s\n',z1,z2,matListnumber,mat.layer(klay).layerName) - #Entries common to either isotropic or orthotropic entries - Se = np.zeros((3,3)) - Se[1,1] = 1 / matklay.ex - Se[1,3] = 0 - Se[2,3] = 0 - Se[3,1] = 0 - Se[3,2] = 0 - if str(matklay.type) == str('orthotropic'): - Se[1,2] = - matklay.prxy / matklay.ex - Se[2,1] = - matklay.prxy / matklay.ex - Se[2,2] = 
1 / matklay.ey - Se[3,3] = 1 / matklay.gxy + # fprintf('z1 = #f z2 = #f mat = #f #s\n',z1,z2,matListnumber,mat.layer(klay).layerName) + # Entries common to either isotropic or orthotropic entries + Se = np.zeros((3, 3)) + Se[1, 1] = 1 / matklay.ex + Se[1, 3] = 0 + Se[2, 3] = 0 + Se[3, 1] = 0 + Se[3, 2] = 0 + if str(matklay.type) == str("orthotropic"): + Se[1, 2] = -matklay.prxy / matklay.ex + Se[2, 1] = -matklay.prxy / matklay.ex + Se[2, 2] = 1 / matklay.ey + Se[3, 3] = 1 / matklay.gxy else: - if str(matklay.type) == str('isotropic'): - Se[1,2] = - matklay.nuxy / matklay.ex - Se[2,1] = - matklay.nuxy / matklay.ex - Se[2,2] = 1 / matklay.ex - Se[3,3] = 2 * (1 + matklay.nuxy) / matklay.ex + if str(matklay.type) == str("isotropic"): + Se[1, 2] = -matklay.nuxy / matklay.ex + Se[2, 1] = -matklay.nuxy / matklay.ex + Se[2, 2] = 1 / matklay.ex + Se[3, 3] = 2 * (1 + matklay.nuxy) / matklay.ex else: - print('Material "%s" not found in database.',matklay.type) - raise Exception('Material type "%s" not found in database.',matklay.type) - #Apt=Apt+R_sig*inv(Se)*R_sig'*(z2-z1); - #Bpt=Bpt+1/2*R_sig*inv(Se)*R_sig'*(z2^2-z1^2); - Dpt = Dpt + 1 / 3 * R_sig * inv(Se) * np.transpose(R_sig) * (z2 ** 3 - z1 ** 3) + print('Material "%s" not found in database.', matklay.type) + raise Exception( + 'Material type "%s" not found in database.', matklay.type + ) + # Apt=Apt+R_sig*inv(Se)*R_sig'*(z2-z1); + # Bpt=Bpt+1/2*R_sig*inv(Se)*R_sig'*(z2^2-z1^2); + Dpt = Dpt + 1 / 3 * R_sig * inv(Se) * np.transpose(R_sig) * ( + z2**3 - z1**3 + ) z1 = z2 - Pcr = - 3 / 2 * (2 * Dpt[0,0] * Ec * Gc) ** (1 / 3) - Pphi = (R_sig[0,:] * S_alphaBeta) * h + Pcr = -3 / 2 * (2 * Dpt[0, 0] * Ec * Gc) ** (1 / 3) + Pphi = (R_sig[0, :] * S_alphaBeta) * h invLF[kang] = Pphi / Pcr angle = angle + dangle - - invlf,phicr_i = np.amax(invLF) + + invlf, phicr_i = np.amax(invLF) lf = 1 / invlf phicr = (phicr_i - 1) * dangle # if lf>1e6 @@ -378,7 +467,7 @@ def checkWrinkle(S_alphaBeta, mat, app, coreMatName): # xlabel('Angle, \phi [deg]') # ylabel('Load [N/m]') # hold on; - + # plot(angle, Pphi,'r') # plot(angle, lf*Pphi,'b') # legend('P_c_r','P_\phi',strcat(num2str(lf),'P_\phi')) @@ -395,23 +484,21 @@ def checkWrinkle(S_alphaBeta, mat, app, coreMatName): # else # #fprintf('\n #8.2g #i #8.2g #8.2g #8.2g #8.2g #8.2g\n',lf,ansysSecNumber,Pcr(phicr_i),Pphi(phicr_i),S11a,S22a,S12a) # end - return lf,phicr - + return lf, phicr + return LF - -def read_forces(filename): + +def read_forces(filename): # Open the file fid = open(filename) - if (fid == - 1): - raise Exception('Could not open file "%s"',filename) - + if fid == -1: + raise Exception('Could not open file "%s"', filename) + header = fid.readline() - + filecontents = np.transpose(fid.read()) fid.close() # 'Z (m) Fx (N) Fy (N) M (N-m) Alpha x_off y_off' - forces = textscan(filecontents,np.matlib.repmat('%f',1,7)) + forces = textscan(filecontents, np.matlib.repmat("%f", 1, 7)) return forces - - diff --git a/src/pynumad/analysis/ansys/write.py b/src/pynumad/analysis/ansys/write.py index 56f4522..7228f1c 100644 --- a/src/pynumad/analysis/ansys/write.py +++ b/src/pynumad/analysis/ansys/write.py @@ -11,640 +11,729 @@ from pynumad.analysis.ansys.beamforce import * from pynumad.analysis.ansys.read import readANSYSoutputs -from pynumad.analysis.ansys.utility import txt2mat, getLoadFactorsForElementsWithSameSection,\ - getMatrialLayerInfoWithOutGUI +from pynumad.analysis.ansys.utility import ( + txt2mat, + getLoadFactorsForElementsWithSameSection, + getMatrialLayerInfoWithOutGUI, +) -def 
writeAnsysDeflections(blade, config, iLoad, fid, deflectionFilename): + +def writeAnsysDeflections(blade, config, iLoad, fid, deflectionFilename): # Outer AeroShell - nStationLayups,nStations = blade.stacks.shape - maxSectionNumber = int(str(nStations)+str(nStationLayups)) - + nStationLayups, nStations = blade.stacks.shape + maxSectionNumber = int(str(nStations) + str(nStationLayups)) + # The following two lines help make unique IDs for web sections # based on the highes section already defined for aeroshell orderOfMagnitude = int(np.floor(np.log10(maxSectionNumber))) - webSectionIDstart = np.ceil(maxSectionNumber / 10 ** orderOfMagnitude) * 10 ** orderOfMagnitude - fid.write('/POST1\n' % ()) - fid.write('set,last\n' % ()) - fid.write('RSYS,0\n' % ()) - - fid.write('seltol,0.05\n' % ()) + webSectionIDstart = ( + np.ceil(maxSectionNumber / 10**orderOfMagnitude) * 10**orderOfMagnitude + ) + fid.write("/POST1\n" % ()) + fid.write("set,last\n" % ()) + fid.write("RSYS,0\n" % ()) + + fid.write("seltol,0.05\n" % ()) for i in range(blade.ispan.size): - fid.write('*CFOPEN, %s,out\n' % (deflectionFilename+'-'+str(i))) - fid.write('ESEL,S,SEC,,1,%i \n' % (webSectionIDstart)) - #fprintf(fid,'ESEL,S,SEC,,1,999 \n'); #Selects aero shell only - fid.write('nsle,S, \n' % ()) - fid.write('nsel,r,loc,z,%f \n' % (blade.ispan[i])) - #fprintf(fid,'nsll,s,,\n'); + fid.write("*CFOPEN, %s,out\n" % (deflectionFilename + "-" + str(i))) + fid.write("ESEL,S,SEC,,1,%i \n" % (webSectionIDstart)) + # fprintf(fid,'ESEL,S,SEC,,1,999 \n'); #Selects aero shell only + fid.write("nsle,S, \n" % ()) + fid.write("nsel,r,loc,z,%f \n" % (blade.ispan[i])) + # fprintf(fid,'nsll,s,,\n'); if i == blade.ispan.size: - fid.write('nsel,u,node,,z_master_node_number\n' % ()) - #fprintf(fid,'nplot\n'); - fid.write('*GET, NsectionNodes, NODE,0,COUNT !Get the number of nodes in the set\n' % ()) - fid.write('*GET, node_num, NODE,0,NUM,MIN !Get the smallest number node in the set\n' % ()) - fid.write('*DO, i, 1, NsectionNodes !loop through all nodes in cross section\n' % ()) - fid.write('*GET, xpos, NODE,node_num,loc,X\n' % ()) - fid.write('*GET, ypos, NODE,node_num,loc,Y\n' % ()) - fid.write('*GET, zpos, NODE,node_num,loc,Z\n' % ()) - fid.write('*GET, u1, NODE,node_num,U,X\n' % ()) - fid.write('*GET, u2, NODE,node_num,U,Y\n' % ()) - fid.write('*GET, u3, NODE,node_num,U,Z\n' % ()) - fid.write(' *VWRITE,node_num,xpos,ypos,zpos,u1,u2,u3\n' % ()) - fid.write('(E20.12,E20.12,E20.12,E20.12,E20.12,E20.12,E20.12)\n' % ()) - fid.write('node_num=NDNEXT(node_num) !Get the next higher node number in the set\n' % ()) - fid.write('*ENDDO\n' % ()) - fid.write('*CFCLOS\n' % ()) - fid.write('\n \n \n' % ()) - - fid.write('finish\n' % ()) + fid.write("nsel,u,node,,z_master_node_number\n" % ()) + # fprintf(fid,'nplot\n'); + fid.write( + "*GET, NsectionNodes, NODE,0,COUNT !Get the number of nodes in the set\n" + % () + ) + fid.write( + "*GET, node_num, NODE,0,NUM,MIN !Get the smallest number node in the set\n" + % () + ) + fid.write( + "*DO, i, 1, NsectionNodes !loop through all nodes in cross section\n" + % () + ) + fid.write("*GET, xpos, NODE,node_num,loc,X\n" % ()) + fid.write("*GET, ypos, NODE,node_num,loc,Y\n" % ()) + fid.write("*GET, zpos, NODE,node_num,loc,Z\n" % ()) + fid.write("*GET, u1, NODE,node_num,U,X\n" % ()) + fid.write("*GET, u2, NODE,node_num,U,Y\n" % ()) + fid.write("*GET, u3, NODE,node_num,U,Z\n" % ()) + fid.write(" *VWRITE,node_num,xpos,ypos,zpos,u1,u2,u3\n" % ()) + fid.write("(E20.12,E20.12,E20.12,E20.12,E20.12,E20.12,E20.12)\n" % ()) + 
fid.write( + "node_num=NDNEXT(node_num) !Get the next higher node number in the set\n" + % () + ) + fid.write("*ENDDO\n" % ()) + fid.write("*CFCLOS\n" % ()) + fid.write("\n \n \n" % ()) + + fid.write("finish\n" % ()) return -def writeAnsysFagerberWrinkling(app, SkinAreas, compsInModel, coreMatName): - #limitingElementData - [ansysSecNumber elno lf phicr] +def writeAnsysFagerberWrinkling(app, SkinAreas, compsInModel, coreMatName): + # limitingElementData - [ansysSecNumber elno lf phicr] TotalStations = app.station.size TotalShearwebs = app.shearweb.size ################# Main loop #1: loop around aero shell. ################# LF = [] - for kStation in range(TotalStations-1): + for kStation in range(TotalStations - 1): for kArea in range(SkinAreas[kStation].startIB.size): - #See if the section contatins Balsa/core material name (i.e find - #the sandwhich panels) + # See if the section contatins Balsa/core material name (i.e find + # the sandwhich panels) n = str(SkinAreas[kStation].Material[kArea]) == str(app.matlist) mat = app.matdb(n) if coreMatName in mat.layer.layerName: - ansysSecNumber = np.where(str(SkinAreas[kStation].Material[kArea]) == str(compsInModel)) - file = 'section-'+str(ansysSecNumber)+'-faceAvgStresses.txt' + ansysSecNumber = np.where( + str(SkinAreas[kStation].Material[kArea]) == str(compsInModel) + ) + file = "section-" + str(ansysSecNumber) + "-faceAvgStresses.txt" avgFaceStress = txt2mat(file) os.delete(file) - LF = getLoadFactorsForElementsWithSameSection(LF,ansysSecNumber,avgFaceStress,app,mat,coreMatName) - + LF = getLoadFactorsForElementsWithSameSection( + LF, ansysSecNumber, avgFaceStress, app, mat, coreMatName + ) + ################# Main loop #2: loop along web. ################# for kShearweb in range(TotalShearwebs): n = str(app.shearweb(kShearweb).Material) == str(app.matlist) mat = app.matdb(n) if coreMatName in mat.layer.layerName: - ansysSecNumber = np.where(str(app.shearweb(kShearweb).Material) == str(compsInModel)) - file = 'section-'+str(ansysSecNumber + 1000)+'-faceAvgStresses.txt' + ansysSecNumber = np.where( + str(app.shearweb(kShearweb).Material) == str(compsInModel) + ) + file = "section-" + str(ansysSecNumber + 1000) + "-faceAvgStresses.txt" avgFaceStress = txt2mat(file) os.delete(file) - LF = getLoadFactorsForElementsWithSameSection(LF,ansysSecNumber + 1000,avgFaceStress,app,mat,coreMatName) - - minLF,index = np.amin(LF[:,2]) - limitingElementData = LF[index,:] - print('\n\n The minimum wrinkling LF is: %f, wrinkle angle: %.2f°' % (minLF,LF[index,3])) - print('\n and occurs in section number %i, element number %i\n, ' % (LF[index,0],LF[index,1])) + LF = getLoadFactorsForElementsWithSameSection( + LF, ansysSecNumber + 1000, avgFaceStress, app, mat, coreMatName + ) + + minLF, index = np.amin(LF[:, 2]) + limitingElementData = LF[index, :] + print( + "\n\n The minimum wrinkling LF is: %f, wrinkle angle: %.2f°" + % (minLF, LF[index, 3]) + ) + print( + "\n and occurs in section number %i, element number %i\n, " + % (LF[index, 0], LF[index, 1]) + ) # [maxLF,index]=max(LF(:,3)); # fprintf('\n\n The maximum LF is: #f, wrinkle angle: #.2f°' ,maxLF, LF(index,4)) # fprintf('\n and occurs in section number #i, element number #i, ',LF(index,1),LF(index,2)) - + return limitingElementData -def writeAnsysFatigue(fid, iLoad): +def writeAnsysFatigue(fid, iLoad): ###################Outputs for fatigue analysis in MATLAB################# - fid.write('! 
BEGIN FATIGUE SCRIPT\n' % ()) - fid.write('allsel\n' % ()) - fid.write('/prep7\n' % ()) - fid.write('esel,all\n' % ()) - fid.write('allsel\n' % ()) - fid.write('/prep7\n' % ()) - fid.write('esel,all\n' % ()) - fid.write('esel,u,type,,21 \n' % ()) - fid.write('/POST1\n' % ()) - fid.write('set,LAST\n' % ()) - fid.write('RSYS,SOLU\n' % ()) - + fid.write("! BEGIN FATIGUE SCRIPT\n" % ()) + fid.write("allsel\n" % ()) + fid.write("/prep7\n" % ()) + fid.write("esel,all\n" % ()) + fid.write("allsel\n" % ()) + fid.write("/prep7\n" % ()) + fid.write("esel,all\n" % ()) + fid.write("esel,u,type,,21 \n" % ()) + fid.write("/POST1\n" % ()) + fid.write("set,LAST\n" % ()) + fid.write("RSYS,SOLU\n" % ()) + ### Element strains and curvatures ### - fid.write('ALLSEL\n' % ()) - fid.write('ETABLE, zcent,CENT,Z\n' % ()) - fid.write('ETABLE, eps11,SMISC,9 \n' % ()) - fid.write('ETABLE, eps22,SMISC,10 \n' % ()) - fid.write('ETABLE, eps12,SMISC,11 \n' % ()) - fid.write('ETABLE, kapa11,SMISC,12 \n' % ()) - fid.write('ETABLE, kapa22,SMISC,13 \n' % ()) - fid.write('ETABLE, kapa12,SMISC,14 \n' % ()) - fid.write('ETABLE, gamma13,SMISC,15 \n' % ()) - fid.write('ETABLE, gamma23,SMISC,16 \n' % ()) - fid.write('/output,plateStrains-all-%s,txt\n' % (str(iLoad))) - fid.write('PRETAB,zcent,eps11,eps22,eps12,kapa11,kapa22,kapa12,gamma12,gamma13,gamma23\n' % ()) - fid.write('ETABLE,ERAS\n\n' % ()) - fid.write('finish\n' % ()) - fid.write('! END FATIGUE OUTPUT SCRIPT\n' % ()) + fid.write("ALLSEL\n" % ()) + fid.write("ETABLE, zcent,CENT,Z\n" % ()) + fid.write("ETABLE, eps11,SMISC,9 \n" % ()) + fid.write("ETABLE, eps22,SMISC,10 \n" % ()) + fid.write("ETABLE, eps12,SMISC,11 \n" % ()) + fid.write("ETABLE, kapa11,SMISC,12 \n" % ()) + fid.write("ETABLE, kapa22,SMISC,13 \n" % ()) + fid.write("ETABLE, kapa12,SMISC,14 \n" % ()) + fid.write("ETABLE, gamma13,SMISC,15 \n" % ()) + fid.write("ETABLE, gamma23,SMISC,16 \n" % ()) + fid.write("/output,plateStrains-all-%s,txt\n" % (str(iLoad))) + fid.write( + "PRETAB,zcent,eps11,eps22,eps12,kapa11,kapa22,kapa12,gamma12,gamma13,gamma23\n" + % () + ) + fid.write("ETABLE,ERAS\n\n" % ()) + fid.write("finish\n" % ()) + fid.write("! 
END FATIGUE OUTPUT SCRIPT\n" % ()) return - -def writeAnsysGetFaceStresses(blade, fid, coreMatName): - isoorthoInModel,compsInModel,SkinAreas,app = getMatrialLayerInfoWithOutGUI(blade) - #fid=fopen('getFaceStresses.mac','w+'); + +def writeAnsysGetFaceStresses(blade, fid, coreMatName): + isoorthoInModel, compsInModel, SkinAreas, app = getMatrialLayerInfoWithOutGUI(blade) + # fid=fopen('getFaceStresses.mac','w+'); TotalStations = blade.ispan.size - for kStation in range(TotalStations-1): - #kPanel=find(~cellfun('isempty',strfind([SkinAreas(kStation).Material],'PANEL'))); #Array that stores the kArea index that contains 'PANEL' in the name - #for i=1:numel(kPanel) + for kStation in range(TotalStations - 1): + # kPanel=find(~cellfun('isempty',strfind([SkinAreas(kStation).Material],'PANEL'))); #Array that stores the kArea index that contains 'PANEL' in the name + # for i=1:numel(kPanel) for kArea in range(SkinAreas[kStation].startIB.size): - #See if the section contatins Balsa/core material name (i.e find - #the sandwhich panels) + # See if the section contatins Balsa/core material name (i.e find + # the sandwhich panels) n = str(SkinAreas[kStation].Material[kArea]) == str(app.matlist) mat = app.matdb[n] if coreMatName in mat.layer.layerName: - ansysSecNumber = np.where(str(SkinAreas[kStation].Material[kArea]) == str(compsInModel)) - writeANSYSinputFile(fid,mat,ansysSecNumber,coreMatName) - - fid.write('\n' % ()) - fid.write('\n' % ()) - fid.write('!*************** WEB ***************\n' % ()) - fid.write('\n' % ()) - fid.write('\n' % ()) + ansysSecNumber = np.where( + str(SkinAreas[kStation].Material[kArea]) == str(compsInModel) + ) + writeANSYSinputFile(fid, mat, ansysSecNumber, coreMatName) + + fid.write("\n" % ()) + fid.write("\n" % ()) + fid.write("!*************** WEB ***************\n" % ()) + fid.write("\n" % ()) + fid.write("\n" % ()) fid.close() - #Web + # Web TotalShearwebs = np.asarray(app.shearweb).size for kShearweb in range(TotalShearwebs): n = str(app.shearweb[kShearweb].Material) == str(app.matlist) mat = app.matdb[n] if coreMatName in mat.layer.layerName: - ansysSecNumber = np.where(str(app.shearweb[kShearweb].Material) == str(compsInModel)) + ansysSecNumber = np.where( + str(app.shearweb[kShearweb].Material) == str(compsInModel) + ) ansysSecNumber = ansysSecNumber + 1000 - writeANSYSinputFile(fid,mat,ansysSecNumber,coreMatName) - - fid.write('FINISH\n' % ()) - fid.write('allsel\n' % ()) - return app,SkinAreas,compsInModel + writeANSYSinputFile(fid, mat, ansysSecNumber, coreMatName) + + fid.write("FINISH\n" % ()) + fid.write("allsel\n" % ()) + return app, SkinAreas, compsInModel -def writeANSYSinputFile(fid, mat, ansysSecNumber, coreMatName): +def writeANSYSinputFile(fid, mat, ansysSecNumber, coreMatName): #####Find the face sheet#### cellMat = np.array([]) for i in range(len(mat.layer)): - cellMat = np.array([[cellMat],[np.array([mat.layer(i).layerName])]]) - + cellMat = np.array([[cellMat], [np.array([mat.layer(i).layerName])]]) + kbalsa = np.where(str(coreMatName) == str(cellMat)) - iLayer = np.arange(0,(kbalsa - 1)) - + iLayer = np.arange(0, (kbalsa - 1)) + # Find the number of layers in the face qty = 0 - + for i in range(iLayer.size): qty = qty + mat.layer(iLayer[i]).quantity - - #Loop through the top facesheet layers - - fid.write('!*************** ansysSecNumber = %i ***************\n' % (ansysSecNumber)) - fid.write('/POST1\n' % ()) - fid.write('*DEL,iel\n' % ()) - fid.write('*DEL,enum\n' % ()) - fid.write('*DEL,nelTemp\n' % ()) - fid.write('RSYS, SOLU\n' % ()) - 
fid.write('ALLSEL\n' % ()) - fid.write('ESEL, S, SEC,,%i\n' % (ansysSecNumber)) - fid.write('*GET, enum, ELEM, 0, NUM, MIN, ! lowest element number in the selected set\n' % ()) - fid.write('*get, nelTemp, ELEM,0,count\n' % ()) - fid.write('*DIM, iel,ARRAY,nelTemp\n' % ()) - fname = 'section-'+str(ansysSecNumber)+'-faceAvgStresses' - - if os.path.isfile(fname+'.txt'): - os.delete(fname+'.txt') - - fid.write('*CFOPEN, %s, txt,,APPEND\n' % (fname)) - #Create an array with the element numbers in the selected set - fid.write('*DO, J, 1,nelTemp !Loop through elements\n' % ()) - fid.write('iel(J)=enum\n' % ()) - fid.write('enum =ELNEXT(enum) !Next higher element number above N in selected set\n' % ()) - fid.write('*ENDDO\n' % ()) - fid.write('\n' % ()) - fid.write('ALLSEL\n' % ()) - fid.write('*DO, J, 1,nelTemp !Loop through elements\n' % ()) - fid.write(' S11a=0 !Initialize average stress variables for each element\n' % ()) - fid.write(' S22a=0\n' % ()) - fid.write(' S33a=0\n' % ()) - fid.write(' S23a=0\n' % ()) - fid.write(' S13a=0\n' % ()) - fid.write(' S12a=0\n' % ()) - fid.write(' *DO, I, 1,%i !Loop through face layers\n' % (qty)) - fid.write(' LAYER,I\n' % ()) - fid.write(' SHELL,MID !Stress result at midlayer\n' % ()) - fid.write(' ESEL,S,ELEM,,iel(J)\n' % ()) - fid.write(' ETABLE,ERAS !Each element gets a new element table\n' % ()) - fid.write(' ETABLE,S11,S,X,AVG !AVG - Store averaged element centroid value\n' % ()) - fid.write(' ETABLE,S22,S,Y,AVG\n' % ()) - fid.write(' ETABLE,S33,S,Z,AVG\n' % ()) - fid.write(' ETABLE,S23,S,YZ,AVG\n' % ()) - fid.write(' ETABLE,S13,S,XZ,AVG\n' % ()) - fid.write(' ETABLE,S12,S,XY,AVG\n' % ()) - fid.write(' *GET,tempS11, ELEM, iel(J), ETAB, S11\n' % ()) - fid.write(' *GET,tempS22, ELEM, iel(J), ETAB, S22\n' % ()) - fid.write(' *GET,tempS33, ELEM, iel(J), ETAB, S33\n' % ()) - fid.write(' *GET,tempS23, ELEM, iel(J), ETAB, S23\n' % ()) - fid.write(' *GET,tempS13, ELEM, iel(J), ETAB, S13\n' % ()) - fid.write(' *GET,tempS12, ELEM, iel(J), ETAB, S12\n' % ()) - fid.write(' S11a=S11a+tempS11\n' % ()) - fid.write(' S22a=S22a+tempS22\n' % ()) - fid.write(' S33a=S33a+tempS33\n' % ()) - fid.write(' S23a=S23a+tempS23\n' % ()) - fid.write(' S13a=S13a+tempS13\n' % ()) - fid.write(' S12a=S12a+tempS12\n' % ()) - fid.write(' *ENDDO\n' % ()) - fid.write(' S11a=S11a/%i\n' % (qty)) - fid.write(' S22a=S22a/%i\n' % (qty)) - fid.write(' S33a=S33a/%i\n' % (qty)) - fid.write(' S23a=S23a/%i\n' % (qty)) - fid.write(' S13a=S13a/%i\n' % (qty)) - fid.write(' S12a=S12a/%i\n' % (qty)) - fid.write(' ELNO=iel(J) !It is needed to refer to ELNO in the command below\n' % ()) - fid.write('*VWRITE,ELNO,S11a,S22a,S33a,S23a,S13a,S12a\n' % ()) - fid.write('(E20.12,E20.12,E20.12,E20.12,E20.12,E20.12,E20.12)\n' % ()) - fid.write('*ENDDO\n' % ()) - fid.write('*CFCLOS\n' % ()) - fid.write('\n' % ()) - fid.write('\n' % ()) + + # Loop through the top facesheet layers + + fid.write( + "!*************** ansysSecNumber = %i ***************\n" % (ansysSecNumber) + ) + fid.write("/POST1\n" % ()) + fid.write("*DEL,iel\n" % ()) + fid.write("*DEL,enum\n" % ()) + fid.write("*DEL,nelTemp\n" % ()) + fid.write("RSYS, SOLU\n" % ()) + fid.write("ALLSEL\n" % ()) + fid.write("ESEL, S, SEC,,%i\n" % (ansysSecNumber)) + fid.write( + "*GET, enum, ELEM, 0, NUM, MIN, ! 
lowest element number in the selected set\n" + % () + ) + fid.write("*get, nelTemp, ELEM,0,count\n" % ()) + fid.write("*DIM, iel,ARRAY,nelTemp\n" % ()) + fname = "section-" + str(ansysSecNumber) + "-faceAvgStresses" + + if os.path.isfile(fname + ".txt"): + os.delete(fname + ".txt") + + fid.write("*CFOPEN, %s, txt,,APPEND\n" % (fname)) + # Create an array with the element numbers in the selected set + fid.write("*DO, J, 1,nelTemp !Loop through elements\n" % ()) + fid.write("iel(J)=enum\n" % ()) + fid.write( + "enum =ELNEXT(enum) !Next higher element number above N in selected set\n" % () + ) + fid.write("*ENDDO\n" % ()) + fid.write("\n" % ()) + fid.write("ALLSEL\n" % ()) + fid.write("*DO, J, 1,nelTemp !Loop through elements\n" % ()) + fid.write(" S11a=0 !Initialize average stress variables for each element\n" % ()) + fid.write(" S22a=0\n" % ()) + fid.write(" S33a=0\n" % ()) + fid.write(" S23a=0\n" % ()) + fid.write(" S13a=0\n" % ()) + fid.write(" S12a=0\n" % ()) + fid.write(" *DO, I, 1,%i !Loop through face layers\n" % (qty)) + fid.write(" LAYER,I\n" % ()) + fid.write(" SHELL,MID !Stress result at midlayer\n" % ()) + fid.write(" ESEL,S,ELEM,,iel(J)\n" % ()) + fid.write(" ETABLE,ERAS !Each element gets a new element table\n" % ()) + fid.write( + " ETABLE,S11,S,X,AVG !AVG - Store averaged element centroid value\n" % () + ) + fid.write(" ETABLE,S22,S,Y,AVG\n" % ()) + fid.write(" ETABLE,S33,S,Z,AVG\n" % ()) + fid.write(" ETABLE,S23,S,YZ,AVG\n" % ()) + fid.write(" ETABLE,S13,S,XZ,AVG\n" % ()) + fid.write(" ETABLE,S12,S,XY,AVG\n" % ()) + fid.write(" *GET,tempS11, ELEM, iel(J), ETAB, S11\n" % ()) + fid.write(" *GET,tempS22, ELEM, iel(J), ETAB, S22\n" % ()) + fid.write(" *GET,tempS33, ELEM, iel(J), ETAB, S33\n" % ()) + fid.write(" *GET,tempS23, ELEM, iel(J), ETAB, S23\n" % ()) + fid.write(" *GET,tempS13, ELEM, iel(J), ETAB, S13\n" % ()) + fid.write(" *GET,tempS12, ELEM, iel(J), ETAB, S12\n" % ()) + fid.write(" S11a=S11a+tempS11\n" % ()) + fid.write(" S22a=S22a+tempS22\n" % ()) + fid.write(" S33a=S33a+tempS33\n" % ()) + fid.write(" S23a=S23a+tempS23\n" % ()) + fid.write(" S13a=S13a+tempS13\n" % ()) + fid.write(" S12a=S12a+tempS12\n" % ()) + fid.write(" *ENDDO\n" % ()) + fid.write(" S11a=S11a/%i\n" % (qty)) + fid.write(" S22a=S22a/%i\n" % (qty)) + fid.write(" S33a=S33a/%i\n" % (qty)) + fid.write(" S23a=S23a/%i\n" % (qty)) + fid.write(" S13a=S13a/%i\n" % (qty)) + fid.write(" S12a=S12a/%i\n" % (qty)) + fid.write( + " ELNO=iel(J) !It is needed to refer to ELNO in the command below\n" % () + ) + fid.write("*VWRITE,ELNO,S11a,S22a,S33a,S23a,S13a,S12a\n" % ()) + fid.write("(E20.12,E20.12,E20.12,E20.12,E20.12,E20.12,E20.12)\n" % ()) + fid.write("*ENDDO\n" % ()) + fid.write("*CFCLOS\n" % ()) + fid.write("\n" % ()) + fid.write("\n" % ()) return - - -def writeAnsysLinearBuckling(blade, config, iLoad, fid, bucklingFilename): - fid.write('! BEGIN BUCKLE MACRO\n' % ()) - fid.write('allsel\n' % ()) - fid.write('/solu\n' % ()) - fid.write('irlf,-1\n' % ()) - fid.write('pstres,on\n' % ()) - fid.write('antype,buckle\n' % ()) - fid.write('bucopt,lanb,'+str(config.analysisFlags.globalBuckling)+',,,RANGE\n') % () - #fprintf(fid,strcat('MXPAND,',int2str(nmodes),',0,0,1\n'), nmodes); # Required for element stress/strain, etc.. - fid.write('solve\n' % ()) - fid.write('finish\n' % ()) - fid.write('/post1\n' % ()) - fid.write(np.array(['/output,',bucklingFilename,',out\n']) % ()) - fid.write('set,list\n' % ()) - fid.write('/output\n' % ()) - fid.write('finish\n' % ()) - fid.write('! 
END BUCKLE MACRO\n' % ()) + + +def writeAnsysLinearBuckling(blade, config, iLoad, fid, bucklingFilename): + fid.write("! BEGIN BUCKLE MACRO\n" % ()) + fid.write("allsel\n" % ()) + fid.write("/solu\n" % ()) + fid.write("irlf,-1\n" % ()) + fid.write("pstres,on\n" % ()) + fid.write("antype,buckle\n" % ()) + fid.write( + "bucopt,lanb," + str(config.analysisFlags.globalBuckling) + ",,,RANGE\n" + ) % () + # fprintf(fid,strcat('MXPAND,',int2str(nmodes),',0,0,1\n'), nmodes); # Required for element stress/strain, etc.. + fid.write("solve\n" % ()) + fid.write("finish\n" % ()) + fid.write("/post1\n" % ()) + fid.write(np.array(["/output,", bucklingFilename, ",out\n"]) % ()) + fid.write("set,list\n" % ()) + fid.write("/output\n" % ()) + fid.write("finish\n" % ()) + fid.write("! END BUCKLE MACRO\n" % ()) return -def writeAnsysLocalFields(blade, config, iLoad, fid - ): +def writeAnsysLocalFields(blade, config, iLoad, fid): ###################Outputs for fatigue analysis in MATLAB################# - fid.write('! BEGIN LOCAL FIELD SCRIPT\n' % ()) - fid.write('allsel\n' % ()) - fid.write('/post1\n' % ()) - fid.write('set,last\n' % ()) - fid.write('esel,all\n' % ()) + fid.write("! BEGIN LOCAL FIELD SCRIPT\n" % ()) + fid.write("allsel\n" % ()) + fid.write("/post1\n" % ()) + fid.write("set,last\n" % ()) + fid.write("esel,all\n" % ()) ### Element Stress ### - fid.write('ALLSEL\n' % ()) - fid.write('ETABLE, zcent,CENT,Z\n' % ()) - fid.write('ETABLE, eps11,SMISC,9 \n' % ()) - fid.write('ETABLE, eps22,SMISC,10 \n' % ()) - fid.write('ETABLE, eps12,SMISC,11 \n' % ()) - fid.write('ETABLE, kapa11,SMISC,12 \n' % ()) - fid.write('ETABLE, kapa22,SMISC,13 \n' % ()) - fid.write('ETABLE, kapa12,SMISC,14 \n' % ()) - fid.write('ETABLE, gamma13,SMISC,15 \n' % ()) - fid.write('ETABLE, gamma23,SMISC,16 \n' % ()) - fid.write('/output,plateStrains-all-%s,txt\n' % (str(iLoad))) - fid.write('PRETAB,zcent,eps11,eps22,eps12,kapa11,kapa22,kapa12,gamma12,gamma13,gamma23\n' % ()) - fid.write('ETABLE,ERAS\n\n' % ()) - #fprintf(fid,'ETABLE, zcent,CENT,Z\n'); - #fprintf(fid,'ETABLE, N11,SMISC,1 \n'); - #fprintf(fid,'ETABLE, N22,SMISC,2 \n'); - #fprintf(fid,'ETABLE, N12,SMISC,3 \n'); - #fprintf(fid,'ETABLE, M11,SMISC,4 \n'); - #fprintf(fid,'ETABLE, M22,SMISC,5 \n'); - #fprintf(fid,'ETABLE, M12,SMISC,6 \n'); - #fprintf(fid,'ETABLE, Q13,SMISC,7 \n'); - #fprintf(fid,'ETABLE, Q23,SMISC,8 \n'); - #fprintf(fid,'/output,plateExamplePlateForces-all-#s,txt\n',int2str(iLoad)); - #fprintf(fid,'/output,plateForces-all-#s,txt\n',int2str(iLoad)); - #fprintf(fid,'PRETAB,zcent,N11,N22,N12,M11,M22,M12,Q12,Q13,Q23\n'); - #fprintf(fid, 'ETABLE,ERAS\n\n'); - fid.write('finish\n' % ()) + fid.write("ALLSEL\n" % ()) + fid.write("ETABLE, zcent,CENT,Z\n" % ()) + fid.write("ETABLE, eps11,SMISC,9 \n" % ()) + fid.write("ETABLE, eps22,SMISC,10 \n" % ()) + fid.write("ETABLE, eps12,SMISC,11 \n" % ()) + fid.write("ETABLE, kapa11,SMISC,12 \n" % ()) + fid.write("ETABLE, kapa22,SMISC,13 \n" % ()) + fid.write("ETABLE, kapa12,SMISC,14 \n" % ()) + fid.write("ETABLE, gamma13,SMISC,15 \n" % ()) + fid.write("ETABLE, gamma23,SMISC,16 \n" % ()) + fid.write("/output,plateStrains-all-%s,txt\n" % (str(iLoad))) + fid.write( + "PRETAB,zcent,eps11,eps22,eps12,kapa11,kapa22,kapa12,gamma12,gamma13,gamma23\n" + % () + ) + fid.write("ETABLE,ERAS\n\n" % ()) + # fprintf(fid,'ETABLE, zcent,CENT,Z\n'); + # fprintf(fid,'ETABLE, N11,SMISC,1 \n'); + # fprintf(fid,'ETABLE, N22,SMISC,2 \n'); + # fprintf(fid,'ETABLE, N12,SMISC,3 \n'); + # fprintf(fid,'ETABLE, M11,SMISC,4 \n'); + # fprintf(fid,'ETABLE, 
M22,SMISC,5 \n'); + # fprintf(fid,'ETABLE, M12,SMISC,6 \n'); + # fprintf(fid,'ETABLE, Q13,SMISC,7 \n'); + # fprintf(fid,'ETABLE, Q23,SMISC,8 \n'); + # fprintf(fid,'/output,plateExamplePlateForces-all-#s,txt\n',int2str(iLoad)); + # fprintf(fid,'/output,plateForces-all-#s,txt\n',int2str(iLoad)); + # fprintf(fid,'PRETAB,zcent,N11,N22,N12,M11,M22,M12,Q12,Q13,Q23\n'); + # fprintf(fid, 'ETABLE,ERAS\n\n'); + fid.write("finish\n" % ()) return -def writeAnsysNonLinearBuckling(ansysFilename, ansys_path, ansys_product, config, ii, jj, ncpus, iLoad): - warnings.warn('output designvar. Currently does not work for nonlinear cases') - script_name = 'commands3-'+str(ii)+'.mac' - script_out = 'output3-'+str(ii)+'-'+str(jj)+'.txt' - fid = open(script_name,'w+') - fid.write('!************ MODE-%i ************\n' % (ii)) - fid.write('/FILNAME,%s,1\n' % ansysFilename + '-Load' + str(iLoad)) - - fid.write('resume, %s,db\n' % (ansysFilename+'-Load'+str(iLoad))) - #Get Max displacement, UY +def writeAnsysNonLinearBuckling( + ansysFilename, ansys_path, ansys_product, config, ii, jj, ncpus, iLoad +): + warnings.warn("output designvar. Currently does not work for nonlinear cases") + script_name = "commands3-" + str(ii) + ".mac" + script_out = "output3-" + str(ii) + "-" + str(jj) + ".txt" + fid = open(script_name, "w+") + fid.write("!************ MODE-%i ************\n" % (ii)) + fid.write("/FILNAME,%s,1\n" % ansysFilename + "-Load" + str(iLoad)) + + fid.write("resume, %s,db\n" % (ansysFilename + "-Load" + str(iLoad))) + # Get Max displacement, UY # fprintf(fid,'/POST1\n'); # fprintf(fid,'SET,1,#i\n',ii); #Read in results # fprintf(fid,'nsel, all, node\n'); #Select all nodes # fprintf(fid,'*get, Zncount, node,0,count\n'); #Find the number (quantity) of nodes selected # fprintf(fid,'*dim,zNodeDisp,array,Zncount,1 \n'); #Allocate memory for an arry to hold nodal disp. - + # #For each node populate array with Y-displacements # fprintf(fid,'*DO, i,1,Zncount\n'); # fprintf(fid,'*VGET, zNodeDisp(i,1), NODE, i, U, Y\n'); # fprintf(fid,'*ENDDO\n'); - + # #Find the min/max disp. 
value # fprintf(fid,'*VSCFUN,zMaxUY,max,zNodeDisp\n'); # fprintf(fid,'*VSCFUN,zMinUY,min,zNodeDisp\n'); # U0 = 1/400; #Dimple factor # lg = 1; #Largest horizontal dimension of buckle # fprintf(fid,'zImperfectionSF=#f*#f*#f/max(abs(zMaxUY),abs(zMinUY))\n',config.analysisFlags.imperfection(jj), U0, lg); - - fid.write('zImperfectionSF=%f\n' % (config.analysisFlags.imperfection(jj))) + + fid.write("zImperfectionSF=%f\n" % (config.analysisFlags.imperfection(jj))) # U0 = 1/400; #Dimple factor # lg = 1; #Largest horizontal dimension of buckle # zImperfectionSF = lg*U0; - - fid.write('/prep7\n' % ()) - fid.write('UPGEOM,zImperfectionSF,1,%i,%s,"rst"\n' % (ii, ansysFilename+'-Load'+str(iLoad))) - - fid.write('FINISH\n' % ()) - - filename = ansysFilename+'-'+str(ii)+'-'+str(jj) - - fid.write('/FILNAME,%s,1\n' % (filename)) - - #fprintf(fid,strcat('SAVE,''',filename,''',''db'',''',strrep(pwd,'\','\\'),'''\n')); - - fid.write('\n' % ()) - #Nonlinear Static Analysis - fid.write('/solu\n' % ()) - fid.write('antype,0\n' % ()) - #fprintf(fid,'irlf,-1\n'); - fid.write('pstres,0\n' % ()) - fid.write('NLGEOM,1\n' % ()) - fid.write('TIME,%f\n' % (1)) + + fid.write("/prep7\n" % ()) + fid.write( + 'UPGEOM,zImperfectionSF,1,%i,%s,"rst"\n' + % (ii, ansysFilename + "-Load" + str(iLoad)) + ) + + fid.write("FINISH\n" % ()) + + filename = ansysFilename + "-" + str(ii) + "-" + str(jj) + + fid.write("/FILNAME,%s,1\n" % (filename)) + + # fprintf(fid,strcat('SAVE,''',filename,''',''db'',''',strrep(pwd,'\','\\'),'''\n')); + + fid.write("\n" % ()) + # Nonlinear Static Analysis + fid.write("/solu\n" % ()) + fid.write("antype,0\n" % ()) + # fprintf(fid,'irlf,-1\n'); + fid.write("pstres,0\n" % ()) + fid.write("NLGEOM,1\n" % ()) + fid.write("TIME,%f\n" % (1)) # fprintf(fid,'AUTOTS,ON,\n'); # nsubstep = 20; # fprintf(fid,'nsubstep=#f\n', nsubstep); # fprintf(fid,'NSUBST,508,20,500\n'); # fprintf(fid,'NEQIT,200,\n'); loadScaleFactor = 5 - + # fprintf(fid,'allsel\n'); # fprintf(fid,'esel,s,type,,33\n'); #Select all follower elements - fid.write('NSEL, ALL\n' % ()) - fid.write('FSCALE,%f\n' % (loadScaleFactor)) + fid.write("NSEL, ALL\n" % ()) + fid.write("FSCALE,%f\n" % (loadScaleFactor)) # fprintf(fid,'RESCONTROL,DEFINE,ALL,1,\n'); - fid.write('OUTRES,NSOL,ALL\n' % ()) + fid.write("OUTRES,NSOL,ALL\n" % ()) # fprintf(fid,'NROPT,UNSYM\n'); - + # fprintf(fid,'CUTCONTROL,PIVSTOP,2\n'); #Ends simulation once pivot becomes negative # fprintf(fid,'PRED,OFF\n'); - - fid.write('allsel\n' % ()) - fid.write('solve\n' % ()) - fid.write('FINISH\n' % ()) + + fid.write("allsel\n" % ()) + fid.write("solve\n" % ()) + fid.write("FINISH\n" % ()) # fid.write('SAVE,'',filename,'','db','',pwd,'\n') % ()) - fid.write('/EXIT,NOSAVE\n' % ()) + fid.write("/EXIT,NOSAVE\n" % ()) fid.close() ###### RUN ANSYS####### - ansys_call = print('SET KMP_STACKSIZE=2048k & "%s" -b -p %s -I %s -o %s -np %s',ansys_path,ansys_product,script_name,script_out,int2str(ncpus)) + ansys_call = print( + 'SET KMP_STACKSIZE=2048k & "%s" -b -p %s -I %s -o %s -np %s', + ansys_path, + ansys_product, + script_name, + script_out, + int2str(ncpus), + ) # KMP_STACKSIZE=2048k has been specifed. 2048k may not be enough for other # simulations. 
EC - + subprocess.run(ansys_call) - print('%s: Nonlinear Mode-%s Analysis Finished\n' % (datetime.now(),str(ii))) - data = readANSYSoutputs(filename+'.mntr',11) + print("%s: Nonlinear Mode-%s Analysis Finished\n" % (datetime.now(), str(ii))) + data = readANSYSoutputs(filename + ".mntr", 11) a = data.shape - nonlinearLoadFactors = data(a(1),7) * loadScaleFactor - + nonlinearLoadFactors = data(a(1), 7) * loadScaleFactor + return nonlinearLoadFactors -def writeAnsysResultantVSSpan(blade, config, iLoad, fid): - fid.write('/POST1\n' % ()) - fid.write('set,LAST\n' % ()) - fid.write('RSYS,0\n' % ()) - - #fprintf(fid,'seltol,0.05\n'); - #fprintf(fid,'*CFOPEN, resultantVSspan,txt\n'); - #for i=1:numel(blade.ispan) - #fprintf(fid,'nsel,s,loc,z,0,#f \n',blade.ispan(i)); - - #if i==numel(blade.ispan) - #fprintf(fid,'nsel,u,node,,z_master_node_number\n'); - #end - - #fprintf(fid,'spoint,0,#f,#f,#f\n',blade.sweep(i),blade.prebend(i),blade.ispan(i)); - #fprintf(fid,'nplot\n'); - #fprintf(fid,'FSUM\n'); - #fprintf(fid,'*GET, F1, FSUM, 0, ITEM,FX\n'); - #fprintf(fid,'*GET, F2, FSUM, 0, ITEM,FY\n'); - #fprintf(fid,'*GET, F3, FSUM, 0, ITEM,FZ\n'); - #fprintf(fid,'*GET, M1, FSUM, 0, ITEM,MX\n'); - #fprintf(fid,'*GET, M2, FSUM, 0, ITEM,MY\n'); - #fprintf(fid,'*GET, M3, FSUM, 0, ITEM,MZ\n'); - #fprintf(fid,'*VWRITE,#f,F1,F2,F3,M1,M2,M3\n',blade.ispan(i)); - #fprintf(fid,'(E20.12,E20.12,E20.12,E20.12,E20.12,E20.12,E20.12)\n'); - #fprintf(fid,'\n \n \n'); - #end - #fprintf(fid,'*CFCLOS\n'); - #fprintf(fid,'finish\n'); - - fid.write('/post1\n' % ()) - fid.write('elsize=%f\n' % (blade.mesh)) - fid.write('nz=nint(%f/elsize) !Integer number of points to output resultant loads\n' % (blade.ispan(end()))) - fid.write('zloc=0\n' % ()) - fid.write('delta=0.1\n' % ()) - fid.write('*CFOPEN, resultantVSspan,txt\n' % ()) - fid.write('*do,I,1,nz+1\n' % ()) - fid.write('allsel\n' % ()) - fid.write('nsel,s,loc,z,0,zloc+delta\n' % ()) - fid.write('spoint,0,0,0,zloc\n' % ()) - fid.write('!nplot\n' % ()) - fid.write('FSUM\n' % ()) - fid.write('*GET, F1, FSUM, 0, ITEM,FX\n' % ()) - fid.write('*GET, F2, FSUM, 0, ITEM,FY\n' % ()) - fid.write('*GET, F3, FSUM, 0, ITEM,FZ\n' % ()) - fid.write('*GET, M1, FSUM, 0, ITEM,MX\n' % ()) - fid.write('*GET, M2, FSUM, 0, ITEM,MY\n' % ()) - fid.write('*GET, M3, FSUM, 0, ITEM,MZ\n' % ()) - fid.write('*VWRITE,zloc,F1,F2,F3,M1,M2,M3\n' % ()) - fid.write('(E20.12,E20.12,E20.12,E20.12,E20.12,E20.12,E20.12)\n' % ()) - fid.write('zloc=zloc+elsize\n' % ()) - fid.write('*ENDDO\n' % ()) - fid.write('*CFCLOS\n' % ()) - fid.write('finish\n' % ()) +def writeAnsysResultantVSSpan(blade, config, iLoad, fid): + fid.write("/POST1\n" % ()) + fid.write("set,LAST\n" % ()) + fid.write("RSYS,0\n" % ()) + + # fprintf(fid,'seltol,0.05\n'); + # fprintf(fid,'*CFOPEN, resultantVSspan,txt\n'); + # for i=1:numel(blade.ispan) + # fprintf(fid,'nsel,s,loc,z,0,#f \n',blade.ispan(i)); + + # if i==numel(blade.ispan) + # fprintf(fid,'nsel,u,node,,z_master_node_number\n'); + # end + + # fprintf(fid,'spoint,0,#f,#f,#f\n',blade.sweep(i),blade.prebend(i),blade.ispan(i)); + # fprintf(fid,'nplot\n'); + # fprintf(fid,'FSUM\n'); + # fprintf(fid,'*GET, F1, FSUM, 0, ITEM,FX\n'); + # fprintf(fid,'*GET, F2, FSUM, 0, ITEM,FY\n'); + # fprintf(fid,'*GET, F3, FSUM, 0, ITEM,FZ\n'); + # fprintf(fid,'*GET, M1, FSUM, 0, ITEM,MX\n'); + # fprintf(fid,'*GET, M2, FSUM, 0, ITEM,MY\n'); + # fprintf(fid,'*GET, M3, FSUM, 0, ITEM,MZ\n'); + # fprintf(fid,'*VWRITE,#f,F1,F2,F3,M1,M2,M3\n',blade.ispan(i)); + # 
fprintf(fid,'(E20.12,E20.12,E20.12,E20.12,E20.12,E20.12,E20.12)\n'); + # fprintf(fid,'\n \n \n'); + # end + # fprintf(fid,'*CFCLOS\n'); + # fprintf(fid,'finish\n'); + + fid.write("/post1\n" % ()) + fid.write("elsize=%f\n" % (blade.mesh)) + fid.write( + "nz=nint(%f/elsize) !Integer number of points to output resultant loads\n" + % (blade.ispan(end())) + ) + fid.write("zloc=0\n" % ()) + fid.write("delta=0.1\n" % ()) + fid.write("*CFOPEN, resultantVSspan,txt\n" % ()) + fid.write("*do,I,1,nz+1\n" % ()) + fid.write("allsel\n" % ()) + fid.write("nsel,s,loc,z,0,zloc+delta\n" % ()) + fid.write("spoint,0,0,0,zloc\n" % ()) + fid.write("!nplot\n" % ()) + fid.write("FSUM\n" % ()) + fid.write("*GET, F1, FSUM, 0, ITEM,FX\n" % ()) + fid.write("*GET, F2, FSUM, 0, ITEM,FY\n" % ()) + fid.write("*GET, F3, FSUM, 0, ITEM,FZ\n" % ()) + fid.write("*GET, M1, FSUM, 0, ITEM,MX\n" % ()) + fid.write("*GET, M2, FSUM, 0, ITEM,MY\n" % ()) + fid.write("*GET, M3, FSUM, 0, ITEM,MZ\n" % ()) + fid.write("*VWRITE,zloc,F1,F2,F3,M1,M2,M3\n" % ()) + fid.write("(E20.12,E20.12,E20.12,E20.12,E20.12,E20.12,E20.12)\n" % ()) + fid.write("zloc=zloc+elsize\n" % ()) + fid.write("*ENDDO\n" % ()) + fid.write("*CFCLOS\n" % ()) + fid.write("finish\n" % ()) return -def writeAnsysRupture(config, iLoad, fid, failureFilename): - fid.write('! BEGIN FAILURE SCRIPT\n' % ()) - fid.write('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n' % ()) - fid.write('!Add for PLESOL and *get,findex,PLNSOL,0,MAX to work' % ()) - fid.write('/BATCH \n' % ()) - fid.write('/COM,ANSYS RELEASE Release 18.1 BUILD 18.1 UP20170403 15:49:08\n' % ()) - fid.write('/GRA,POWER\n ' % ()) - fid.write('/GST,ON\n ' % ()) - fid.write('/PLO,INFO,3\n ' % ()) - fid.write('/GRO,CURL,ON\n ' % ()) - fid.write('/CPLANE,1 \n ' % ()) - fid.write('/REPLOT,RESIZE \n ' % ()) - fid.write('WPSTYLE,,,,,,,,0\n ' % ()) - fid.write('/SHOW\n ' % ()) - fid.write('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n' % ()) - fid.write('/POST1\n' % ()) - fid.write('set,last\n' % ()) - fid.write('allsel\n' % ()) - fid.write('RSYS,LSYS \n' % ()) - - fid.write('layer,fcmax\n' % ()) - if not config.analysisFlags.failure.upper() in ['PUCK','LARC03','LARC04']: - #Do this for the failure criteria that do not distinguish between fiber - #and matrix failure +def writeAnsysRupture(config, iLoad, fid, failureFilename): + fid.write("! 
BEGIN FAILURE SCRIPT\n" % ()) + fid.write( + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" % () + ) + fid.write("!Add for PLESOL and *get,findex,PLNSOL,0,MAX to work" % ()) + fid.write("/BATCH \n" % ()) + fid.write( + "/COM,ANSYS RELEASE Release 18.1 BUILD 18.1 UP20170403 15:49:08\n" + % () + ) + fid.write("/GRA,POWER\n " % ()) + fid.write("/GST,ON\n " % ()) + fid.write("/PLO,INFO,3\n " % ()) + fid.write("/GRO,CURL,ON\n " % ()) + fid.write("/CPLANE,1 \n " % ()) + fid.write("/REPLOT,RESIZE \n " % ()) + fid.write("WPSTYLE,,,,,,,,0\n " % ()) + fid.write("/SHOW\n " % ()) + fid.write( + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" % () + ) + fid.write("/POST1\n" % ()) + fid.write("set,last\n" % ()) + fid.write("allsel\n" % ()) + fid.write("RSYS,LSYS \n" % ()) + + fid.write("layer,fcmax\n" % ()) + if not config.analysisFlags.failure.upper() in ["PUCK", "LARC03", "LARC04"]: + # Do this for the failure criteria that do not distinguish between fiber + # and matrix failure fc = config.analysisFlags.failure.upper() - fid.write('FCTYP,add,%s\n' % (fc)) - fid.write('PLESOL, FAIL,%s, 0,1.0\n' % (fc)) - fid.write('*get,findex,PLNSOL,0,MAX\n' % ()) + fid.write("FCTYP,add,%s\n" % (fc)) + fid.write("PLESOL, FAIL,%s, 0,1.0\n" % (fc)) + fid.write("*get,findex,PLNSOL,0,MAX\n" % ()) else: - #Do this for the failure criteria that do distinguish between fiber - #and matrix failure - if 'PUCK' == config.analysisFlags.failure.upper(): - #Fiber Failure - fc = 'PFIB' - fid.write('FCTYP,add,%s\n' % (fc)) - fid.write('PLESOL, FAIL,%s, 0,1.0\n' % (fc)) - fid.write('*get,Ffindex,PLNSOL,0,MAX\n' % ()) - #Matrix Failure - fc = 'PMAT' - fid.write('FCTYP,add,%s\n' % (fc)) - fid.write('PLESOL, FAIL,%s, 0,1.0\n' % (fc)) - fid.write('*get,Mfindex,PLNSOL,0,MAX\n' % ()) + # Do this for the failure criteria that do distinguish between fiber + # and matrix failure + if "PUCK" == config.analysisFlags.failure.upper(): + # Fiber Failure + fc = "PFIB" + fid.write("FCTYP,add,%s\n" % (fc)) + fid.write("PLESOL, FAIL,%s, 0,1.0\n" % (fc)) + fid.write("*get,Ffindex,PLNSOL,0,MAX\n" % ()) + # Matrix Failure + fc = "PMAT" + fid.write("FCTYP,add,%s\n" % (fc)) + fid.write("PLESOL, FAIL,%s, 0,1.0\n" % (fc)) + fid.write("*get,Mfindex,PLNSOL,0,MAX\n" % ()) else: - if 'LARC03' == config.analysisFlags.failure.upper(): - #Fiber Failure - fc = 'L3FB' - fid.write('FCTYP,add,%s\n' % (fc)) - fid.write('PLESOL, FAIL,%s, 0,1.0\n' % (fc)) - fid.write('*get,Ffindex,PLNSOL,0,MAX\n' % ()) - #Matrix Failure - fc = 'L3MT' - fid.write('FCTYP,add,%s\n' % (fc)) - fid.write('PLESOL, FAIL,%s, 0,1.0\n' % (fc)) - fid.write('*get,Mfindex,PLNSOL,0,MAX\n' % ()) + if "LARC03" == config.analysisFlags.failure.upper(): + # Fiber Failure + fc = "L3FB" + fid.write("FCTYP,add,%s\n" % (fc)) + fid.write("PLESOL, FAIL,%s, 0,1.0\n" % (fc)) + fid.write("*get,Ffindex,PLNSOL,0,MAX\n" % ()) + # Matrix Failure + fc = "L3MT" + fid.write("FCTYP,add,%s\n" % (fc)) + fid.write("PLESOL, FAIL,%s, 0,1.0\n" % (fc)) + fid.write("*get,Mfindex,PLNSOL,0,MAX\n" % ()) else: - if 'LARC04' == config.analysisFlags.failure.upper(): - #Fiber Failure - fc = 'L4FB' - fid.write('FCTYP,add,%s\n' % (fc)) - fid.write('PLESOL, FAIL,%s, 0,1.0\n' % (fc)) - fid.write('*get,Ffindex,PLNSOL,0,MAX\n' % ()) - #Matrix Failure - fc = 'L4MT' - fid.write('FCTYP,add,%s\n' % (fc)) - fid.write('PLESOL, FAIL,%s, 0,1.0\n' % (fc)) - fid.write('*get,Mfindex,PLNSOL,0,MAX\n' % ()) - #Report the higher of the fiber failure index or the matrix - fid.write('*IF, Ffindex, 
GT,Mfindex, THEN\n' % ()) - fid.write('findex=Ffindex\n' % ()) - fid.write('*ELSE\n' % ()) - fid.write('findex=Mfindex\n' % ()) - fid.write('*ENDIF\n' % ()) - - fid.write(np.array(['/output,',failureFilename,',out\n']) % ()) - fid.write('*status,findex\n' % ()) - fid.write('/output\n' % ()) + if "LARC04" == config.analysisFlags.failure.upper(): + # Fiber Failure + fc = "L4FB" + fid.write("FCTYP,add,%s\n" % (fc)) + fid.write("PLESOL, FAIL,%s, 0,1.0\n" % (fc)) + fid.write("*get,Ffindex,PLNSOL,0,MAX\n" % ()) + # Matrix Failure + fc = "L4MT" + fid.write("FCTYP,add,%s\n" % (fc)) + fid.write("PLESOL, FAIL,%s, 0,1.0\n" % (fc)) + fid.write("*get,Mfindex,PLNSOL,0,MAX\n" % ()) + # Report the higher of the fiber failure index or the matrix + fid.write("*IF, Ffindex, GT,Mfindex, THEN\n" % ()) + fid.write("findex=Ffindex\n" % ()) + fid.write("*ELSE\n" % ()) + fid.write("findex=Mfindex\n" % ()) + fid.write("*ENDIF\n" % ()) + + fid.write(np.array(["/output,", failureFilename, ",out\n"]) % ()) + fid.write("*status,findex\n" % ()) + fid.write("/output\n" % ()) ## EMA added: - fid.write('/output,allElemFailureResults%s,out\n' % (str(iLoad))) - fid.write('PRESOL,FAIL\n' % ()) - fid.write('/output\n' % ()) + fid.write("/output,allElemFailureResults%s,out\n" % (str(iLoad))) + fid.write("PRESOL,FAIL\n" % ()) + fid.write("/output\n" % ()) ## END - fid.write('finish\n' % ()) - fid.write('! END FAILURE SCRIPT\n' % ()) + fid.write("finish\n" % ()) + fid.write("! END FAILURE SCRIPT\n" % ()) return -def writeWrinklingForNonlinearBuckling(blade, coreMatName, settings, np, ansysFilename, i, j): - filename = ansysFilename+'-'+str(i)+'-'+str(j) - +def writeWrinklingForNonlinearBuckling( + blade, coreMatName, settings, np, ansysFilename, i, j +): + filename = ansysFilename + "-" + str(i) + "-" + str(j) + ####### Generate Wrinkling Files ############# - script_name = 'commands4-'+str(i)+'.mac' - script_out = 'output4-'+str(i)+'-'+str(j)+'.txt' - fid = open(script_name,'w+') - fid.write('!************ MODE-%i ************\n' % (i)) - fid.write('/FILNAME,%s,1\n' % (filename)) - - fid.write('resume\n' % ()) - fid.write('/POST1\n' % ()) - fid.write('SET,LAST\n' % ()) - app,SkinAreas,compsInModel = writeAnsysGetFaceStresses(blade,fid,coreMatName) - fid.write('/EXIT,NOSAVE\n' % ()) + script_name = "commands4-" + str(i) + ".mac" + script_out = "output4-" + str(i) + "-" + str(j) + ".txt" + fid = open(script_name, "w+") + fid.write("!************ MODE-%i ************\n" % (i)) + fid.write("/FILNAME,%s,1\n" % (filename)) + + fid.write("resume\n" % ()) + fid.write("/POST1\n" % ()) + fid.write("SET,LAST\n" % ()) + app, SkinAreas, compsInModel = writeAnsysGetFaceStresses(blade, fid, coreMatName) + fid.write("/EXIT,NOSAVE\n" % ()) fid.close() - ansys_call = print('SET KMP_STACKSIZE=2048k & "%s" -b -p %s -I %s -o %s -np %s',settings.ansys_path,settings.ansys_product,script_name,script_out,int2str(np)) + ansys_call = print( + 'SET KMP_STACKSIZE=2048k & "%s" -b -p %s -I %s -o %s -np %s', + settings.ansys_path, + settings.ansys_product, + script_name, + script_out, + int2str(np), + ) # KMP_STACKSIZE=2048k has been specifed. 2048k may not be enough for other # simulations. 
EC - + subprocess.run(ansys_call) - print('%s: Nonlinear Mode-%s Analysis Finished\n' % (datetime.now(),str(i))) - wrinklingLimitingElementData = writeAnsysFagerberWrinkling(app,SkinAreas,compsInModel,coreMatName) + print("%s: Nonlinear Mode-%s Analysis Finished\n" % (datetime.now(), str(i))) + wrinklingLimitingElementData = writeAnsysFagerberWrinkling( + app, SkinAreas, compsInModel, coreMatName + ) return wrinklingLimitingElementData - -def writeAnsysNonLinearLocalBuckling(blade, config, iLoad, fid, ansysFilename, ii, jj): - #UNSUPPORTED AT THIS TIME + +def writeAnsysNonLinearLocalBuckling(blade, config, iLoad, fid, ansysFilename, ii, jj): + # UNSUPPORTED AT THIS TIME # filename=strcat(ansysFilename,'-',int2str(ii),'-',int2str(jj)); #The name of the next job name # ####### Generate Wrinkling Files ############# # script_name=strcat('commands4-',int2str(ii),'.mac'); # script_out=strcat('output4-',int2str(ii),'-',int2str(jj),'.txt'); - + # fid=fopen(script_name,'w+'); # fprintf(fid,strcat('!************ MODE-#i ************\n'),ii); # fprintf(fid,'/FILNAME,''#s'',1\n',filename); #From master, change the jobname # fprintf(fid,'resume\n'); # fprintf(fid,'/POST1\n'); # fprintf(fid,'SET,LAST\n'); - + # [app,SkinAreas,compsInModel]=writeANSYSgetFaceStresses(blade,fid,config.analysisFlags.localBuckling); - + # fprintf(fid,'/EXIT,NOSAVE\n'); # fid.close(); - + # ansys_call = sprintf('SET KMP_STACKSIZE=2048k & "#s" -b -p #s -I #s -o #s -np #s',settings.ansys_path,settings.ansys_product,script_name,script_out,int2str(np)) # KMP_STACKSIZE is 512k by default. This is not enough therefore SET # # KMP_STACKSIZE=2048k has been specifed. 2048k may not be enough for other # # simulations. EC # # - - + # system(ansys_call) # the windows system call to run the above ansys command # fprintf('#s: Nonlinear Mode-#s Analysis Finished\n',datestr(now),int2str(ii)) return -def writePlotFatigue(fname, plotFatigue): - #Write fatigue damage for each element. ANSYS requires elements to be - #sorted - n = len(plotFatigue[:,0]) - fid = open(np.array([fname,'.txt']),'w+') - fid.write('Element fatigueDamage\n' % ()) - plotFatigue = sortrows(plotFatigue,1) - for i in range(len(plotFatigue[:,0])): - fid.write('%8i %6.5E\n' % (plotFatigue[i,0],plotFatigue[i,1])) - +def writePlotFatigue(fname, plotFatigue): + # Write fatigue damage for each element. 
ANSYS requires elements to be + # sorted + n = len(plotFatigue[:, 0]) + fid = open(np.array([fname, ".txt"]), "w+") + fid.write("Element fatigueDamage\n" % ()) + plotFatigue = sortrows(plotFatigue, 1) + for i in range(len(plotFatigue[:, 0])): + fid.write("%8i %6.5E\n" % (plotFatigue[i, 0], plotFatigue[i, 1])) + fid.close() - #Write plot commands - fid = open(fname+'.mac','w') - fid.write('/post1\n' % ()) - fid.write('set,last\n' % ()) - fid.write('plnsol,u,sum\n' % ()) - fid.write('etab,test,u,X\n' % ()) - fid.write('*get,max_e,elem,0,count\n' % ()) - fid.write('*dim,d_res,array,max_e,3\n' % ()) - fid.write('!Column 1 = Element Number\n' % ()) + # Write plot commands + fid = open(fname + ".mac", "w") + fid.write("/post1\n" % ()) + fid.write("set,last\n" % ()) + fid.write("plnsol,u,sum\n" % ()) + fid.write("etab,test,u,X\n" % ()) + fid.write("*get,max_e,elem,0,count\n" % ()) + fid.write("*dim,d_res,array,max_e,3\n" % ()) + fid.write("!Column 1 = Element Number\n" % ()) fid.write("!Column 2 = Where I'm putting result data\n" % ()) - fid.write('*vget,d_res(1,1),elem,,elist\n' % ()) - fid.write('*vfill,d_res(1,2),ramp,0,0\n' % ()) - fid.write('*dim,d_results,array,%i,2 !Need to specify the same size array as the data being read in\n' % (n)) - fid.write('*vread,d_results(1,1),%s,txt,,jik,2,%i,,1\n' % (fname,n)) - fid.write('(F8.0,E13.5)\n' % ()) - fid.write('*get,d_temp,parm,d_results,dim,x\n' % ()) - fid.write('j=1\n' % ()) - fid.write('i=1\n' % ()) - fid.write('d_run=1\n' % ()) - fid.write('*dowhile,d_run\n' % ()) - fid.write('*if,d_res(i,1),EQ,d_results(j,1),THEN\n' % ()) - fid.write('d_res(i,2)=d_results(j,2)\n' % ()) - fid.write('j=j+1\n' % ()) - fid.write('*endif\n' % ()) - fid.write('*if,j,GT,d_temp,THEN\n' % ()) - fid.write('d_run=0\n' % ()) - fid.write('*endif\n' % ()) - fid.write('i=i+1\n' % ()) - fid.write('*enddo\n' % ()) + fid.write("*vget,d_res(1,1),elem,,elist\n" % ()) + fid.write("*vfill,d_res(1,2),ramp,0,0\n" % ()) + fid.write( + "*dim,d_results,array,%i,2 !Need to specify the same size array as the data being read in\n" + % (n) + ) + fid.write("*vread,d_results(1,1),%s,txt,,jik,2,%i,,1\n" % (fname, n)) + fid.write("(F8.0,E13.5)\n" % ()) + fid.write("*get,d_temp,parm,d_results,dim,x\n" % ()) + fid.write("j=1\n" % ()) + fid.write("i=1\n" % ()) + fid.write("d_run=1\n" % ()) + fid.write("*dowhile,d_run\n" % ()) + fid.write("*if,d_res(i,1),EQ,d_results(j,1),THEN\n" % ()) + fid.write("d_res(i,2)=d_results(j,2)\n" % ()) + fid.write("j=j+1\n" % ()) + fid.write("*endif\n" % ()) + fid.write("*if,j,GT,d_temp,THEN\n" % ()) + fid.write("d_run=0\n" % ()) + fid.write("*endif\n" % ()) + fid.write("i=i+1\n" % ()) + fid.write("*enddo\n" % ()) # fprintf(fid,'j=1\n'); # fprintf(fid,'*do,i,1,max_e\n'); # fprintf(fid,'*if,j,LT,#i,THEN\n',n+1); @@ -654,80 +743,104 @@ def writePlotFatigue(fname, plotFatigue): # fprintf(fid,'*endif\n'); # fprintf(fid,'*endif\n'); # fprintf(fid,'*enddo\n'); - - fid.write('allsel,all\n' % ()) - fid.write('*vput,d_res(1,2),elem,1,etab,test\n' % ()) - fid.write('Pretab\n' % ()) - fid.write('pletab,test\n' % ()) - + + fid.write("allsel,all\n" % ()) + fid.write("*vput,d_res(1,2),elem,1,etab,test\n" % ()) + fid.write("Pretab\n" % ()) + fid.write("pletab,test\n" % ()) + return -def writeforcefile(filename, forcemap, forcesums, maptype): - fid = open(filename,'wt') - if (fid == - 1): - raise Exception('Could not open file "%s"',filename) - - if ('forcesums' is not None): - fid.write('!========== FORCE MAPPING SUMMARY ==========' % ()) +def writeforcefile(filename, forcemap, 
forcesums, maptype): + fid = open(filename, "wt") + if fid == -1: + raise Exception('Could not open file "%s"', filename) + + if "forcesums" is not None: + fid.write("!========== FORCE MAPPING SUMMARY ==========" % ()) fid.write('\n!maptype = "%s"' % (maptype)) - s = ' %14.6e'*forcesums["Z"].size % tuple(forcesums["Z"]) - s = '\n! Z =%s'% s +' TOTAL ' + s = " %14.6e" * forcesums["Z"].size % tuple(forcesums["Z"]) + s = "\n! Z =%s" % s + " TOTAL " fid.write(s) - fid.write('\n!'+('-'*len(s))) - fid.write('\n!Input Fx =' % ()) - fid.write(' %14.6e'*forcesums["Fx"][:,0].size % tuple(forcesums["Fx"][:,0])) - fid.write(' %14.6e' % (sum(forcesums["Fx"][:,0]))) - fid.write('\n!Output sum(fx) =' % ()) - fid.write(' %14.6e'*forcesums["Fx"][:,1].size % tuple(forcesums["Fx"][:,1])) - fid.write(' %14.6e' % (sum(forcesums["Fx"][:,1]))) - fid.write('\n!'+('-'*len(s))) - fid.write('\n!Input Fy =' % ()) - fid.write(' %14.6e'*forcesums["Fy"][:,0].size % tuple(forcesums["Fy"][:,0])) - fid.write(' %14.6e' % (sum(forcesums["Fy"][:,0]))) - fid.write('\n!Output sum(fy) =' % ()) - fid.write(' %14.6e'*forcesums["Fx"][:,1].size % tuple(forcesums["Fx"][:,1])) - fid.write(' %14.6e' % (sum(forcesums["Fy"][:,1]))) - fid.write('\n!'+('-'*len(s))) - fid.write('\n!Input M =' % ()) - fid.write(' %14.6e'*forcesums["M"][:,0].size % tuple(forcesums["M"][:,0])) - fid.write(' %14.6e' % (sum(forcesums["M"][:,0]))) - fid.write('\n!sum(-y*fx + x*fy) =' % ()) - fid.write(' %14.6e'*forcesums["M"][:,1].size % tuple(forcesums["M"][:,1])) - fid.write(' %14.6e' % (sum(forcesums["M"][:,1]))) - fid.write('\n!'+('-'*len(s))) - fid.write('\n!Input Z*Fy =' % ()) - fid.write(' %14.6e'*forcesums["RootMx"][:,0].size % tuple(forcesums["RootMx"][:,0])) - fid.write(' %14.6e' % (sum(forcesums["RootMx"][:,0]))) - fid.write('\n!Output sum(z*fy) =' % ()) - fid.write(' %14.6e'*forcesums["RootMx"][:,1].size % tuple(forcesums["RootMx"][:,1])) - fid.write(' %14.6e' % (sum(forcesums["RootMx"][:,1]))) - fid.write('\n!'+('-'*len(s))) - fid.write('\n!Input Z*Fx =' % ()) - fid.write(' %14.6e'*forcesums["RootMy"][:,0].size % tuple(forcesums["RootMy"][:,0])) - fid.write(' %14.6e' % (sum(forcesums["RootMy"][:,0]))) - fid.write('\n!Output sum(z*fx) =' % ()) - fid.write(' %14.6e'*forcesums["RootMy"][:,1].size % tuple(forcesums["RootMy"][:,1])) - fid.write(' %14.6e' % (sum(forcesums["RootMy"][:,1]))) - fid.write('\n\n' % ()) - - fid.write('finish\n/prep7\n\n' % ()) + fid.write("\n!" + ("-" * len(s))) + fid.write("\n!Input Fx =" % ()) + fid.write( + " %14.6e" * forcesums["Fx"][:, 0].size % tuple(forcesums["Fx"][:, 0]) + ) + fid.write(" %14.6e" % (sum(forcesums["Fx"][:, 0]))) + fid.write("\n!Output sum(fx) =" % ()) + fid.write( + " %14.6e" * forcesums["Fx"][:, 1].size % tuple(forcesums["Fx"][:, 1]) + ) + fid.write(" %14.6e" % (sum(forcesums["Fx"][:, 1]))) + fid.write("\n!" + ("-" * len(s))) + fid.write("\n!Input Fy =" % ()) + fid.write( + " %14.6e" * forcesums["Fy"][:, 0].size % tuple(forcesums["Fy"][:, 0]) + ) + fid.write(" %14.6e" % (sum(forcesums["Fy"][:, 0]))) + fid.write("\n!Output sum(fy) =" % ()) + fid.write( + " %14.6e" * forcesums["Fx"][:, 1].size % tuple(forcesums["Fx"][:, 1]) + ) + fid.write(" %14.6e" % (sum(forcesums["Fy"][:, 1]))) + fid.write("\n!" 
+ ("-" * len(s))) + fid.write("\n!Input M =" % ()) + fid.write(" %14.6e" * forcesums["M"][:, 0].size % tuple(forcesums["M"][:, 0])) + fid.write(" %14.6e" % (sum(forcesums["M"][:, 0]))) + fid.write("\n!sum(-y*fx + x*fy) =" % ()) + fid.write(" %14.6e" * forcesums["M"][:, 1].size % tuple(forcesums["M"][:, 1])) + fid.write(" %14.6e" % (sum(forcesums["M"][:, 1]))) + fid.write("\n!" + ("-" * len(s))) + fid.write("\n!Input Z*Fy =" % ()) + fid.write( + " %14.6e" + * forcesums["RootMx"][:, 0].size + % tuple(forcesums["RootMx"][:, 0]) + ) + fid.write(" %14.6e" % (sum(forcesums["RootMx"][:, 0]))) + fid.write("\n!Output sum(z*fy) =" % ()) + fid.write( + " %14.6e" + * forcesums["RootMx"][:, 1].size + % tuple(forcesums["RootMx"][:, 1]) + ) + fid.write(" %14.6e" % (sum(forcesums["RootMx"][:, 1]))) + fid.write("\n!" + ("-" * len(s))) + fid.write("\n!Input Z*Fx =" % ()) + fid.write( + " %14.6e" + * forcesums["RootMy"][:, 0].size + % tuple(forcesums["RootMy"][:, 0]) + ) + fid.write(" %14.6e" % (sum(forcesums["RootMy"][:, 0]))) + fid.write("\n!Output sum(z*fx) =" % ()) + fid.write( + " %14.6e" + * forcesums["RootMy"][:, 1].size + % tuple(forcesums["RootMy"][:, 1]) + ) + fid.write(" %14.6e" % (sum(forcesums["RootMy"][:, 1]))) + fid.write("\n\n" % ()) + + fid.write("finish\n/prep7\n\n" % ()) for nk in range(len(forcemap["n"])): if forcemap["fx"][nk]: - fid.write('f,%d,fx,%g\n' % (forcemap["n"][nk],forcemap["fx"][nk])) + fid.write("f,%d,fx,%g\n" % (forcemap["n"][nk], forcemap["fx"][nk])) if forcemap["fy"][nk]: - fid.write('f,%d,fy,%g\n' % (forcemap["n"][nk],forcemap["fy"][nk])) + fid.write("f,%d,fy,%g\n" % (forcemap["n"][nk], forcemap["fy"][nk])) ## EMA added: if forcemap["fz"][nk]: - fid.write('f,%d,fz,%g\n' % (forcemap["n"][nk],forcemap["fz"][nk])) + fid.write("f,%d,fz,%g\n" % (forcemap["n"][nk], forcemap["fz"][nk])) ## END - + fid.close() return def writeAnsysShellModel(blade, filename, meshData, config): - """ WRITE_SHELL7 Generate the ANSYS input file that creates the blade + """WRITE_SHELL7 Generate the ANSYS input file that creates the blade Parameters ---------- @@ -736,98 +849,131 @@ def writeAnsysShellModel(blade, filename, meshData, config): ------- """ - + fcopts = [ - 'EMAX','SMAX','TWSI','TWSR','HFIB', - 'HMAT','PFIB','PMAT','L3FB','L3MT', - 'L4FB','L4MT','USR1','USR2','USR3', - 'USR4','USR5','USR6','USR7','USR8', - 'USR9' - ] + "EMAX", + "SMAX", + "TWSI", + "TWSR", + "HFIB", + "HMAT", + "PFIB", + "PMAT", + "L3FB", + "L3MT", + "L4FB", + "L4MT", + "USR1", + "USR2", + "USR3", + "USR4", + "USR5", + "USR6", + "USR7", + "USR8", + "USR9", + ] config["FailureCriteria"] = dict.fromkeys(fcopts, 0) TotalStations = blade.ispan.size # write the macro file that builds the mesh - - fid = open(filename,'wt') - fid.write('hide_warndlg_keyopt=1\n') - fid.write('hide_warndlg_areas=1\n') - fid.write('\n/nerr,500,50000\n') - fid.write('\n/filename,master\n') - fid.write('\n/prep7\n' ) + + fid = open(filename, "wt") + fid.write("hide_warndlg_keyopt=1\n") + fid.write("hide_warndlg_areas=1\n") + fid.write("\n/nerr,500,50000\n") + fid.write("\n/filename,master\n") + fid.write("\n/prep7\n") # DEFINE ELEMENT TYPES - fid.write('\n! DEFINE ELEMENT TYPES =================================\n') + fid.write("\n! 
DEFINE ELEMENT TYPES =================================\n") # structural mass - fid.write('\n et,21,mass21,,,0') - fid.write('\n r,999,0.0,0.0,0.00001,0.0,0.0,0.0\n') + fid.write("\n et,21,mass21,,,0") + fid.write("\n r,999,0.0,0.0,0.00001,0.0,0.0,0.0\n") # shell281, 8-node structural shell, store data for TOP, BOTTOM, # and MID for all layers - fid.write('\n et,11,shell281') - fid.write('\n keyopt,11,8,2') - fid.write('\n*if,hide_warndlg_keyopt,eq,1,then') - fid.write('\n /UIS, MSGPOP, 3') - fid.write('\n*endif') - fid.write('\n !Set keyopt(2)=1 for improved formulation in R12 & R13') - fid.write('\n keyopt,11,2,1') - fid.write('\n*if,hide_warndlg_keyopt,eq,1,then') - fid.write('\n /UIS, MSGPOP, 2') - fid.write('\n*endif\n') - #jcb: I thought about checking the ansys version and making conditional - #statements, but someone could share the shell7 with someone using a - #different version of ansys + fid.write("\n et,11,shell281") + fid.write("\n keyopt,11,8,2") + fid.write("\n*if,hide_warndlg_keyopt,eq,1,then") + fid.write("\n /UIS, MSGPOP, 3") + fid.write("\n*endif") + fid.write("\n !Set keyopt(2)=1 for improved formulation in R12 & R13") + fid.write("\n keyopt,11,2,1") + fid.write("\n*if,hide_warndlg_keyopt,eq,1,then") + fid.write("\n /UIS, MSGPOP, 2") + fid.write("\n*endif\n") + # jcb: I thought about checking the ansys version and making conditional + # statements, but someone could share the shell7 with someone using a + # different version of ansys # if strncmp(ansys_version,'12',2) || strncmp(ansys_version,'13',2) # # Releases 12 & 13 of ANSYS require keyopt(2)=1 with shell281 # # to make use of the improved formulation # fprintf(fid,'\n keyopt,11,2,1\n'); # end - # shell181, 4-node structural shell, store data for TOP, BOTTOM, and + # shell181, 4-node structural shell, store data for TOP, BOTTOM, and # MID for all layers - fid.write('\n et,12,shell181') - fid.write('\n keyopt,12,8,2\n') - fid.write('\n keyopt,12,3,2\n') - #tcl: Write material properties - #tcl: This changed dramatically on 2001 November 08 - #tcl: Now only materials used in the model are written to shell7.src - #tcl: Also, material numbers are no longer recorded until write_shell7 - #tcl: Two new local arrays ansysMPnumber and ansysRnumber are used within write_shell7 + fid.write("\n et,12,shell181") + fid.write("\n keyopt,12,8,2\n") + fid.write("\n keyopt,12,3,2\n") + # tcl: Write material properties + # tcl: This changed dramatically on 2001 November 08 + # tcl: Now only materials used in the model are written to shell7.src + # tcl: Also, material numbers are no longer recorded until write_shell7 + # tcl: Two new local arrays ansysMPnumber and ansysRnumber are used within write_shell7 fcfields = [ - 'xten','xcmp','yten','ycmp','zten','zcmp', - 'xy','yz','xz','xycp','yzcp','xzcp','xzit', - 'xzic','yzit','yzic','g1g2','etal','etat','alp0' + "xten", + "xcmp", + "yten", + "ycmp", + "zten", + "zcmp", + "xy", + "yz", + "xz", + "xycp", + "yzcp", + "xzcp", + "xzit", + "xzic", + "yzit", + "yzic", + "g1g2", + "etal", + "etat", + "alp0", ] - fcvalues = dict.fromkeys(fcfields,0) - fid.write('\n! WRITE MATERIAL PROPERTIES ============================\n') - fid.write('\n ! FAILURE CRITERIA LIMIT TABLE LEGEND:') - fid.write('\n ! tb,fcli,,ntemp,npts,tbopt') - fid.write('\n ! (tbopt=1 for stress limits; default ntemp=1,npts=20)') - fid.write('\n ! tbdata,1,xten,xcmp,yten,ycmp,zten,zcmp') - fid.write('\n ! tbdata,7,xy,yz,xz,xycp,yzcp,xzcp') - fid.write('\n ! tbdata,13,xzit,xzic,yzit,yzic') - fid.write('\n ! 
tbdata,17,g1g2,etal,etat,alp0\n') + fcvalues = dict.fromkeys(fcfields, 0) + fid.write("\n! WRITE MATERIAL PROPERTIES ============================\n") + fid.write("\n ! FAILURE CRITERIA LIMIT TABLE LEGEND:") + fid.write("\n ! tb,fcli,,ntemp,npts,tbopt") + fid.write("\n ! (tbopt=1 for stress limits; default ntemp=1,npts=20)") + fid.write("\n ! tbdata,1,xten,xcmp,yten,ycmp,zten,zcmp") + fid.write("\n ! tbdata,7,xy,yz,xz,xycp,yzcp,xzcp") + fid.write("\n ! tbdata,13,xzit,xzic,yzit,yzic") + fid.write("\n ! tbdata,17,g1g2,etal,etat,alp0\n") for kmp in range(len(blade.materials)): mat = blade.materials[kmp] - if mat.type == 'isotropic': - fid.write('\n ! %s' % (mat.name)) - fid.write('\n mp,ex,%d,%g' % (kmp+1,mat.ex)) - fid.write('\n mp,dens,%d,%g' % (kmp+1,mat.density)) - fid.write('\n mp,nuxy,%d,%g' % (kmp+1,mat.prxy)) + if mat.type == "isotropic": + fid.write("\n ! %s" % (mat.name)) + fid.write("\n mp,ex,%d,%g" % (kmp + 1, mat.ex)) + fid.write("\n mp,dens,%d,%g" % (kmp + 1, mat.density)) + fid.write("\n mp,nuxy,%d,%g" % (kmp + 1, mat.prxy)) xten = mat.uts - elif 'orthotropic' == mat.type: - fid.write('\n ! %s' % (mat.name)) - fid.write('\n mp,ex,%d,%g' % (kmp+1,mat.ex)) - fid.write('\n mp,ey,%d,%g' % (kmp+1,mat.ey)) - fid.write('\n mp,ez,%d,%g' % (kmp+1,mat.ez)) - fid.write('\n mp,prxy,%d,%g' % (kmp+1,mat.prxy)) - fid.write('\n mp,pryz,%d,%g' % (kmp+1,mat.pryz)) - fid.write('\n mp,prxz,%d,%g' % (kmp+1,mat.prxz)) - fid.write('\n mp,gxy,%d,%g' % (kmp+1,mat.gxy)) - fid.write('\n mp,gyz,%d,%g' % (kmp+1,mat.gyz)) - fid.write('\n mp,gxz,%d,%g' % (kmp+1,mat.gxz)) - fid.write('\n mp,dens,%d,%g' % (kmp+1,mat.density)) + elif "orthotropic" == mat.type: + fid.write("\n ! %s" % (mat.name)) + fid.write("\n mp,ex,%d,%g" % (kmp + 1, mat.ex)) + fid.write("\n mp,ey,%d,%g" % (kmp + 1, mat.ey)) + fid.write("\n mp,ez,%d,%g" % (kmp + 1, mat.ez)) + fid.write("\n mp,prxy,%d,%g" % (kmp + 1, mat.prxy)) + fid.write("\n mp,pryz,%d,%g" % (kmp + 1, mat.pryz)) + fid.write("\n mp,prxz,%d,%g" % (kmp + 1, mat.prxz)) + fid.write("\n mp,gxy,%d,%g" % (kmp + 1, mat.gxy)) + fid.write("\n mp,gyz,%d,%g" % (kmp + 1, mat.gyz)) + fid.write("\n mp,gxz,%d,%g" % (kmp + 1, mat.gxz)) + fid.write("\n mp,dens,%d,%g" % (kmp + 1, mat.density)) else: - raise Exception('Unknown material type in database') - if mat.type in ['isotropic','orthotropic']: + raise Exception("Unknown material type in database") + if mat.type in ["isotropic", "orthotropic"]: # Note that entering a blank or a zero for XYCP,YZCP, or XZCP # triggers the default value of -1.0. To specify an effective zero, # use a small, nonzero value (such as 1E-14). 
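For orientation, a minimal driver sketch for the macro writer defined in this function. Only the writeAnsysShellModel signature and the config keys ("elementType", "dbname", "BoundaryCondition") come from the code in this patch; the blade object, the mesh dictionary, and the output filename are placeholders assumed to have been built elsewhere in pynumad.

# Illustrative only: `blade` (a populated Blade) and `meshData` (the shell mesh
# dict with "nodes", "elements", "sections", and "sets") are assumed to exist.
config = {
    "elementType": "181",                 # shell181; "281" selects shell281 instead
    "dbname": "blade_model",              # used for the ANSYS /title command
    "BoundaryCondition": "cantilevered",  # constrains all DOFs at the root (z = 0)
}
writeAnsysShellModel(blade, "shell7.src", meshData, config)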
@@ -835,7 +981,7 @@ def writeAnsysShellModel(blade, filename, meshData, config): # if isequal(0,mat.yzcp), mat.yzcp=1e-14; end # if isequal(0,mat.xzcp), mat.xzcp=1e-14; end # convert degrees to radians - #mat.alp0 = mat.alp0 * pi/180; + # mat.alp0 = mat.alp0 * pi/180; # read all of the failure criteria values # for kfc = 1:numel(fcfields) # fcname = fcfields{kfc} @@ -846,7 +992,7 @@ def writeAnsysShellModel(blade, filename, meshData, config): # # properties are empty # else # tempArray=zeros(9,1); - #Tesile Properties + # Tesile Properties try: mat.uts.shape uts = mat.uts @@ -871,290 +1017,337 @@ def writeAnsysShellModel(blade, filename, meshData, config): nStrenghts = uss.shape[0] if nStrenghts < 3: uss = fullyPopluateStrengthsArray(uss) - fid.write('\n tb,fcli,%d,1,20,1' % (kmp+1)) - fid.write('\n tbdata,1,%g,%g,%g,%g,%g,%g' % (uts[0],ucs[0],uts[1],ucs[1],uts[2],ucs[2])) - fid.write('\n tbdata,7,%g,%g,%g,,,' % (uss[0],uss[1],uss[2])) - fid.write('\n tbdata,13,%g,%g,%g,%g' % (mat.xzit,mat.xzic,mat.yzit,mat.yzic)) - fid.write('\n tbdata,17,%g,%g,%g,%g' % (mat.g1g2,mat.etal,mat.etat,mat.alp0)) + fid.write("\n tb,fcli,%d,1,20,1" % (kmp + 1)) + fid.write( + "\n tbdata,1,%g,%g,%g,%g,%g,%g" + % (uts[0], ucs[0], uts[1], ucs[1], uts[2], ucs[2]) + ) + fid.write("\n tbdata,7,%g,%g,%g,,," % (uss[0], uss[1], uss[2])) + fid.write( + "\n tbdata,13,%g,%g,%g,%g" % (mat.xzit, mat.xzic, mat.yzit, mat.yzic) + ) + fid.write( + "\n tbdata,17,%g,%g,%g,%g" % (mat.g1g2, mat.etal, mat.etat, mat.alp0) + ) # for kf = 1:numel(fcvalues) # if ~isempty(fcvalues{kf}) # fprintf(fid,'\n tbdata,#d,#g',kf,fcvalues{kf}); # end # end - #end + # end else: - raise Exception('Unknown material type in database') - fid.write('\n') - fid.write('\n! WRITE THE COMPOSITE LAYUPS =================================\n') + raise Exception("Unknown material type in database") + fid.write("\n") + fid.write("\n! WRITE THE COMPOSITE LAYUPS =================================\n") rCounter = 1 - if config["elementType"] in ['281','181']: + if config["elementType"] in ["281", "181"]: ### Outer AeroShell - nElements = len(meshData['sections']) + nElements = len(meshData["sections"]) for nelem in range(nElements): - section = meshData['sections'][nelem] + section = meshData["sections"][nelem] # Break from loop once shearweb elements are reached - stackName = section['elementSet'] - strList = stackName.split('_') + stackName = section["elementSet"] + strList = stackName.split("_") if strList[2] == "SW": swstart = nelem break - - layup = section['layup'] - secID = nelem+1 - fid.write('\n ! %s' % (section['elementSet'])) - fid.write('\n sectype,%d,shell' % (secID)) + + layup = section["layup"] + secID = nelem + 1 + fid.write("\n ! %s" % (section["elementSet"])) + fid.write("\n sectype,%d,shell" % (secID)) for layer in layup: matid = layer[0] thickness = layer[1] / 1000 angle = layer[2] - fid.write('\n secdata,%g,%d,%g,,' % (thickness,matid+1,angle)) - fid.write('\n secoffset,bot\n') + fid.write("\n secdata,%g,%d,%g,," % (thickness, matid + 1, angle)) + fid.write("\n secoffset,bot\n") ### Web(s) for nelem in range(swstart, nElements): - section = meshData['sections'][nelem] - secID = nelem+1 - layup = section['layup'] + section = meshData["sections"][nelem] + secID = nelem + 1 + layup = section["layup"] if layup: # secID = (webSectionIDstart + nstat+1) + (nweb * 10 ** orderOfMagnitude) - fid.write('\n ! %s' % (section['elementSet'])) - fid.write('\n sectype,%d,shell' % (secID)) + fid.write("\n ! 
%s" % (section["elementSet"])) + fid.write("\n sectype,%d,shell" % (secID)) for layer in layup: matid = layer[0] thickness = layer[1] / 1000 angle = layer[2] - fid.write('\n secdata,%g,%d,%g,,' % (thickness,matid+1,angle)) - fid.write('\n secoffset,mid\n') + fid.write( + "\n secdata,%g,%d,%g,," % (thickness, matid + 1, angle) + ) + fid.write("\n secoffset,mid\n") else: # logging.warning('Element System %s not yet available' % config["elementType"],'write_shell7 error') - raise Exception('Element System %s not yet available',config["elementType"]) + raise Exception("Element System %s not yet available", config["elementType"]) # [~,jobtitle,~] = fileparts(blade.job_name); - fid.write('\n/title,%s' % (config["dbname"])) - fid.write('\nZrCount=%d\n' % (rCounter)) - #tcl: DEFINE KEYPOINTS FOR SECTIONS AND CONNECT KEYPOINTS WITH LINES - #tcl: THE LINES ARE PRODUCED WITH THREE DIFFERENT SPLINING MACROS - fid.write('\n! DEFINE KEYPOINTS FOR SECTIONS AND CONNECT KEYPOINTS WITH LINES\n') + fid.write("\n/title,%s" % (config["dbname"])) + fid.write("\nZrCount=%d\n" % (rCounter)) + # tcl: DEFINE KEYPOINTS FOR SECTIONS AND CONNECT KEYPOINTS WITH LINES + # tcl: THE LINES ARE PRODUCED WITH THREE DIFFERENT SPLINING MACROS + fid.write("\n! DEFINE KEYPOINTS FOR SECTIONS AND CONNECT KEYPOINTS WITH LINES\n") # Create a coordinate system roughly in the fiber direction (+X down blade, +Z up toward LP side) # --> beginning with global csys, rotate -90 about local Z, then -90 about local Y - fid.write('\nlocal,1000,CART,0,0,0, -90,0,-90\n') + fid.write("\nlocal,1000,CART,0,0,0, -90,0,-90\n") twistFlag = 1 if blade.rotorspin == 1: - twistFlag = - 1 + twistFlag = -1 for kStation in range(TotalStations): # use the generating line to translate and rotate the coordinates # presweep =========================================================== if np.all(blade.isweep == 0): - table = np.array([0,0,np.nan]).reshape((1,-1)) + table = np.array([0, 0, np.nan]).reshape((1, -1)) else: N = len(blade.ispan) - table = np.array([blade.ispan,blade.isweep,np.full([N,1],np.nan)]) + table = np.array([blade.ispan, blade.isweep, np.full([N, 1], np.nan)]) blade_struct = {} blade_struct["PresweepRef"] = {} - blade_struct["PresweepRef"]["method"] = 'shear' + blade_struct["PresweepRef"]["method"] = "shear" blade_struct["PresweepRef"]["table"] = table - blade_struct["PresweepRef"]["pptype"] = 'spline' + blade_struct["PresweepRef"]["pptype"] = "spline" # precurve =========================================================== if np.all(blade.iprebend == 0): - table = np.array([0,0,np.nan]) + table = np.array([0, 0, np.nan]) else: N = len(blade.ispan) - table = np.vstack([blade.ispan,blade.iprebend,np.full((N,),np.nan)]) + table = np.vstack([blade.ispan, blade.iprebend, np.full((N,), np.nan)]) blade_struct["PrecurveRef"] = {} - blade_struct["PrecurveRef"]["method"] = 'shear' + blade_struct["PrecurveRef"]["method"] = "shear" blade_struct["PrecurveRef"]["table"] = table - blade_struct["PrecurveRef"]["pptype"] = 'spline' + blade_struct["PrecurveRef"]["pptype"] = "spline" blade_struct = calcGenLinePP(blade_struct) - if isinstance(blade_struct["PresweepRef"]["pp"],int): + if isinstance(blade_struct["PresweepRef"]["pp"], int): presweep_slope = 0 else: - presweep_slope = blade_struct["PresweepRef"]["pp"].__call__(blade.ispan[kStation], nu=1) - if isinstance(blade_struct["PrecurveRef"]["pp"],int): + presweep_slope = blade_struct["PresweepRef"]["pp"].__call__( + blade.ispan[kStation], nu=1 + ) + if isinstance(blade_struct["PrecurveRef"]["pp"], int): 
precurve_slope = 0 else: - precurve_slope = blade_struct["PrecurveRef"]["pp"].__call__(blade.ispan[kStation], nu=1) + precurve_slope = blade_struct["PrecurveRef"]["pp"].__call__( + blade.ispan[kStation], nu=1 + ) presweepDeg = 180 / np.pi * np.arctan(presweep_slope * twistFlag) - precurveDeg = 180 / np.pi * np.arctan(- precurve_slope) - presweep_rot,precurve_rot = (0,0) - if blade_struct["PresweepRef"]["method"]=='normal': + precurveDeg = 180 / np.pi * np.arctan(-precurve_slope) + presweep_rot, precurve_rot = (0, 0) + if blade_struct["PresweepRef"]["method"] == "normal": presweep_rot = np.arctan(presweep_slope * twistFlag) - if blade_struct["PrecurveRef"]["method"]=='normal': - precurve_rot = np.arctan(- precurve_slope) - transX = twistFlag * blade_struct["PresweepRef"]["pp"].__call__(blade.ispan[kStation]) + if blade_struct["PrecurveRef"]["method"] == "normal": + precurve_rot = np.arctan(-precurve_slope) + transX = twistFlag * blade_struct["PresweepRef"]["pp"].__call__( + blade.ispan[kStation] + ) transY = blade_struct["PrecurveRef"]["pp"].__call__(blade.ispan[kStation]) # ensure we are in csys0 and no keypoints are selected - fid.write('\ncsys,0') + fid.write("\ncsys,0") # Create a coordinate system to be used later for aligning the fiber direction. # First, load the csys defined earlier (+X down blade, +Z up toward LP side) - fid.write('\n csys,1000') + fid.write("\n csys,1000") # Next, translate & rotate relative to this active csys (use CLOCAL, not LOCAL) # translation: global X,Y,Z => local y,z,x # rotation: presweep is local z rotation & precurve is local y rotation - fid.write('\n clocal,%d,CART,%g,%g,%g, %g,%g,%g\n' % \ - ((1000 + kStation+1), blade.ispan[kStation],transX,transY,presweepDeg,0,precurveDeg)) + fid.write( + "\n clocal,%d,CART,%g,%g,%g, %g,%g,%g\n" + % ( + (1000 + kStation + 1), + blade.ispan[kStation], + transX, + transY, + presweepDeg, + 0, + precurveDeg, + ) + ) # Create coordinate system at the tip - fid.write('\nlocal,12,CART,%g,%g,%g, %g,%g,%g\n' % \ - (transX,transY,blade.ispan[kStation],0,precurve_rot * 180 / np.pi,presweep_rot * 180 / np.pi)) - fid.write('\n csys,0') - fid.write('\nksel,all\n') - fid.write('\nallsel\n') + fid.write( + "\nlocal,12,CART,%g,%g,%g, %g,%g,%g\n" + % ( + transX, + transY, + blade.ispan[kStation], + 0, + precurve_rot * 180 / np.pi, + presweep_rot * 180 / np.pi, + ) + ) + fid.write("\n csys,0") + fid.write("\nksel,all\n") + fid.write("\nallsel\n") nodes = meshData["nodes"] elements = meshData["elements"] nnodes = nodes.shape[0] nelements = elements.shape[0] - fid.write('\n! DEFINE NODES =======================================\n') + fid.write("\n! DEFINE NODES =======================================\n") for iNode in range(nnodes): - fid.write('n, %i, %f, %f, %f\n' % \ - (iNode+1,nodes[iNode,0],nodes[iNode,1],nodes[iNode,2])) - #Set the element Type - if '281' == config["elementType"]: - fid.write('type, 11\n') + fid.write( + "n, %i, %f, %f, %f\n" + % (iNode + 1, nodes[iNode, 0], nodes[iNode, 1], nodes[iNode, 2]) + ) + # Set the element Type + if "281" == config["elementType"]: + fid.write("type, 11\n") else: - if '181' == config["elementType"]: - fid.write('type, 12\n') + if "181" == config["elementType"]: + fid.write("type, 12\n") else: # errordlg(sprintf('Element System %s not yet available',config["elementType"]),'write_shell7 error') - raise Exception('Element System %s not yet available',config["elementType"]) + raise Exception( + "Element System %s not yet available", config["elementType"] + ) dup = [] - fid.write('\n! 
DEFINE ELEMENTS =======================================\n') + fid.write("\n! DEFINE ELEMENTS =======================================\n") for iElement in range(nelements): - if np.unique(elements[iElement,:]).size == 4: + if np.unique(elements[iElement, :]).size == 4: elem_data = ( - elements[iElement,0]+1, - elements[iElement,1]+1, - elements[iElement,2]+1, - elements[iElement,3]+1, - iElement+1 + elements[iElement, 0] + 1, + elements[iElement, 1] + 1, + elements[iElement, 2] + 1, + elements[iElement, 3] + 1, + iElement + 1, ) - fid.write('e, %i, %i, %i, %i !Element %i \n' % elem_data) + fid.write("e, %i, %i, %i, %i !Element %i \n" % elem_data) else: dup.append(iElement) - - fid.write('\n! ASSIGN SECTIONS TO OUTER SHELL ELEMENTS =======================================\n') - nElements = len(meshData['sections']) + + fid.write( + "\n! ASSIGN SECTIONS TO OUTER SHELL ELEMENTS =======================================\n" + ) + nElements = len(meshData["sections"]) for nelem in range(nElements): - stackName = meshData['sections'][nelem]['elementSet'] - strList = stackName.split('_') + stackName = meshData["sections"][nelem]["elementSet"] + strList = stackName.split("_") if strList[2] == "SW": swstart = nelem break nstat = int(strList[0]) - secID = nelem+1 - csID = 1000 + nstat+1 + secID = nelem + 1 + csID = 1000 + nstat + 1 elementList = meshData["sets"]["element"][nelem]["labels"] for iEl in range(len(elementList)): - fid.write(' emodif,%i,secnum,%i\n' % (elementList[iEl]+1,secID)) - fid.write(' emodif,%i,esys,%i\n' % (elementList[iEl]+1,csID)) + fid.write(" emodif,%i,secnum,%i\n" % (elementList[iEl] + 1, secID)) + fid.write(" emodif,%i,esys,%i\n" % (elementList[iEl] + 1, csID)) - fid.write('\n! ASSIGN SECTIONS TO SHEARWEB(S) SHELL ELEMENTS =======================================\n') + fid.write( + "\n! ASSIGN SECTIONS TO SHEARWEB(S) SHELL ELEMENTS =======================================\n" + ) for nelem in range(swstart, nElements): - secID = nelem+1 - section = meshData['sections'][nelem] - layup = section['layup'] + secID = nelem + 1 + section = meshData["sections"][nelem] + layup = section["layup"] if layup: csID = 1000 + nstat elementList = meshData["sets"]["element"][nelem]["labels"] for iEl in range(len(elementList)): - fid.write(' emodif,%i,secnum,%i\n' % (elementList[iEl],secID)) + fid.write(" emodif,%i,secnum,%i\n" % (elementList[iEl], secID)) # fprintf(fid,' emodif,#i,esys,#i\n',elementList(iEl),csID); - - fid.write('\n ENSYM,,,,1,%i' % (nelements)) - #jcb: are these 2 lines necessary now that we have local coordinate + + fid.write("\n ENSYM,,,,1,%i" % (nelements)) + # jcb: are these 2 lines necessary now that we have local coordinate # systems to deal with presweep and precurve? 
- fid.write('\n local,11,CART,0,0,0,90,0,-90') - fid.write('\n esys,11') - #LocationZ_lastStation = data.station(TotalStations).LocationZ; - #fprintf(fid,'\n local,12,cart,0,0,#f,0,0,0',LocationZ_lastStation); - fid.write('\n csys,12') - fid.write('\n nsel,none') - fid.write('\n n,,0.0,0.0,0.0') - fid.write('\n *get,z_master_node_number,node,,num,max') - fid.write('\n type,21') - fid.write('\n real,999') - fid.write('\n e,z_master_node_number') - fid.write('\n nsel,all') - fid.write('\n csys,0') - fid.write('\n allsel\n') - fid.write('\n nsll,s,1') - fid.write('\n nsel,a,node,,z_master_node_number') - if config["elementType"] in ['91','99','281','181']: - fid.write('\n cerig,z_master_node_number,all,RXYZ\n') + fid.write("\n local,11,CART,0,0,0,90,0,-90") + fid.write("\n esys,11") + # LocationZ_lastStation = data.station(TotalStations).LocationZ; + # fprintf(fid,'\n local,12,cart,0,0,#f,0,0,0',LocationZ_lastStation); + fid.write("\n csys,12") + fid.write("\n nsel,none") + fid.write("\n n,,0.0,0.0,0.0") + fid.write("\n *get,z_master_node_number,node,,num,max") + fid.write("\n type,21") + fid.write("\n real,999") + fid.write("\n e,z_master_node_number") + fid.write("\n nsel,all") + fid.write("\n csys,0") + fid.write("\n allsel\n") + fid.write("\n nsll,s,1") + fid.write("\n nsel,a,node,,z_master_node_number") + if config["elementType"] in ["91", "99", "281", "181"]: + fid.write("\n cerig,z_master_node_number,all,RXYZ\n") else: - if config["elementType"] == '191': - fid.write('\n cerig,z_master_node_number,all,uxyz\n') - if config["BoundaryCondition"]=='cantilevered': - #jcb: FIXME - nsel could break with swept/bent blades - fid.write('\n nsel,s,loc,z,0') - fid.write('\n d,all,all') - fid.write('\n nsel,all\n') - fid.write('\nallsel') - fid.write('\n! nummrg,all') - fid.write('\n! numcmp,node') - fid.write('\ncsys,0\n') + if config["elementType"] == "191": + fid.write("\n cerig,z_master_node_number,all,uxyz\n") + if config["BoundaryCondition"] == "cantilevered": + # jcb: FIXME - nsel could break with swept/bent blades + fid.write("\n nsel,s,loc,z,0") + fid.write("\n d,all,all") + fid.write("\n nsel,all\n") + fid.write("\nallsel") + fid.write("\n! nummrg,all") + fid.write("\n! numcmp,node") + fid.write("\ncsys,0\n") ### Material Properties ### - fid.write('mpwrite,Materials,txt,,\n') - #if ~all(cellfun('isempty',fcvalues)) - fid.write('/output,Strengths,txt,,\n') - fid.write('TBLIST, ,ALL\n') - fid.write('/output\n') - #end + fid.write("mpwrite,Materials,txt,,\n") + # if ~all(cellfun('isempty',fcvalues)) + fid.write("/output,Strengths,txt,,\n") + fid.write("TBLIST, ,ALL\n") + fid.write("/output\n") + # end # enter POST1 for postprocessing configuration commands - fid.write('\nfinish') - fid.write('\n/post1\n') - fid.write('\nfctyp,dele,all ! remove all material failure-criteria postprocessing\n') + fid.write("\nfinish") + fid.write("\n/post1\n") + fid.write( + "\nfctyp,dele,all ! 
remove all material failure-criteria postprocessing\n" + ) for kfc in config["FailureCriteria"].keys(): if config["FailureCriteria"][kfc]: - fid.write('fctyp,add,%s\n' % (config["FailureCriteria"][kfc])) - fid.write('\nfinish\n') + fid.write("fctyp,add,%s\n" % (config["FailureCriteria"][kfc])) + fid.write("\nfinish\n") ### Material Properties ### - fid.write('mpwrite,Materials,txt,,\n') + fid.write("mpwrite,Materials,txt,,\n") # commenting below since fcvalues are not touched after initialization -kb # if not np.all(cellfun('isempty',fcvalues)) : # fid.write('/output,Strengths,txt,,\n') # fid.write('TBLIST, ,ALL\n') # fid.write('/output\n') ### Section Properties ### - fid.write('/output, Sections,txt\n') - fid.write('SLIST,,,,FULL\n') + fid.write("/output, Sections,txt\n") + fid.write("SLIST,,,,FULL\n") # fprintf(fid,'SLIST,\n'); - fid.write('/output\n') + fid.write("/output\n") ### Element Properties ### - fid.write('/output, Elements,txt\n') - fid.write('elist,all,,,0,0 \n') - fid.write('/output\n') + fid.write("/output, Elements,txt\n") + fid.write("elist,all,,,0,0 \n") + fid.write("/output\n") # save database file - fid.write('\nfinish') - fid.write('\nsave') - + fid.write("\nfinish") + fid.write("\nsave") + fid.close() - print('The following file has been written %s\n',filename) - + print("The following file has been written %s\n", filename) + return - - + + def fullyPopluateStrengthsArray(strengthArray): nStrenghts = strengthArray.shape[0] if nStrenghts < 3: for i in range(3 - nStrenghts): - strengthArray = np.concatenate([strengthArray,[strengthArray[i]]]) - + strengthArray = np.concatenate([strengthArray, [strengthArray[i]]]) + return strengthArray def write_ansys_loads(nodeData, loads, forcefilename, analysisConfig): - """ - """ - maptype = 'map3D_fxM0' - if ('FollowerForces' in analysisConfig["analysisFlags"]) and \ - not len(analysisConfig["analysisFlags"].FollowerForces)==0 \ - and analysisConfig["analysisFlags"].FollowerForces != 0 and \ - ('StaticNonlinear' in analysisConfig["analysisFlags"]) and not \ - len(analysisConfig["analysisFlags"].StaticNonlinear)==0 and \ - analysisConfig["analysisFlags"]["StaticNonlinear"] != 0: - forcemap,forcesums = beamForceToAnsysShellFollower(nodeData,loads, maptype=maptype) + """ """ + maptype = "map3D_fxM0" + if ( + ("FollowerForces" in analysisConfig["analysisFlags"]) + and not len(analysisConfig["analysisFlags"].FollowerForces) == 0 + and analysisConfig["analysisFlags"].FollowerForces != 0 + and ("StaticNonlinear" in analysisConfig["analysisFlags"]) + and not len(analysisConfig["analysisFlags"].StaticNonlinear) == 0 + and analysisConfig["analysisFlags"]["StaticNonlinear"] != 0 + ): + forcemap, forcesums = beamForceToAnsysShellFollower( + nodeData, loads, maptype=maptype + ) else: - forcemap,forcesums = beamForceToAnsysShell(nodeData,loads, maptype=maptype) + forcemap, forcesums = beamForceToAnsysShell(nodeData, loads, maptype=maptype) - writeforcefile(forcefilename+'.src',forcemap,forcesums,maptype) - print('Forces mapped to ANSYS model') - return \ No newline at end of file + writeforcefile(forcefilename + ".src", forcemap, forcesums, maptype) + print("Forces mapped to ANSYS model") + return diff --git a/src/pynumad/analysis/beamUtils.py b/src/pynumad/analysis/beamUtils.py index 84e6d66..128e9bd 100644 --- a/src/pynumad/analysis/beamUtils.py +++ b/src/pynumad/analysis/beamUtils.py @@ -1,68 +1,68 @@ import numpy as np from pynumad.utils.interpolation import interpolator_wrap import os -def readVABShomogenization(fileName): - #Stiffness - 
beam_stiff=np.zeros((6,6)) +def readVABShomogenization(fileName): + # Stiffness + beam_stiff = np.zeros((6, 6)) with open(fileName) as f: - lines=f.readlines() - - for lineNumber,line in enumerate(lines): - if 'Timoshenko Stiffness Matrix' in line: + lines = f.readlines() + + for lineNumber, line in enumerate(lines): + if "Timoshenko Stiffness Matrix" in line: break - lineStart=lineNumber+3 - lineEnd=lineStart+6 - ct=0 - for iLine in range(lineStart,lineEnd): - dataList=[float(i) for i in lines[iLine].split() if i.strip()] - beam_stiff[ct,:]=dataList - ct+=1 - - - #mass - beam_inertia=np.zeros((6,6)) - for lineNumber,line in enumerate(lines): - if 'Mass Matrix' in line: + lineStart = lineNumber + 3 + lineEnd = lineStart + 6 + ct = 0 + for iLine in range(lineStart, lineEnd): + dataList = [float(i) for i in lines[iLine].split() if i.strip()] + beam_stiff[ct, :] = dataList + ct += 1 + + # mass + beam_inertia = np.zeros((6, 6)) + for lineNumber, line in enumerate(lines): + if "Mass Matrix" in line: break - lineStart=lineNumber+3 - lineEnd=lineStart+6 - ct=0 - for iLine in range(lineStart,lineEnd): - dataList=[float(i) for i in lines[iLine].split() if i.strip()] - beam_inertia[ct,:]=dataList - ct+=1 - + lineStart = lineNumber + 3 + lineEnd = lineStart + 6 + ct = 0 + for iLine in range(lineStart, lineEnd): + dataList = [float(i) for i in lines[iLine].split() if i.strip()] + beam_inertia[ct, :] = dataList + ct += 1 + return beam_stiff, beam_inertia - -def transformMatrixToBeamDyn(beam_stiff,beam_inertia): - beamDynData={} + +def transformMatrixToBeamDyn(beam_stiff, beam_inertia): + beamDynData = {} B = np.array([[0, 0, 1], [0, -1, 0], [1, 0, 0]]) # NEW transformation matrix T = np.dot(np.identity(3), np.linalg.inv(B)) - - nStations, _,_=np.shape(beam_stiff) + + nStations, _, _ = np.shape(beam_stiff) for iStation in range(nStations): - beam_stiff[iStation,:,:]=trsf_sixbysix(beam_stiff[iStation,:,:], T) - beam_inertia[iStation,:,:]=trsf_sixbysix(beam_inertia[iStation,:,:], T) - - return(beam_stiff,beam_inertia) + beam_stiff[iStation, :, :] = trsf_sixbysix(beam_stiff[iStation, :, :], T) + beam_inertia[iStation, :, :] = trsf_sixbysix(beam_inertia[iStation, :, :], T) + + return (beam_stiff, beam_inertia) + def trsf_sixbysix(M, T): """ - Transform six-by-six compliance/stiffness matrix. + Transform six-by-six compliance/stiffness matrix. change of reference frame in engineering (or Voigt) notation. 
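In the usual convention for beam sectional (Timoshenko stiffness and mass) matrices, this change of frame is the block rotation res = R M R^T with R = blkdiag(T, T), since the force/strain and moment/curvature partitions each rotate with the same 3x3 matrix T. A minimal NumPy sketch of that convention (illustrative; the function name is hypothetical):

import numpy as np

def rotate_section_matrix(M, T):
    # Apply the 3x3 rotation T to both the translational and rotational blocks.
    R = np.block([[T, np.zeros((3, 3))], [np.zeros((3, 3)), T]])
    return R @ M @ R.T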
- + Parameters ---------- M : np.ndarray 6x6 Siffness or Mass Matrix T : np.ndarray Transformation Matrix - + Returns ---------- res : np.ndarray @@ -79,201 +79,309 @@ def trsf_sixbysix(M, T): res = np.hstack((tmp_1, tmp_2)) return res -# --- Write BeamDyn file with blade reference line locations ---# -def write_beamdyn_axis(directory, wt_name, blade,radial_stations): +# --- Write BeamDyn file with blade reference line locations ---# +def write_beamdyn_axis(directory, wt_name, blade, radial_stations): n_pts = 50 grid = np.linspace(0, 1, n_pts) - kp_xr=interpolator_wrap(radial_stations,blade.prebend,grid,'pchip', axis=1) - kp_yr=interpolator_wrap(radial_stations,blade.sweep,grid,'pchip', axis=1) - kp_zr=interpolator_wrap(radial_stations,blade.ispan,grid,'pchip', axis=1) - twist_interp=interpolator_wrap(radial_stations,blade.idegreestwist,grid,'pchip', axis=1) - + kp_xr = interpolator_wrap(radial_stations, blade.prebend, grid, "pchip", axis=1) + kp_yr = interpolator_wrap(radial_stations, blade.sweep, grid, "pchip", axis=1) + kp_zr = interpolator_wrap(radial_stations, blade.ispan, grid, "pchip", axis=1) + twist_interp = interpolator_wrap( + radial_stations, blade.idegreestwist, grid, "pchip", axis=1 + ) data = np.vstack((kp_xr, kp_yr, kp_zr, twist_interp)).T if not os.path.exists(directory): os.makedirs(directory) - axisFileName=wt_name + '_BeamDyn.dat' - - file = open(directory +'/'+ axisFileName, 'w') - file.write('--------- BEAMDYN with OpenFAST INPUT FILE -------------------------------------------\n') - file.write('%s blade\n' % (wt_name)) - file.write('---------------------- SIMULATION CONTROL --------------------------------------\n') - file.write('True Echo - Echo input data to ".ech" (flag)\n') - file.write('True QuasiStaticInit - Use quasistatic pre-conditioning with centripetal accelerations in initialization (flag) [dynamic solve only]\n') - file.write(' 0 rhoinf - Numerical damping parameter for generalized-alpha integrator\n') - file.write(' 2 quadrature - Quadrature method: 1=Gaussian; 2=Trapezoidal (switch)\n') - file.write(' 2 refine - Refinement factor for trapezoidal quadrature (-). DEFAULT = 1 [used only when quadrature=2]\n') - file.write('"DEFAULT" n_fact - Factorization frequency (-). DEFAULT = 5\n') + axisFileName = wt_name + "_BeamDyn.dat" + + file = open(directory + "/" + axisFileName, "w") + file.write( + "--------- BEAMDYN with OpenFAST INPUT FILE -------------------------------------------\n" + ) + file.write("%s blade\n" % (wt_name)) + file.write( + "---------------------- SIMULATION CONTROL --------------------------------------\n" + ) + file.write( + 'True Echo - Echo input data to ".ech" (flag)\n' + ) + file.write( + "True QuasiStaticInit - Use quasistatic pre-conditioning with centripetal accelerations in initialization (flag) [dynamic solve only]\n" + ) + file.write( + " 0 rhoinf - Numerical damping parameter for generalized-alpha integrator\n" + ) + file.write( + " 2 quadrature - Quadrature method: 1=Gaussian; 2=Trapezoidal (switch)\n" + ) + file.write( + " 2 refine - Refinement factor for trapezoidal quadrature (-). DEFAULT = 1 [used only when quadrature=2]\n" + ) + file.write( + '"DEFAULT" n_fact - Factorization frequency (-). DEFAULT = 5\n' + ) file.write('"DEFAULT" DTBeam - Time step size (s).\n') - file.write(' 50 load_retries - Number of factored load retries before quitting the aimulation\n') - file.write('"DEFAULT" NRMax - Max number of iterations in Newton-Ralphson algorithm (-). 
DEFAULT = 10\n') + file.write( + " 50 load_retries - Number of factored load retries before quitting the aimulation\n" + ) + file.write( + '"DEFAULT" NRMax - Max number of iterations in Newton-Ralphson algorithm (-). DEFAULT = 10\n' + ) file.write('"DEFAULT" stop_tol - Tolerance for stopping criterion (-)\n') - file.write('"DEFAULT" tngt_stf_fd - Flag to use finite differenced tangent stiffness matrix (-)\n') - file.write('"DEFAULT" tngt_stf_comp - Flag to compare analytical finite differenced tangent stiffness matrix (-)\n') - file.write('"DEFAULT" tngt_stf_pert - perturbation size for finite differencing (-)\n') - file.write('"DEFAULT" tngt_stf_difftol- Maximum allowable relative difference between analytical and fd tangent stiffness (-)\n') - file.write('True RotStates - Orient states in the rotating frame during linearization? (flag) [used only when linearizing]\n') - file.write('---------------------- GEOMETRY PARAMETER --------------------------------------\n') - file.write(' 1 member_total - Total number of members (-)\n') - file.write(' %u kp_total - Total number of key points (-) [must be at least 3]\n' % (n_pts)) - file.write(' 1 %u - Member number; Number of key points in this member\n' % (n_pts)) - file.write('\t\t kp_xr \t\t\t kp_yr \t\t\t kp_zr \t\t initial_twist\n') - file.write('\t\t (m) \t\t\t (m) \t\t\t (m) \t\t (deg)\n') - + file.write( + '"DEFAULT" tngt_stf_fd - Flag to use finite differenced tangent stiffness matrix (-)\n' + ) + file.write( + '"DEFAULT" tngt_stf_comp - Flag to compare analytical finite differenced tangent stiffness matrix (-)\n' + ) + file.write( + '"DEFAULT" tngt_stf_pert - perturbation size for finite differencing (-)\n' + ) + file.write( + '"DEFAULT" tngt_stf_difftol- Maximum allowable relative difference between analytical and fd tangent stiffness (-)\n' + ) + file.write( + "True RotStates - Orient states in the rotating frame during linearization? 
(flag) [used only when linearizing]\n" + ) + file.write( + "---------------------- GEOMETRY PARAMETER --------------------------------------\n" + ) + file.write(" 1 member_total - Total number of members (-)\n") + file.write( + " %u kp_total - Total number of key points (-) [must be at least 3]\n" + % (n_pts) + ) + file.write( + " 1 %u - Member number; Number of key points in this member\n" + % (n_pts) + ) + file.write("\t\t kp_xr \t\t\t kp_yr \t\t\t kp_zr \t\t initial_twist\n") + file.write("\t\t (m) \t\t\t (m) \t\t\t (m) \t\t (deg)\n") for i in range(n_pts): - file.write('\t %.5e \t %.5e \t %.5e \t %.5e \n' % (data[i, 0], data[i, 1], data[i, 2], data[i, 3])) - - file.write('---------------------- MESH PARAMETER ------------------------------------------\n') - file.write(' 10 order_elem - Order of interpolation (basis) function (-)\n') - file.write('---------------------- MATERIAL PARAMETER --------------------------------------\n') - file.write('"%s" BldFile - Name of file containing properties for blade (quoted string)\n' % (wt_name + '_BeamDyn_Blade.dat')) - file.write('---------------------- PITCH ACTUATOR PARAMETERS -------------------------------\n') - file.write('False UsePitchAct - Whether a pitch actuator should be used (flag)\n') - file.write(' 200 PitchJ - Pitch actuator inertia (kg-m^2) [used only when UsePitchAct is true]\n') - file.write(' 2E+07 PitchK - Pitch actuator stiffness (kg-m^2/s^2) [used only when UsePitchAct is true]\n') - file.write(' 500000 PitchC - Pitch actuator damping (kg-m^2/s) [used only when UsePitchAct is true]\n') - file.write('---------------------- OUTPUTS -------------------------------------------------\n') - file.write('False SumPrint - Print summary data to ".sum" (flag)\n') - file.write('"ES10.3E2" OutFmt - Format used for text tabular output, excluding the time channel.\n') - file.write(' 1 NNodeOuts - Number of nodes to output to file [0 - 9] (-)\n') - file.write('3, 6, 9, 12, 15, 18, 21, 24, 27 OutNd - Nodes whose values will be output (-)\n') - file.write(' OutList - The next line(s) contains a list of output parameters. 
See OutListParameters.xlsx for a listing of available output channels, (-)\n') - - coordinate={} - coordinate['F']='l' - coordinate['M']='l' - coordinate['RD']='r' - coordinate['TD']='r' - - channelList=['F','M','RD','TD'] + file.write( + "\t %.5e \t %.5e \t %.5e \t %.5e \n" + % (data[i, 0], data[i, 1], data[i, 2], data[i, 3]) + ) + + file.write( + "---------------------- MESH PARAMETER ------------------------------------------\n" + ) + file.write( + " 10 order_elem - Order of interpolation (basis) function (-)\n" + ) + file.write( + "---------------------- MATERIAL PARAMETER --------------------------------------\n" + ) + file.write( + '"%s" BldFile - Name of file containing properties for blade (quoted string)\n' + % (wt_name + "_BeamDyn_Blade.dat") + ) + file.write( + "---------------------- PITCH ACTUATOR PARAMETERS -------------------------------\n" + ) + file.write( + "False UsePitchAct - Whether a pitch actuator should be used (flag)\n" + ) + file.write( + " 200 PitchJ - Pitch actuator inertia (kg-m^2) [used only when UsePitchAct is true]\n" + ) + file.write( + " 2E+07 PitchK - Pitch actuator stiffness (kg-m^2/s^2) [used only when UsePitchAct is true]\n" + ) + file.write( + " 500000 PitchC - Pitch actuator damping (kg-m^2/s) [used only when UsePitchAct is true]\n" + ) + file.write( + "---------------------- OUTPUTS -------------------------------------------------\n" + ) + file.write( + 'False SumPrint - Print summary data to ".sum" (flag)\n' + ) + file.write( + '"ES10.3E2" OutFmt - Format used for text tabular output, excluding the time channel.\n' + ) + file.write( + " 1 NNodeOuts - Number of nodes to output to file [0 - 9] (-)\n" + ) + file.write( + "3, 6, 9, 12, 15, 18, 21, 24, 27 OutNd - Nodes whose values will be output (-)\n" + ) + file.write( + " OutList - The next line(s) contains a list of output parameters. 
See OutListParameters.xlsx for a listing of available output channels, (-)\n" + ) + + coordinate = {} + coordinate["F"] = "l" + coordinate["M"] = "l" + coordinate["RD"] = "r" + coordinate["TD"] = "r" + + channelList = ["F", "M", "RD", "TD"] for iNode in range(9): for load in channelList: - for dir in ['x','y','z']: + for dir in ["x", "y", "z"]: file.write(f'"N{iNode+1}{load}{dir}{coordinate[load]}"\n') - - #Root - coordinate={} - coordinate['F']='r' - coordinate['M']='r' - - channelList=['F','M'] - for iNode in ['Root']: + # Root + coordinate = {} + coordinate["F"] = "r" + coordinate["M"] = "r" + + channelList = ["F", "M"] + for iNode in ["Root"]: for load in channelList: - for dir in ['x','y','z']: + for dir in ["x", "y", "z"]: file.write(f'"{iNode}{load}{dir}{coordinate[load]}"\n') - #Tip - coordinate={} - coordinate['RD']='r' - coordinate['TD']='r' - - channelList=['RD','TD'] - for iNode in ['Tip']: + # Tip + coordinate = {} + coordinate["RD"] = "r" + coordinate["TD"] = "r" + + channelList = ["RD", "TD"] + for iNode in ["Tip"]: for load in channelList: - for dir in ['x','y','z']: + for dir in ["x", "y", "z"]: file.write(f'"{iNode}{load}{dir}{coordinate[load]}"\n') - - file.write('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)\n') - file.write('---------------------------------------------------------------------------------------\n') + file.write( + 'END of input file (the word "END" must appear in the first 3 columns of this last OutList line)\n' + ) + file.write( + "---------------------------------------------------------------------------------------\n" + ) file.close() - print('Finished writing BeamDyn File') + print("Finished writing BeamDyn File") return axisFileName + # --- Write BeamDyn_Blade file with blade properties ---# def write_beamdyn_prop(folder, wt_name, radial_stations, beam_stiff, beam_inertia, mu): n_pts = len(radial_stations) if not os.path.exists(folder): os.makedirs(folder) - - propFileName= wt_name + '_BeamDyn_Blade.dat' - - - file = open(folder +'/'+propFileName, 'w') - file.write(' ------- BEAMDYN V1.00.* INDIVIDUAL BLADE INPUT FILE --------------------------\n') - file.write(' Test Format 1\n') - file.write(' ---------------------- BLADE PARAMETERS --------------------------------------\n') - file.write('%u station_total - Number of blade input stations (-)\n' % (n_pts)) - file.write(' 1 damp_type - Damping type: 0: no damping; 1: damped\n') - file.write(' ---------------------- DAMPING COEFFICIENT------------------------------------\n') - file.write(' mu1 mu2 mu3 mu4 mu5 mu6\n') - file.write(' (-) (-) (-) (-) (-) (-)\n') - file.write('\t %.5e \t %.5e \t %.5e \t %.5e \t %.5e \t %.5e\n' % (mu[0], mu[1], mu[2], mu[3], mu[4], mu[5])) - file.write(' ---------------------- DISTRIBUTED PROPERTIES---------------------------------\n') - + + propFileName = wt_name + "_BeamDyn_Blade.dat" + + file = open(folder + "/" + propFileName, "w") + file.write( + " ------- BEAMDYN V1.00.* INDIVIDUAL BLADE INPUT FILE --------------------------\n" + ) + file.write(" Test Format 1\n") + file.write( + " ---------------------- BLADE PARAMETERS --------------------------------------\n" + ) + file.write("%u station_total - Number of blade input stations (-)\n" % (n_pts)) + file.write(" 1 damp_type - Damping type: 0: no damping; 1: damped\n") + file.write( + " ---------------------- DAMPING COEFFICIENT------------------------------------\n" + ) + file.write(" mu1 mu2 mu3 mu4 mu5 mu6\n") + file.write(" (-) (-) (-) (-) (-) (-)\n") + 
file.write( + "\t %.5e \t %.5e \t %.5e \t %.5e \t %.5e \t %.5e\n" + % (mu[0], mu[1], mu[2], mu[3], mu[4], mu[5]) + ) + file.write( + " ---------------------- DISTRIBUTED PROPERTIES---------------------------------\n" + ) + for i in range(n_pts): - file.write('\t %.6f \n' % (radial_stations[i])) + file.write("\t %.6f \n" % (radial_stations[i])) # write stiffness matrices for j in range(6): - file.write('\t %.16e \t %.16e \t %.16e \t %.16e \t %.16e \t %.16e\n' % ( - beam_stiff[i, j, 0], beam_stiff[i, j, 1], beam_stiff[i, j, 2], beam_stiff[i, j, 3], beam_stiff[i, j, 4], - beam_stiff[i, j, 5])) - file.write('\n') + file.write( + "\t %.16e \t %.16e \t %.16e \t %.16e \t %.16e \t %.16e\n" + % ( + beam_stiff[i, j, 0], + beam_stiff[i, j, 1], + beam_stiff[i, j, 2], + beam_stiff[i, j, 3], + beam_stiff[i, j, 4], + beam_stiff[i, j, 5], + ) + ) + file.write("\n") # write inertia properties for j in range(6): - file.write('\t %.16e \t %.16e \t %.16e \t %.16e \t %.16e \t %.16e\n' % ( - beam_inertia[i, j, 0], beam_inertia[i, j, 1], beam_inertia[i, j, 2], beam_inertia[i, j, 3], - beam_inertia[i, j, 4], beam_inertia[i, j, 5])) - file.write('\n') + file.write( + "\t %.16e \t %.16e \t %.16e \t %.16e \t %.16e \t %.16e\n" + % ( + beam_inertia[i, j, 0], + beam_inertia[i, j, 1], + beam_inertia[i, j, 2], + beam_inertia[i, j, 3], + beam_inertia[i, j, 4], + beam_inertia[i, j, 5], + ) + ) + file.write("\n") # ToDO: check correct translation of stiffness and mass matrices from VABS and anbax !!! file.close() - print('Finished writing BeamDyn_Blade File') + print("Finished writing BeamDyn_Blade File") return propFileName -def writeBeamDynStandAlone(fileNames,disrLoads,tipLoads,directory='.'): - +def writeBeamDynStandAlone(fileNames, disrLoads, tipLoads, directory="."): if not os.path.exists(directory): os.makedirs(directory) from pynumad.utils.misc_utils import copy_and_replace - templateFileName='beamDynStandAlone.template' - - analysisFileName=fileNames[0]+'_driver.inp' + templateFileName = "beamDynStandAlone.template" - pathName=directory+'/'+analysisFileName + analysisFileName = fileNames[0] + "_driver.inp" + pathName = directory + "/" + analysisFileName - - - copy_and_replace(templateFileName, pathName, + copy_and_replace( + templateFileName, + pathName, { - 'DISTRLOAD1' : str(disrLoads[0]), - 'DISTRLOAD2' : str(disrLoads[1]), - 'DISTRLOAD3' : str(disrLoads[2]), - 'DISTRLOAD4' : str(disrLoads[3]), - 'DISTRLOAD5' : str(disrLoads[4]), - 'DISTRLOAD6' : str(disrLoads[5]), - 'TIPLOAD1' : str(tipLoads[0]), - 'TIPLOAD2' : str(tipLoads[1]), - 'TIPLOAD3' : str(tipLoads[2]), - 'TIPLOAD4' : str(tipLoads[3]), - 'TIPLOAD5' : str(tipLoads[4]), - 'TIPLOAD6' : str(tipLoads[5]), - 'AXIS FILE NAME': fileNames[0], - }) + "DISTRLOAD1": str(disrLoads[0]), + "DISTRLOAD2": str(disrLoads[1]), + "DISTRLOAD3": str(disrLoads[2]), + "DISTRLOAD4": str(disrLoads[3]), + "DISTRLOAD5": str(disrLoads[4]), + "DISTRLOAD6": str(disrLoads[5]), + "TIPLOAD1": str(tipLoads[0]), + "TIPLOAD2": str(tipLoads[1]), + "TIPLOAD3": str(tipLoads[2]), + "TIPLOAD4": str(tipLoads[3]), + "TIPLOAD5": str(tipLoads[4]), + "TIPLOAD6": str(tipLoads[5]), + "AXIS FILE NAME": fileNames[0], + }, + ) return analysisFileName -def runBeamDynStandAlone(beamDynDriverFileName,log,directory='.'): + +def runBeamDynStandAlone(beamDynDriverFileName, log, directory="."): from pynumad import path_data import subprocess + try: - this_cmd = path_data['openFast']+'beamdyn_driver '+directory+'/'+beamDynDriverFileName - log.info(f' running: {this_cmd}') + this_cmd = ( + path_data["openFast"] + 
+ "beamdyn_driver " + + directory + + "/" + + beamDynDriverFileName + ) + log.info(f" running: {this_cmd}") subprocess.run(this_cmd, shell=True, check=True, capture_output=True) # with open(filePath+'.ech', 'r') as f: @@ -282,8 +390,4 @@ def runBeamDynStandAlone(beamDynDriverFileName,log,directory='.'): # log.error(f'****************************\n{lines[-1]}\n******************************') except subprocess.CalledProcessError as e: - log.error(f'Error running {this_cmd}: {e}') - - - - + log.error(f"Error running {this_cmd}: {e}") diff --git a/src/pynumad/analysis/cubit/cubitBlade.py b/src/pynumad/analysis/cubit/cubitBlade.py index 1da75f1..c460526 100644 --- a/src/pynumad/analysis/cubit/cubitBlade.py +++ b/src/pynumad/analysis/cubit/cubitBlade.py @@ -6,33 +6,41 @@ import glob -def generateCubitCrossSections(blade, wt_name, settings, crosssectionParams, model2Dor3D, stationList=None, directory='.'): - - if stationList is None or len(stationList)==0: +def generateCubitCrossSections( + blade, + wt_name, + settings, + crosssectionParams, + model2Dor3D, + stationList=None, + directory=".", +): + if stationList is None or len(stationList) == 0: stationList = list(range(len(blade.ispan))) # Initialize variables surfaceDict = {} # Uniquly track which materiall IDs are actuall used in blade model materialsUsed = set() - iLE = blade.LEindex+1 + iLE = blade.LEindex + 1 thicknessScaling = 0.001 - geometryScaling = thicknessScaling*1000 + geometryScaling = thicknessScaling * 1000 # Set up Cubit - cubit.init(['cubit','-nojournal']) - - cubit.cmd('undo off') - cubit.cmd('set geometry accuracy 1e-6') + cubit.init(["cubit", "-nojournal"]) + + cubit.cmd("undo off") + cubit.cmd("set geometry accuracy 1e-6") # making numerus 3D volumes is very slow with autosize on - cubit.cmd('set default autosize off') + cubit.cmd("set default autosize off") # Modify blade object to accomodate actual layer thicknesses - - expandTEthicknesses=list(crosssectionParams['TE_adhesive']+6*crosssectionParams['minimumLayerThickness']) - blade.expandBladeGeometryTEs(expandTEthicknesses) - + expandTEthicknesses = list( + crosssectionParams["TE_adhesive"] + + 6 * crosssectionParams["minimumLayerThickness"] + ) + blade.expandBladeGeometryTEs(expandTEthicknesses) blade.editStacksForSolidMesh() @@ -52,35 +60,33 @@ def generateCubitCrossSections(blade, wt_name, settings, crosssectionParams, mod refLineCoords = np.vstack(([blade.sweep, blade.prebend, blade.ispan])).transpose() spanwiseMatOriCurve = 1 - - roundStations=np.argwhere(np.array(blade.TEtype)=='round') - roundStations=list(roundStations[:,0]) - lastRoundStation = roundStations[-1] + roundStations = np.argwhere(np.array(blade.TEtype) == "round") + roundStations = list(roundStations[:, 0]) + lastRoundStation = roundStations[-1] - with open('cubitBlade.log', 'w') as logFile: - logFile.write(f'Making cross sections for {wt_name}\n') + with open("cubitBlade.log", "w") as logFile: + logFile.write(f"Making cross sections for {wt_name}\n") + pathName = directory + "/" + wt_name + "-crossSections" - pathName=directory+'/'+wt_name+'-crossSections' - for iStation in stationList: - - cubit.cmd('reset ') # This is needed to restart node numbering for VABS. VABS neeeds every element and node starting from 1 to nelem/nnode should be present + cubit.cmd( + "reset " + ) # This is needed to restart node numbering for VABS. 
VABS neeeds every element and node starting from 1 to nelem/nnode should be present writeSplineFromCoordinatePoints(cubit, refLineCoords) iStationGeometry = iStation - if iStation == len(blade.ispan)-1: # Only do this for the last station - blade.addInterpolatedStation(blade.ispan[-1]*0.999) + if iStation == len(blade.ispan) - 1: # Only do this for the last station + blade.addInterpolatedStation(blade.ispan[-1] * 0.999) blade.editStacksForSolidMesh() expandTEthicknesses.append(expandTEthicknesses[-1]) blade.expandBladeGeometryTEs(expandTEthicknesses) - # adjustLastStackAfterNewTipStation(iStation) - iStationGeometry = iStation+1 + iStationGeometry = iStation + 1 - if blade.getprofileTEtype(iStationGeometry) == 'flat': + if blade.getprofileTEtype(iStationGeometry) == "flat": isFlatback = True else: isFlatback = False @@ -97,38 +103,64 @@ def generateCubitCrossSections(blade, wt_name, settings, crosssectionParams, mod if iStation < iStationFirstWeb: iWebStation = iStationFirstWeb - # elif iStationLastWeb == len(blade.ispan) - 1-1: + # elif iStationLastWeb == len(blade.ispan) - 1-1: else: iWebStation = iStationLastWeb - # else: - # raise Exception('assuming web ends at last station for now. ') + # else: + # raise Exception('assuming web ends at last station for now. ') webNumber = 1 aftWebStack = blade.swstacks[webNumber][iWebStation] webNumber = 0 foreWebStack = blade.swstacks[webNumber][iWebStation] - # Only save birdsMouthVerts for the right cross-section if iStation == iStationFirstWeb: - birdsMouthVerts = writeCubitCrossSection(surfaceDict, iStation, iStationGeometry, blade, - hasWebs[iStation], aftWebStack, foreWebStack, iLE, crosssectionParams, geometryScaling, thicknessScaling, isFlatback, lastRoundStation, materialsUsed) + birdsMouthVerts = writeCubitCrossSection( + surfaceDict, + iStation, + iStationGeometry, + blade, + hasWebs[iStation], + aftWebStack, + foreWebStack, + iLE, + crosssectionParams, + geometryScaling, + thicknessScaling, + isFlatback, + lastRoundStation, + materialsUsed, + ) else: - - writeCubitCrossSection(surfaceDict, iStation, iStationGeometry, blade, hasWebs[iStation], aftWebStack, foreWebStack, - iLE, crosssectionParams, geometryScaling, thicknessScaling, isFlatback, lastRoundStation, materialsUsed) + writeCubitCrossSection( + surfaceDict, + iStation, + iStationGeometry, + blade, + hasWebs[iStation], + aftWebStack, + foreWebStack, + iLE, + crosssectionParams, + geometryScaling, + thicknessScaling, + isFlatback, + lastRoundStation, + materialsUsed, + ) birdsMouthVerts = [] - cubit.cmd(f'delete curve all with Is_Free except {spanwiseMatOriCurve}') + cubit.cmd(f"delete curve all with Is_Free except {spanwiseMatOriCurve}") # Chord line for rotation of cross-section for homogenization - if model2Dor3D.lower() == '2d': + if model2Dor3D.lower() == "2d": # #Blocks for imat, materialName in enumerate(materialsUsed): cubit.cmd(f'block {imat+1} add surface with name "*{materialName}*"') - addColor(blade, 'surface') + addColor(blade, "surface") # create_vertex(blade.geometry[0,0,iStation]*geometryScaling,blade.geometry[0,1,iStation]*geometryScaling,blade.geometry[0,2,iStation]*geometryScaling) # TEvert=get_last_id("vertex") @@ -142,60 +174,76 @@ def generateCubitCrossSections(blade, wt_name, settings, crosssectionParams, mod # crossSectionRotationAngle=math.atan2(tangentDirection[1],tangentDirection[0])*180/pi parseString = f'with name "*Station{str(iStation)}*"' - volumeIDs = parse_cubit_list('surface', parseString) + volumeIDs = parse_cubit_list("surface", parseString) # 
Undo initial twist - cubit.cmd(f'rotate Surface {l2s(volumeIDs)} angle {blade.degreestwist[iStation]} about Z include_merged ') + cubit.cmd( + f"rotate Surface {l2s(volumeIDs)} angle {blade.degreestwist[iStation]} about Z include_merged " + ) # Undo prebend if blade.prebend[iStation] != 0: - cubit.cmd(f'move surface {l2s(volumeIDs)} y {-1*blade.prebend[iStation]} include_merged') + cubit.cmd( + f"move surface {l2s(volumeIDs)} y {-1*blade.prebend[iStation]} include_merged" + ) # Undo sweep if blade.sweep[iStation] != 0: - raise ValueError('Presweep is untested for cross-sectional meshing') + raise ValueError("Presweep is untested for cross-sectional meshing") # Mesh the cross-section - cubit.cmd(f'curve with name "layerThickness*" interval {crosssectionParams["nelPerLayer"]}') - #cubit.cmd(f'imprint volume {l2s(surfaceIDs)}') - cubit.cmd(f'merge volume {l2s(volumeIDs)}') - cubit.cmd(f'set default autosize on') - - if crosssectionParams['elementShape'].lower() == 'tri': - cubit.cmd(f'surface {l2s(volumeIDs)} scheme tri') + cubit.cmd( + f'curve with name "layerThickness*" interval {crosssectionParams["nelPerLayer"]}' + ) + # cubit.cmd(f'imprint volume {l2s(surfaceIDs)}') + cubit.cmd(f"merge volume {l2s(volumeIDs)}") + cubit.cmd(f"set default autosize on") + + if crosssectionParams["elementShape"].lower() == "tri": + cubit.cmd(f"surface {l2s(volumeIDs)} scheme tri") else: - cubit.cmd(f'surface {l2s(volumeIDs)} scheme map') + cubit.cmd(f"surface {l2s(volumeIDs)} scheme map") - cubit.cmd(f'mesh surface {l2s(volumeIDs)}') + cubit.cmd(f"mesh surface {l2s(volumeIDs)}") - fileName = wt_name+'-'+str(iStation)+'-t-0.in' + fileName = wt_name + "-" + str(iStation) + "-t-0.in" if not os.path.exists(directory): os.makedirs(directory) - - if settings['export'] is not None: - if 'g' in settings['export'].lower(): + if settings["export"] is not None: + if "g" in settings["export"].lower(): cubit.cmd(f'export mesh "{pathName}.g" overwrite') - elif 'cub' in settings['export'].lower(): - cubit.cmd(f'delete curve {spanwiseMatOriCurve}') + elif "cub" in settings["export"].lower(): + cubit.cmd(f"delete curve {spanwiseMatOriCurve}") cubit.cmd(f'save as "{pathName}-{str(iStation)}.cub" overwrite') else: - raise NameError(f'Unknown model export format: {settings["export"]}') - - if settings['solver'] is not None: - if 'vabs' in settings['solver'].lower(): - writeVABSinput(surfaceDict, blade, crosssectionParams,directory,fileName, volumeIDs,materialsUsed) - - - elif 'anba' in settings['solver'].lower(): - raise ValueError('ANBA currently not supported') + raise NameError( + f'Unknown model export format: {settings["export"]}' + ) + + if settings["solver"] is not None: + if "vabs" in settings["solver"].lower(): + writeVABSinput( + surfaceDict, + blade, + crosssectionParams, + directory, + fileName, + volumeIDs, + materialsUsed, + ) + + elif "anba" in settings["solver"].lower(): + raise ValueError("ANBA currently not supported") else: - raise NameError(f'Unknown beam cross-sectional solver: {settings["solver"]}') + raise NameError( + f'Unknown beam cross-sectional solver: {settings["solver"]}' + ) - #Import all cross-sections into one cub file - if settings['export'] is not None and'cub' in settings['export'].lower(): - cubit.cmd('reset ') + # Import all cross-sections into one cub file + if settings["export"] is not None and "cub" in settings["export"].lower(): + cubit.cmd("reset ") writeSplineFromCoordinatePoints(cubit, refLineCoords) for iStation in stationList: @@ -203,20 +251,40 @@ def 
generateCubitCrossSections(blade, wt_name, settings, crosssectionParams, mod cubit.cmd(f'save as "{pathName}.cub" overwrite') # Remove unnecessary files to save space - for filePath in glob.glob(f'{pathName}-*.cub'): + for filePath in glob.glob(f"{pathName}-*.cub"): os.remove(filePath) - return cubit, blade, surfaceDict, birdsMouthVerts, iStationFirstWeb, iStationLastWeb, materialsUsed, spanwiseMatOriCurve - - -def generateCubitSolidModel(blade, wt_name, settings, crosssectionParams, stationList=None): - - if stationList is None or len(stationList)==0: + return ( + cubit, + blade, + surfaceDict, + birdsMouthVerts, + iStationFirstWeb, + iStationLastWeb, + materialsUsed, + spanwiseMatOriCurve, + ) + + +def generateCubitSolidModel( + blade, wt_name, settings, crosssectionParams, stationList=None +): + if stationList is None or len(stationList) == 0: stationList = list(range(len(blade.ispan))) - elif len(stationList)==1: - raise ValueError('Need more that one cross section to make a solid model') - - cubit, blade, surfaceDict, birdsMouthVerts, iStationFirstWeb, iStationLastWeb, materialsUsed, spanwiseMatOriCurve = generateCubitCrossSections( - blade, wt_name, settings, crosssectionParams, '3D', stationList) + elif len(stationList) == 1: + raise ValueError("Need more that one cross section to make a solid model") + + ( + cubit, + blade, + surfaceDict, + birdsMouthVerts, + iStationFirstWeb, + iStationLastWeb, + materialsUsed, + spanwiseMatOriCurve, + ) = generateCubitCrossSections( + blade, wt_name, settings, crosssectionParams, "3D", stationList + ) iStationStart = stationList[0] iStationEnd = stationList[-1] @@ -225,48 +293,54 @@ def generateCubitSolidModel(blade, wt_name, settings, crosssectionParams, statio ### ### ### ### meshVolList = [] - partName = 'shell' + partName = "shell" orderedList = getOrderedList(partName) if len(orderedList) > 0: - meshVolList = makeAeroshell( - surfaceDict, orderedList, meshVolList, iStationEnd) -# cubit.cmd(f'save as "python2.cub" overwrite') -# foo + meshVolList = makeAeroshell(surfaceDict, orderedList, meshVolList, iStationEnd) + # cubit.cmd(f'save as "python2.cub" overwrite') + # foo - partName = 'web' + partName = "web" orderedList = getOrderedList(partName) orderedListWeb = orderedList.copy() if orderedList and len(orderedList[0]) > 1: - meshVolList = makeAeroshell( - surfaceDict, orderedList, meshVolList, iStationEnd) + meshVolList = makeAeroshell(surfaceDict, orderedList, meshVolList, iStationEnd) - partName = 'roundTEadhesive' + partName = "roundTEadhesive" orderedList = getOrderedList(partName) if orderedList and len(orderedList[0]) > 1: - meshVolList = makeAeroshell( - surfaceDict, orderedList, meshVolList, iStationEnd) + meshVolList = makeAeroshell(surfaceDict, orderedList, meshVolList, iStationEnd) - partName = 'flatTEadhesive' + partName = "flatTEadhesive" orderedList = getOrderedList(partName) if orderedList and len(orderedList[0]) > 1: - meshVolList = makeAeroshell( - surfaceDict, orderedList, meshVolList, iStationEnd) - - if orderedListWeb and len(orderedListWeb[0]) > 1 and crosssectionParams['amplitudeFraction'] and birdsMouthVerts: - - makeBirdsMouth(blade, birdsMouthVerts, - crosssectionParams['amplitudeFraction'], iStationFirstWeb, iStationLastWeb) - - cubit.cmd(f'merge volume {l2s(meshVolList)}') - cubit.cmd(f'reset volume all') - - cubit.cmd(f'delete surface with Is_Free') + meshVolList = makeAeroshell(surfaceDict, orderedList, meshVolList, iStationEnd) + + if ( + orderedListWeb + and len(orderedListWeb[0]) > 1 + and 
crosssectionParams["amplitudeFraction"] + and birdsMouthVerts + ): + makeBirdsMouth( + blade, + birdsMouthVerts, + crosssectionParams["amplitudeFraction"], + iStationFirstWeb, + iStationLastWeb, + ) + + cubit.cmd(f"merge volume {l2s(meshVolList)}") + cubit.cmd(f"reset volume all") + + cubit.cmd(f"delete surface with Is_Free") cubit.cmd( - f'curve with name "layerThickness*" interval {crosssectionParams["nelPerLayer"]}') - cubit.cmd('set default autosize on') - cubit.cmd(f'mesh volume {l2s(meshVolList)}') - cubit.cmd(f'draw volume {l2s(meshVolList)}') + f'curve with name "layerThickness*" interval {crosssectionParams["nelPerLayer"]}' + ) + cubit.cmd("set default autosize on") + cubit.cmd(f"mesh volume {l2s(meshVolList)}") + cubit.cmd(f"draw volume {l2s(meshVolList)}") # Blocks # for imat,material in enumerate(blade.materials): @@ -274,35 +348,35 @@ def generateCubitSolidModel(blade, wt_name, settings, crosssectionParams, statio cubit.cmd(f'block {imat+1} add volume with name "*{materialName}*"') cubit.cmd(f'block {imat+1} name "{materialName}"') - addColor(blade, 'volume') + addColor(blade, "volume") # Adding Nodesets # Root Nodeset parseString = f'with name "*station{iStationStart}*"' - print(f'parseString{parseString}') - surfaceIDs = parse_cubit_list('surface', parseString) - cubit.cmd(f'nodeset 1 add surface {l2s(surfaceIDs)} ') + print(f"parseString{parseString}") + surfaceIDs = parse_cubit_list("surface", parseString) + cubit.cmd(f"nodeset 1 add surface {l2s(surfaceIDs)} ") cubit.cmd(f'nodeset 1 name "root"') for iLoop, iStation in enumerate(stationList[1:-1]): parseString = f'with name "*station{iStation}*"' - print(f'parseString{parseString}') - surfaceIDs = parse_cubit_list('surface', parseString) - cubit.cmd(f'nodeset {iLoop+2} add surface {l2s(surfaceIDs)} ') + print(f"parseString{parseString}") + surfaceIDs = parse_cubit_list("surface", parseString) + cubit.cmd(f"nodeset {iLoop+2} add surface {l2s(surfaceIDs)} ") cubit.cmd(f'nodeset {iLoop+2} name "station{iStation}"') if not stationList[1:-1]: iLoop = -1 # Tip Nodeset parseString = f'with name "*station{iStationEnd}*"' - surfaceIDs = parse_cubit_list('surface', parseString) - cubit.cmd(f'nodeset {iLoop+3} add surface {l2s(surfaceIDs)} ') + surfaceIDs = parse_cubit_list("surface", parseString) + cubit.cmd(f"nodeset {iLoop+3} add surface {l2s(surfaceIDs)} ") cubit.cmd(f'nodeset {iLoop+3} name "tip"') # Outer mold-line nodeset cubit.cmd('draw surf with name "*layer0_bottomFace*"') parseString = f'with name "*layer0_bottomFace*"' - surfaceIDs = parse_cubit_list('surface', parseString) - cubit.cmd(f'nodeset {iLoop+4} add surface {l2s(surfaceIDs)} ') + surfaceIDs = parse_cubit_list("surface", parseString) + cubit.cmd(f"nodeset {iLoop+4} add surface {l2s(surfaceIDs)} ") cubit.cmd(f'nodeset {iLoop+4} name "oml"') # #################################### @@ -347,8 +421,8 @@ def generateCubitSolidModel(blade, wt_name, settings, crosssectionParams, statio # length*perimeterDirection[1], coords[2]+length*perimeterDirection[2]) # iVert2 = get_last_id("vertex") # cubit.cmd(f'create curve vertex {iVert1} {iVert2}') - if settings['export'] is not None: - if 'g' in settings['export'].lower(): + if settings["export"] is not None: + if "g" in settings["export"].lower(): cubit.cmd(f'export mesh "{wt_name}.g" overwrite') - if 'cub' in settings['export'].lower(): + if "cub" in settings["export"].lower(): cubit.cmd(f'save as "{wt_name}.cub" overwrite') diff --git a/src/pynumad/analysis/cubit/cubitUtils.py b/src/pynumad/analysis/cubit/cubitUtils.py 
index d9e3d33..64dc142 100644 --- a/src/pynumad/analysis/cubit/cubitUtils.py +++ b/src/pynumad/analysis/cubit/cubitUtils.py @@ -1,949 +1,1318 @@ from cubit import * -from PyCubed_Main import * +from PyCubed_Main import * import numpy as np import os -def addColor(blade,volumeOrSurface): - - #Adds color to volume or surfaces by material - colorDict={} - colorDict['adhesive']='yellow' - colorDict['carbon']='grey' - colorDict['uni']='seagreen' - colorDict['triax']='lightgreen' - colorDict['biax']='greenyellow' - colorDict['foam']='khaki' + +def addColor(blade, volumeOrSurface): + # Adds color to volume or surfaces by material + colorDict = {} + colorDict["adhesive"] = "yellow" + colorDict["carbon"] = "grey" + colorDict["uni"] = "seagreen" + colorDict["triax"] = "lightgreen" + colorDict["biax"] = "greenyellow" + colorDict["foam"] = "khaki" for matName in blade.materials: for color in colorDict.keys(): if color in matName.lower(): - parseString=f'with name "*{matName}*"' + parseString = f'with name "*{matName}*"' - volIDs=parse_cubit_list(volumeOrSurface, parseString) - cubit.cmd(f'color {volumeOrSurface} {l2s(volIDs)} mesh {colorDict[color]}') - cubit.cmd(f'color {volumeOrSurface} {l2s(volIDs)} geometry {colorDict[color]}') + volIDs = parse_cubit_list(volumeOrSurface, parseString) + cubit.cmd( + f"color {volumeOrSurface} {l2s(volIDs)} mesh {colorDict[color]}" + ) + cubit.cmd( + f"color {volumeOrSurface} {l2s(volIDs)} geometry {colorDict[color]}" + ) break -def surfaceFromTwoCurves(topCurve,bottomCurve): - v2Left,v2Right=selCurveVerts(topCurve) - v1Left,v1Right=selCurveVerts(bottomCurve) - cubit.cmd(f'create curve vertex {v1Left} {v2Left}') - cubit.cmd(f'create curve vertex {v1Right} {v2Right}') - cubit.cmd(f'create surface curve {l2s([get_last_id("curve")-1, bottomCurve,topCurve,get_last_id("curve")])}') +def surfaceFromTwoCurves(topCurve, bottomCurve): + v2Left, v2Right = selCurveVerts(topCurve) + v1Left, v1Right = selCurveVerts(bottomCurve) + cubit.cmd(f"create curve vertex {v1Left} {v2Left}") + cubit.cmd(f"create curve vertex {v1Right} {v2Right}") + cubit.cmd( + f'create surface curve {l2s([get_last_id("curve")-1, bottomCurve,topCurve,get_last_id("curve")])}' + ) -def getCrossSectionNormalVector(xyz): - npts,_=xyz.shape +def getCrossSectionNormalVector(xyz): + npts, _ = xyz.shape - #Create Referece line as a spline - vertexList=[] + # Create Referece line as a spline + vertexList = [] for kcp in range(npts): - vertexList.append(create_vertex(xyz[kcp,0],xyz[kcp,1],xyz[kcp,2])) + vertexList.append(create_vertex(xyz[kcp, 0], xyz[kcp, 1], xyz[kcp, 2])) - c1=create_curve(vertexList[0],vertexList[1]) - c2=create_curve(vertexList[0],vertexList[2]) + c1 = create_curve(vertexList[0], vertexList[1]) + c2 = create_curve(vertexList[0], vertexList[2]) - midPoint=list(c1.position_from_fraction(0.5)) - tangent1=c1.tangent(midPoint) - midPoint=list(c2.position_from_fraction(0.5)) - tangent2=c2.tangent(midPoint) - crossSectionNormal=vectNorm(crossProd(list(tangent1),list(tangent2))) + midPoint = list(c1.position_from_fraction(0.5)) + tangent1 = c1.tangent(midPoint) + midPoint = list(c2.position_from_fraction(0.5)) + tangent2 = c2.tangent(midPoint) + crossSectionNormal = vectNorm(crossProd(list(tangent1), list(tangent2))) cubit.cmd(f'delete curve {get_last_id("curve")}') cubit.cmd(f'delete curve {get_last_id("curve")-1}') return crossSectionNormal -def getBladeGeometryForStation(blade,iStation): - return 
np.array([blade.geometry[:,0,iStation],blade.geometry[:,1,iStation],blade.geometry[:,2,iStation]]).transpose() + + +def getBladeGeometryForStation(blade, iStation): + return np.array( + [ + blade.geometry[:, 0, iStation], + blade.geometry[:, 1, iStation], + blade.geometry[:, 2, iStation], + ] + ).transpose() # xcoords=blade.profiles[:,0,iStation]*blade.ichord[iStation] # ycoords=blade.profiles[:,1,iStation]*blade.ichord[iStation] # zcoord=blade.ispan[iStation] - # zcoords=[zcoord]*len(xcoords) + # zcoords=[zcoord]*len(xcoords) # return np.array([xcoords,ycoords,zcoords]).transpose() -def writeSplineFromCoordinatePoints(cubit,xyz): - - #xyz is npts by 3 array holding the coordinates of the points - npts,_=xyz.shape - nStart=get_last_id("vertex")+1 +def writeSplineFromCoordinatePoints(cubit, xyz): + # xyz is npts by 3 array holding the coordinates of the points + npts, _ = xyz.shape + nStart = get_last_id("vertex") + 1 for iPoint in range(npts): - coords=xyz[iPoint,:] - create_vertex(coords[0],coords[1],coords[2]) - nEnd=get_last_id("vertex") - vertexList=list(range(nStart,nEnd+1)) - cubit.cmd(f'create curve spline vertex {l2s(vertexList)}') - -def extendCurveAtVertexToLength(curveToExtendID,extensionLength,curveStartOrEnd): - #Extend all offset curves - if curveStartOrEnd.lower() == 'start': - tempVertID,_=selCurveVerts(curveToExtendID) - vectorDirectionSign=-1 + coords = xyz[iPoint, :] + create_vertex(coords[0], coords[1], coords[2]) + nEnd = get_last_id("vertex") + vertexList = list(range(nStart, nEnd + 1)) + cubit.cmd(f"create curve spline vertex {l2s(vertexList)}") + + +def extendCurveAtVertexToLength(curveToExtendID, extensionLength, curveStartOrEnd): + # Extend all offset curves + if curveStartOrEnd.lower() == "start": + tempVertID, _ = selCurveVerts(curveToExtendID) + vectorDirectionSign = -1 else: - _,tempVertID=selCurveVerts(curveToExtendID) - vectorDirectionSign=1 - - tangentLocationCoords=cubit.vertex(tempVertID).coordinates() - - x=cubit.curve(curveToExtendID).tangent(tangentLocationCoords)[0] - y=cubit.curve(curveToExtendID).tangent(tangentLocationCoords)[1] - z=cubit.curve(curveToExtendID).tangent(tangentLocationCoords)[2] - tangentDirection=vectorDirectionSign*extensionLength*np.array(vectNorm([x,y,z])) #Unit vector of tangent *Scaled by offset curve length - newVertexCoords=np.array(tangentLocationCoords)+tangentDirection - - v1=cubit.create_vertex(newVertexCoords[0],newVertexCoords[1],newVertexCoords[2]) - - #if statement needed to maintain original curve sense - if curveStartOrEnd.lower() == 'start': - c1=cubit.create_curve(v1,cubit.vertex(tempVertID)) - #Combine offset with extenstion - cubit.cmd(f'create curve combine curve {get_last_id("curve")} {curveToExtendID}') + _, tempVertID = selCurveVerts(curveToExtendID) + vectorDirectionSign = 1 + + tangentLocationCoords = cubit.vertex(tempVertID).coordinates() + + x = cubit.curve(curveToExtendID).tangent(tangentLocationCoords)[0] + y = cubit.curve(curveToExtendID).tangent(tangentLocationCoords)[1] + z = cubit.curve(curveToExtendID).tangent(tangentLocationCoords)[2] + tangentDirection = ( + vectorDirectionSign * extensionLength * np.array(vectNorm([x, y, z])) + ) # Unit vector of tangent *Scaled by offset curve length + newVertexCoords = np.array(tangentLocationCoords) + tangentDirection + + v1 = cubit.create_vertex(newVertexCoords[0], newVertexCoords[1], newVertexCoords[2]) + + # if statement needed to maintain original curve sense + if curveStartOrEnd.lower() == "start": + c1 = cubit.create_curve(v1, 
cubit.vertex(tempVertID)) + # Combine offset with extenstion + cubit.cmd( + f'create curve combine curve {get_last_id("curve")} {curveToExtendID}' + ) else: - c1=cubit.create_curve(cubit.vertex(tempVertID),v1) - #Combine offset with extenstion - cubit.cmd(f'create curve combine curve {curveToExtendID} {get_last_id("curve")}') - + c1 = cubit.create_curve(cubit.vertex(tempVertID), v1) + # Combine offset with extenstion + cubit.cmd( + f'create curve combine curve {curveToExtendID} {get_last_id("curve")}' + ) + cubit.cmd(f'delete curve {curveToExtendID} {get_last_id("curve")-1}') return get_last_id("curve") -def removeBadTEgeometry(blade,iStation,curveID,flatBackCurveID): - cubit.cmd(f'split curve {curveID} distance {blade.chord[iStation]*0.002} from start ') + +def removeBadTEgeometry(blade, iStation, curveID, flatBackCurveID): + cubit.cmd( + f"split curve {curveID} distance {blade.chord[iStation]*0.002} from start " + ) cubit.cmd(f'delete curve {get_last_id("curve")-1}') - curveID=get_last_id("curve") + curveID = get_last_id("curve") - curveStartOrEnd='start' - extensionLength=1*cubit.curve(curveID).length() - curveID=extendCurveAtVertexToLength(curveID,extensionLength,curveStartOrEnd) + curveStartOrEnd = "start" + extensionLength = 1 * cubit.curve(curveID).length() + curveID = extendCurveAtVertexToLength(curveID, extensionLength, curveStartOrEnd) - _,v1=selCurveVerts(curveID) - cubit.cmd(f'trim curve {curveID} atintersection curve {flatBackCurveID} keepside vertex {v1}') + _, v1 = selCurveVerts(curveID) + cubit.cmd( + f"trim curve {curveID} atintersection curve {flatBackCurveID} keepside vertex {v1}" + ) return get_last_id("curve") -def printOffsetDirectionCheck(curveID,lpHpside,crossSectionNormal): - #This function is used to determine which way a curve offset will go. This is needed, for example, - #to make sure the outer mold line curve is being offset towrads the interior of the blade. - tol=0.01 - cubit.cmd(f'create curve offset curve {curveID} distance 0.0001') - tempCurveID=get_last_id("curve") - cubit.cmd(f'create surface skin curve {curveID} {tempCurveID}') - cubit.cmd(f'delete curve {tempCurveID}') - n=get_surface_normal(get_last_id("surface")) - if abs(n[0]-crossSectionNormal[0]) < tol and abs(n[1]-crossSectionNormal[1]) < tol and abs(n[2]-crossSectionNormal[2]) < tol: - offsetSign=1 + +def printOffsetDirectionCheck(curveID, lpHpside, crossSectionNormal): + # This function is used to determine which way a curve offset will go. This is needed, for example, + # to make sure the outer mold line curve is being offset towrads the interior of the blade. 
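# The check below can be read as a simple vector comparison: offset the curve by
# a tiny amount (0.0001), skin a surface between the original and offset curves,
# and ask whether that surface's normal points the same way as the cross-section
# normal. A plain-numpy sketch of the comparison step (illustrative only; the
# helper name is hypothetical and the real normals come from Cubit):
#
#     import numpy as np
#     def _same_direction(surf_normal, section_normal, tol=0.01):
#         diff = np.abs(np.asarray(surf_normal) - np.asarray(section_normal))
#         return 1 if np.all(diff < tol) else -1
#
# The resulting sign is then flipped for the LP side because the LP curve is
# written to run clockwise.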
+ tol = 0.01 + cubit.cmd(f"create curve offset curve {curveID} distance 0.0001") + tempCurveID = get_last_id("curve") + cubit.cmd(f"create surface skin curve {curveID} {tempCurveID}") + cubit.cmd(f"delete curve {tempCurveID}") + n = get_surface_normal(get_last_id("surface")) + if ( + abs(n[0] - crossSectionNormal[0]) < tol + and abs(n[1] - crossSectionNormal[1]) < tol + and abs(n[2] - crossSectionNormal[2]) < tol + ): + offsetSign = 1 else: - offsetSign=-1 + offsetSign = -1 cubit.cmd(f'delete body {get_last_id("body")}') - - #Need to flip direction since LP curve is written to run clockwise - if lpHpside.lower() == 'lp': - offsetSign=-1*offsetSign + + # Need to flip direction since LP curve is written to run clockwise + if lpHpside.lower() == "lp": + offsetSign = -1 * offsetSign return offsetSign -def offsetCurveAndCombineFragmentsIfNeeded(curveID,offsetDistance): - #Sometimes when offseting a curve, the offset is broken into multiple curves. - #This happens when the curvature is to high for the offset distance. If this happens - #this fuction combines the fragmented curves into one spline. - nStart=get_last_id("curve") - cubit.cmd(f'create curve offset curve {curveID} distance {offsetDistance} extended') - nEnd=get_last_id("curve") + +def offsetCurveAndCombineFragmentsIfNeeded(curveID, offsetDistance): + # Sometimes when offseting a curve, the offset is broken into multiple curves. + # This happens when the curvature is to high for the offset distance. If this happens + # this fuction combines the fragmented curves into one spline. + nStart = get_last_id("curve") + cubit.cmd(f"create curve offset curve {curveID} distance {offsetDistance} extended") + nEnd = get_last_id("curve") # print(f'curveID {curveID}') # print(f'nStart {nStart}') # print(f'nEnd {nEnd}') # print(f'offsetDistance {nEnd}') - # cubit.cmd(f'save as "debug.cub" overwrite') + # cubit.cmd(f'save as "debug.cub" overwrite') - if nEnd-nStart > 1: - curveList=[] - curveList+=list(range(nStart+1,nEnd+1)) - vertexList=[] - v1,_=selCurveVerts(curveList[0]) + if nEnd - nStart > 1: + curveList = [] + curveList += list(range(nStart + 1, nEnd + 1)) + vertexList = [] + v1, _ = selCurveVerts(curveList[0]) vertexList.append(v1) for curve in curveList: - nStart=get_last_id("vertex")+1 - cubit.cmd(f'create vertex on curve {curve} segment 200') - nEnd=get_last_id("vertex") - vertexList+=list(range(nStart,nEnd+1)) - _,v1=selCurveVerts(curveList[-1]) - cubit.cmd(f'create curve spline vertex {l2s(vertexList)}') - cubit.cmd(f'delete vertex {l2s(vertexList[1:-1])}') - cubit.cmd(f'delete curve {l2s(curveList)}') - - - -def streamlineCurveIntersections(firstCurveID,secondCurveID,keepCurve): - #keepCurve is either 1 or 2 - nStart=get_last_id("vertex") - cubit.cmd(f'create vertex atintersection curve {firstCurveID} {secondCurveID}') - nEnd=get_last_id("vertex") - vertexList=list(range(nStart+1,nEnd+1)) - if len(vertexList)>0: - cubit.cmd(f'split curve {firstCurveID} at vertex {vertexList[-1]}') - firstCurveID=get_last_id("curve")-1 - cubit.cmd(f'split curve {secondCurveID} at vertex {vertexList[-1]}') - secondCurveID=get_last_id("curve") - tempID=spliceTwoCurves(firstCurveID,secondCurveID,keepCurve) + nStart = get_last_id("vertex") + 1 + cubit.cmd(f"create vertex on curve {curve} segment 200") + nEnd = get_last_id("vertex") + vertexList += list(range(nStart, nEnd + 1)) + _, v1 = selCurveVerts(curveList[-1]) + cubit.cmd(f"create curve spline vertex {l2s(vertexList)}") + cubit.cmd(f"delete vertex {l2s(vertexList[1:-1])}") + cubit.cmd(f"delete curve 
{l2s(curveList)}") + + +def streamlineCurveIntersections(firstCurveID, secondCurveID, keepCurve): + # keepCurve is either 1 or 2 + nStart = get_last_id("vertex") + cubit.cmd(f"create vertex atintersection curve {firstCurveID} {secondCurveID}") + nEnd = get_last_id("vertex") + vertexList = list(range(nStart + 1, nEnd + 1)) + if len(vertexList) > 0: + cubit.cmd(f"split curve {firstCurveID} at vertex {vertexList[-1]}") + firstCurveID = get_last_id("curve") - 1 + cubit.cmd(f"split curve {secondCurveID} at vertex {vertexList[-1]}") + secondCurveID = get_last_id("curve") + tempID = spliceTwoCurves(firstCurveID, secondCurveID, keepCurve) return tempID, vertexList[-1] else: - if keepCurve==1: - return firstCurveID,None + if keepCurve == 1: + return firstCurveID, None else: - return secondCurveID,None -def spliceTwoCurves(firstCurveID,secondCurveID,keepCurve): - #Given two curves splice them into one curve. This should even work for curves that make corners. - #Curve sence matters. The first curve's sense (tangent) should point towards the second curve. - cubit.cmd(f'split curve {firstCurveID} distance 0.005 from end ') - firstCurveID=get_last_id("curve")-1 - cubit.cmd(f'split curve {secondCurveID} distance 0.005 from start ') - secondCurveID=get_last_id("curve") - vertexList=[] - v1,_=selCurveVerts(firstCurveID) + return secondCurveID, None + + +def spliceTwoCurves(firstCurveID, secondCurveID, keepCurve): + # Given two curves splice them into one curve. This should even work for curves that make corners. + # Curve sence matters. The first curve's sense (tangent) should point towards the second curve. + cubit.cmd(f"split curve {firstCurveID} distance 0.005 from end ") + firstCurveID = get_last_id("curve") - 1 + cubit.cmd(f"split curve {secondCurveID} distance 0.005 from start ") + secondCurveID = get_last_id("curve") + vertexList = [] + v1, _ = selCurveVerts(firstCurveID) vertexList.append(v1) - nStart=get_last_id("vertex")+1 - cubit.cmd(f'create vertex on curve {firstCurveID} segment 200') - nEnd=get_last_id("vertex") - vertexList+=list(range(nStart,nEnd+1)) - _,v1=selCurveVerts(firstCurveID) + nStart = get_last_id("vertex") + 1 + cubit.cmd(f"create vertex on curve {firstCurveID} segment 200") + nEnd = get_last_id("vertex") + vertexList += list(range(nStart, nEnd + 1)) + _, v1 = selCurveVerts(firstCurveID) vertexList.append(v1) - v2,_=selCurveVerts(secondCurveID) + v2, _ = selCurveVerts(secondCurveID) vertexList.append(v2) - nStart=get_last_id("vertex")+1 - cubit.cmd(f'create vertex on curve {secondCurveID} segment 200') - nEnd=get_last_id("vertex") - vertexList+=list(range(nStart,nEnd+1)) - _,v2=selCurveVerts(secondCurveID) + nStart = get_last_id("vertex") + 1 + cubit.cmd(f"create vertex on curve {secondCurveID} segment 200") + nEnd = get_last_id("vertex") + vertexList += list(range(nStart, nEnd + 1)) + _, v2 = selCurveVerts(secondCurveID) vertexList.append(v2) - cubit.cmd(f'create curve spline vertex {l2s(vertexList)}') - cubit.cmd(f'delete curve {firstCurveID} {secondCurveID}') - secondCurveID=get_last_id("curve") - cubit.cmd(f'delete vertex {l2s(vertexList[1:-1])}') - if keepCurve==1: + cubit.cmd(f"create curve spline vertex {l2s(vertexList)}") + cubit.cmd(f"delete curve {firstCurveID} {secondCurveID}") + secondCurveID = get_last_id("curve") + cubit.cmd(f"delete vertex {l2s(vertexList[1:-1])}") + if keepCurve == 1: return firstCurveID - elif keepCurve==2: + elif keepCurve == 2: return secondCurveID -def extendCurvePastCurveAndTrim(curveToExtendID,curveStartOrEnd,curveIDThatCutsExtendedCurve): 
- #Given two curves that are not necessarily intersecting extend {curveToExtendID} then trim it at - #{curveIDThatCutsExtendedCurve}. {curveStartOrEnd} defines which side of the curve to extend so - #you need to know the curve sense - - extensionLength=2*cubit.curve(curveToExtendID).length() - curveToExtendID=extendCurveAtVertexToLength(curveToExtendID,extensionLength,curveStartOrEnd) - - nStart=get_last_id("vertex") - cubit.cmd(f'create vertex AtIntersection curve {curveIDThatCutsExtendedCurve} {curveToExtendID}') - splitVertexID=get_last_id("vertex") + + +def extendCurvePastCurveAndTrim( + curveToExtendID, curveStartOrEnd, curveIDThatCutsExtendedCurve +): + # Given two curves that are not necessarily intersecting extend {curveToExtendID} then trim it at + # {curveIDThatCutsExtendedCurve}. {curveStartOrEnd} defines which side of the curve to extend so + # you need to know the curve sense + + extensionLength = 2 * cubit.curve(curveToExtendID).length() + curveToExtendID = extendCurveAtVertexToLength( + curveToExtendID, extensionLength, curveStartOrEnd + ) + + nStart = get_last_id("vertex") + cubit.cmd( + f"create vertex AtIntersection curve {curveIDThatCutsExtendedCurve} {curveToExtendID}" + ) + splitVertexID = get_last_id("vertex") if nStart == splitVertexID: - print(f'curveToExtendID {curveToExtendID} curveIDThatCutsExtendedCurve {curveIDThatCutsExtendedCurve}') + print( + f"curveToExtendID {curveToExtendID} curveIDThatCutsExtendedCurve {curveIDThatCutsExtendedCurve}" + ) cubit.cmd(f'save as "debug.cub" overwrite') - raise Exception(f'Curve {curveToExtendID} was not able to be extended to curve {curveIDThatCutsExtendedCurve} because their intersection was not found.') + raise Exception( + f"Curve {curveToExtendID} was not able to be extended to curve {curveIDThatCutsExtendedCurve} because their intersection was not found." 
+ ) + + cubit.cmd(f"split curve {curveToExtendID} at vertex {splitVertexID}") - cubit.cmd(f'split curve {curveToExtendID} at vertex {splitVertexID}') - - if curveStartOrEnd.lower() == 'start': - curveToExtendID=get_last_id("curve") + if curveStartOrEnd.lower() == "start": + curveToExtendID = get_last_id("curve") cubit.cmd(f'delete curve {get_last_id("curve")-1}') else: - curveToExtendID=get_last_id("curve")-1 - cubit.cmd(f'delete curve {get_last_id("curve")}') + curveToExtendID = get_last_id("curve") - 1 + cubit.cmd(f'delete curve {get_last_id("curve")}') return curveToExtendID -def renameLastSurface(partName, iStation,iModeledLayers,materialName,partNameID): - #Every cross sectional surface that is created must be followed by a call to this function - partNameID+=1 - surfaceName=partName+'Station'+str(iStation)+'_layer'+str(iModeledLayers)+'_'+materialName+'_surface'+str(partNameID) + + +def renameLastSurface(partName, iStation, iModeledLayers, materialName, partNameID): + # Every cross sectional surface that is created must be followed by a call to this function + partNameID += 1 + surfaceName = ( + partName + + "Station" + + str(iStation) + + "_layer" + + str(iModeledLayers) + + "_" + + materialName + + "_surface" + + str(partNameID) + ) cubit.cmd(f'surface {get_last_id("surface")} rename "{surfaceName}"') return partNameID -def addSurfaceDictEntry(surfaceDict,surfaceObject,myCurveOrder, myVertOrder,materialName,plyAngle): - surfaceDict[surfaceObject.id()]={} - #Curves: - idList=[] +def addSurfaceDictEntry( + surfaceDict, surfaceObject, myCurveOrder, myVertOrder, materialName, plyAngle +): + surfaceDict[surfaceObject.id()] = {} + + # Curves: + idList = [] for curveObject in surfaceObject.curves(): idList.append(curveObject.id()) idList = [idList[i] for i in myCurveOrder] - surfaceDict[surfaceObject.id()]['curves']=idList + surfaceDict[surfaceObject.id()]["curves"] = idList # getSurfaceCurves[surfaceObject.id()]=idList - - #Verts: - idList=[] + + # Verts: + idList = [] for vertObject in surfaceObject.vertices(): idList.append(vertObject.id()) idList = [idList[i] for i in myVertOrder] - surfaceDict[surfaceObject.id()]['verts']=idList - surfaceDict[surfaceObject.id()]['materialName']=materialName - surfaceDict[surfaceObject.id()]['plyAngle']=plyAngle + surfaceDict[surfaceObject.id()]["verts"] = idList + surfaceDict[surfaceObject.id()]["materialName"] = materialName + surfaceDict[surfaceObject.id()]["plyAngle"] = plyAngle # getSurfaceVerts[surfaceObject.id()]=idList - -def makeCrossSectionSurface(surfaceDict,iStation,partName,topCurve,bottomCurve,materialName,plyAngle,partNameID,iModeledLayers,materialsUsed): - #Given two curves in a cross section, create a surface by connecting the end points then - #rename the surface and add to the surface dictionary - surfaceFromTwoCurves(topCurve,bottomCurve) + + +def makeCrossSectionSurface( + surfaceDict, + iStation, + partName, + topCurve, + bottomCurve, + materialName, + plyAngle, + partNameID, + iModeledLayers, + materialsUsed, +): + # Given two curves in a cross section, create a surface by connecting the end points then + # rename the surface and add to the surface dictionary + surfaceFromTwoCurves(topCurve, bottomCurve) materialsUsed.add(materialName) - - partNameID=renameLastSurface(partName,iStation,iModeledLayers,materialName,partNameID) - addSurfaceDictEntry(surfaceDict,cubit.surface(get_last_id("surface")),[0,1,2,3],[0,1,2,3],materialName,plyAngle) - - cubit.cmd(f'curve {surfaceDict[get_last_id("surface")]["curves"][1]} rename 
"layerThickness"') - cubit.cmd(f'curve {surfaceDict[get_last_id("surface")]["verts"][-1]} rename "layerThickness"') - - - return partNameID,materialsUsed - - -def writeLEAdhesiveCurves(HPstackThickness,LPstackThickness,adhesiveThickness,hpKeyCurve,lpKeyCurve,crossSectionNormal): - def uniteLEgeomWithAdhesiveCurves(lpHpside,keyCurve): - if lpHpside.lower() == 'hp': - #keyCurve=hpKeyCurve - adhesiveCurveID=LEadhesiveCurveIDs[0][0] - offsetCurve=hpOffset + + partNameID = renameLastSurface( + partName, iStation, iModeledLayers, materialName, partNameID + ) + addSurfaceDictEntry( + surfaceDict, + cubit.surface(get_last_id("surface")), + [0, 1, 2, 3], + [0, 1, 2, 3], + materialName, + plyAngle, + ) + + cubit.cmd( + f'curve {surfaceDict[get_last_id("surface")]["curves"][1]} rename "layerThickness"' + ) + cubit.cmd( + f'curve {surfaceDict[get_last_id("surface")]["verts"][-1]} rename "layerThickness"' + ) + + return partNameID, materialsUsed + + +def writeLEAdhesiveCurves( + HPstackThickness, + LPstackThickness, + adhesiveThickness, + hpKeyCurve, + lpKeyCurve, + crossSectionNormal, +): + def uniteLEgeomWithAdhesiveCurves(lpHpside, keyCurve): + if lpHpside.lower() == "hp": + # keyCurve=hpKeyCurve + adhesiveCurveID = LEadhesiveCurveIDs[0][0] + offsetCurve = hpOffset else: - #keyCurve=lpKeyCurve - adhesiveCurveID=LEadhesiveCurveIDs[1][0] - offsetCurve=lpOffset - - print(f'lpHpside: {lpHpside} keyCurve {keyCurve}') - - - v1,_=selCurveVerts(keyCurve) - - cubit.cmd(f'trim curve {keyCurve} atintersection curve {adhesiveCurveID} keepside vertex {v1}') - keyCurve=get_last_id("curve") - - v1,_=selCurveVerts(offsetCurve) - cubit.cmd(f'trim curve {offsetCurve} atintersection curve {adhesiveCurveID} keepside vertex {v1}') - - offsetCurve=get_last_id("curve") - _,v1=selCurveVerts(keyCurve) - _,v2=selCurveVerts(offsetCurve) - - cubit.cmd(f'create curve spline vertex {v1} {v2}') - cubit.cmd(f'delete curve {adhesiveCurveID}') - cubit.cmd(f'delete curve {offsetCurve}') - + # keyCurve=lpKeyCurve + adhesiveCurveID = LEadhesiveCurveIDs[1][0] + offsetCurve = lpOffset + + print(f"lpHpside: {lpHpside} keyCurve {keyCurve}") + + v1, _ = selCurveVerts(keyCurve) + + cubit.cmd( + f"trim curve {keyCurve} atintersection curve {adhesiveCurveID} keepside vertex {v1}" + ) + keyCurve = get_last_id("curve") + + v1, _ = selCurveVerts(offsetCurve) + cubit.cmd( + f"trim curve {offsetCurve} atintersection curve {adhesiveCurveID} keepside vertex {v1}" + ) + + offsetCurve = get_last_id("curve") + _, v1 = selCurveVerts(keyCurve) + _, v2 = selCurveVerts(offsetCurve) + + cubit.cmd(f"create curve spline vertex {v1} {v2}") + cubit.cmd(f"delete curve {adhesiveCurveID}") + cubit.cmd(f"delete curve {offsetCurve}") + return get_last_id("curve"), keyCurve - - LEadhesiveCurveIDs=[[],[]] - - - _,p1=selCurveVerts(hpKeyCurve) - _,p2=selCurveVerts(lpKeyCurve) - - coords=[] + + LEadhesiveCurveIDs = [[], []] + + _, p1 = selCurveVerts(hpKeyCurve) + _, p2 = selCurveVerts(lpKeyCurve) + + coords = [] coords.append(list(cubit.vertex(p1).coordinates())) coords.append(list(cubit.vertex(p2).coordinates())) - coords=np.array(coords) - coords=np.mean(coords,0) - v1=cubit.create_vertex(coords[0],coords[1],coords[2]) - + coords = np.array(coords) + coords = np.mean(coords, 0) + v1 = cubit.create_vertex(coords[0], coords[1], coords[2]) + cubit.cmd(f'vertex {get_last_id("vertex")} copy') - midPointOML=get_last_id("vertex") - - #Offset OML curves to final layer offset - cubit.cmd(f'curve {hpKeyCurve} copy') + midPointOML = get_last_id("vertex") + + # Offset OML curves to 
final layer offset + cubit.cmd(f"curve {hpKeyCurve} copy") cubit.cmd(f'split curve {get_last_id("curve")} fraction 0.05 from end') - offsetCurveAndCombineFragmentsIfNeeded(get_last_id("curve"),-1*HPstackThickness) - #cubit.cmd(f'create curve offset curve {get_last_id("curve")} distance {-1*HPstackThickness} extended') - _,p1=selCurveVerts(get_last_id("curve")) - hpOffset=get_last_id("curve") - - cubit.cmd(f'curve {lpKeyCurve} copy') + offsetCurveAndCombineFragmentsIfNeeded(get_last_id("curve"), -1 * HPstackThickness) + # cubit.cmd(f'create curve offset curve {get_last_id("curve")} distance {-1*HPstackThickness} extended') + _, p1 = selCurveVerts(get_last_id("curve")) + hpOffset = get_last_id("curve") + + cubit.cmd(f"curve {lpKeyCurve} copy") cubit.cmd(f'split curve {get_last_id("curve")} fraction 0.05 from end') - offsetCurveAndCombineFragmentsIfNeeded(get_last_id("curve"),-1*LPstackThickness) - #cubit.cmd(f'create curve offset curve {get_last_id("curve")} distance {-1*LPstackThickness} extended') - _,p2=selCurveVerts(get_last_id("curve")) - lpOffset=get_last_id("curve") + offsetCurveAndCombineFragmentsIfNeeded(get_last_id("curve"), -1 * LPstackThickness) + # cubit.cmd(f'create curve offset curve {get_last_id("curve")} distance {-1*LPstackThickness} extended') + _, p2 = selCurveVerts(get_last_id("curve")) + lpOffset = get_last_id("curve") - coords=[] + coords = [] coords.append(list(cubit.vertex(p1).coordinates())) coords.append(list(cubit.vertex(p2).coordinates())) - coords=np.array(coords) - coords=np.mean(coords,0) - v2=cubit.create_vertex(coords[0],coords[1],coords[2]) - c1=cubit.create_curve(v1,v2) - adhesiveMidLine=get_last_id("curve") - - #Extend midline on both sides to make sure other curves eventually intersect with it - curveStartOrEnd='start' - extensionLength=2*cubit.curve(adhesiveMidLine).length() - adhesiveMidLine=extendCurveAtVertexToLength(adhesiveMidLine,extensionLength,curveStartOrEnd) - - curveStartOrEnd='end' - extensionLength=1*cubit.curve(adhesiveMidLine).length() - adhesiveMidLine=extendCurveAtVertexToLength(adhesiveMidLine,extensionLength,curveStartOrEnd) - - #Copy and move since offset does not seem to work with strait lines - - #get offset vector - - axialDirection=crossSectionNormal + coords = np.array(coords) + coords = np.mean(coords, 0) + v2 = cubit.create_vertex(coords[0], coords[1], coords[2]) + c1 = cubit.create_curve(v1, v2) + adhesiveMidLine = get_last_id("curve") + + # Extend midline on both sides to make sure other curves eventually intersect with it + curveStartOrEnd = "start" + extensionLength = 2 * cubit.curve(adhesiveMidLine).length() + adhesiveMidLine = extendCurveAtVertexToLength( + adhesiveMidLine, extensionLength, curveStartOrEnd + ) + + curveStartOrEnd = "end" + extensionLength = 1 * cubit.curve(adhesiveMidLine).length() + adhesiveMidLine = extendCurveAtVertexToLength( + adhesiveMidLine, extensionLength, curveStartOrEnd + ) + + # Copy and move since offset does not seem to work with strait lines + + # get offset vector + + axialDirection = crossSectionNormal position = cubit.curve(adhesiveMidLine).position_from_fraction(1.0) - tangentDirection=cubit.curve(adhesiveMidLine).tangent(position) - - normalDirection=crossProd(axialDirection,tangentDirection) - normalDirection=adhesiveThickness/2*np.array(vectNorm([normalDirection[0],normalDirection[1],normalDirection[2]])) - cubit.cmd(f'curve {adhesiveMidLine} copy move x {normalDirection[0]} y {normalDirection[1]} z {normalDirection[2]} nomesh') - + tangentDirection = 
cubit.curve(adhesiveMidLine).tangent(position) + + normalDirection = crossProd(axialDirection, tangentDirection) + normalDirection = ( + adhesiveThickness + / 2 + * np.array( + vectNorm([normalDirection[0], normalDirection[1], normalDirection[2]]) + ) + ) + cubit.cmd( + f"curve {adhesiveMidLine} copy move x {normalDirection[0]} y {normalDirection[1]} z {normalDirection[2]} nomesh" + ) + LEadhesiveCurveIDs[0].append(get_last_id("curve")) - normalDirection=-1*normalDirection - cubit.cmd(f'curve {adhesiveMidLine} copy move x {normalDirection[0]} y {normalDirection[1]} z {normalDirection[2]} nomesh') + normalDirection = -1 * normalDirection + cubit.cmd( + f"curve {adhesiveMidLine} copy move x {normalDirection[0]} y {normalDirection[1]} z {normalDirection[2]} nomesh" + ) LEadhesiveCurveIDs[1].append(get_last_id("curve")) - cubit.cmd(f'delete curve {adhesiveMidLine}') + cubit.cmd(f"delete curve {adhesiveMidLine}") - keyCurves=[hpKeyCurve,lpKeyCurve] - for iSide, lpHpside in enumerate(['HP','LP']): - + keyCurves = [hpKeyCurve, lpKeyCurve] + for iSide, lpHpside in enumerate(["HP", "LP"]): ###HP### - LEadhesiveCurveIDs[iSide][0],keyCurves[iSide]=uniteLEgeomWithAdhesiveCurves(lpHpside,keyCurves[iSide]) + LEadhesiveCurveIDs[iSide][0], keyCurves[iSide] = uniteLEgeomWithAdhesiveCurves( + lpHpside, keyCurves[iSide] + ) - #Make Copies - cubit.cmd(f'curve {LEadhesiveCurveIDs[iSide][0]} copy') + # Make Copies + cubit.cmd(f"curve {LEadhesiveCurveIDs[iSide][0]} copy") LEadhesiveCurveIDs[iSide].append(get_last_id("curve")) - cubit.cmd(f'curve {LEadhesiveCurveIDs[iSide][1]} copy') + cubit.cmd(f"curve {LEadhesiveCurveIDs[iSide][1]} copy") LEadhesiveCurveIDs[iSide].append(get_last_id("curve")) - #Extend - curveStartOrEnd='end' - + # Extend + curveStartOrEnd = "end" - extensionLength=1*cubit.curve(LEadhesiveCurveIDs[iSide][1]).length() - LEadhesiveCurveIDs[iSide][1]=extendCurveAtVertexToLength(LEadhesiveCurveIDs[iSide][1],extensionLength,curveStartOrEnd) + extensionLength = 1 * cubit.curve(LEadhesiveCurveIDs[iSide][1]).length() + LEadhesiveCurveIDs[iSide][1] = extendCurveAtVertexToLength( + LEadhesiveCurveIDs[iSide][1], extensionLength, curveStartOrEnd + ) + return keyCurves[0], keyCurves[1], LEadhesiveCurveIDs - return keyCurves[0],keyCurves[1],LEadhesiveCurveIDs -def splitCurveAtCoordintePoints(coordinatesToSplitCurve,curveIDToSplit): - cubit.cmd(f'curve {curveIDToSplit} copy') - tempCurveID=get_last_id("curve") - +def splitCurveAtCoordintePoints(coordinatesToSplitCurve, curveIDToSplit): + cubit.cmd(f"curve {curveIDToSplit} copy") + tempCurveID = get_last_id("curve") - nDPs,_=coordinatesToSplitCurve.shape - idStart=get_last_id("vertex")+1 + nDPs, _ = coordinatesToSplitCurve.shape + idStart = get_last_id("vertex") + 1 for kcp in range(nDPs): - temp=cubit.curve(tempCurveID).closest_point([coordinatesToSplitCurve[kcp,0],coordinatesToSplitCurve[kcp,1],coordinatesToSplitCurve[kcp,2]]) - create_vertex(temp[0],temp[1],temp[2]) - idEnd=get_last_id("vertex") - - DPverticies = [i for i in range(idStart,idEnd+1)] - - idStart=get_last_id("curve")+1 - cubit.cmd(f'split curve {tempCurveID} at vertex {l2s(DPverticies)}') - idEnd=get_last_id("curve") - keyCurves = [i for i in range(idStart,idEnd+1)] + temp = cubit.curve(tempCurveID).closest_point( + [ + coordinatesToSplitCurve[kcp, 0], + coordinatesToSplitCurve[kcp, 1], + coordinatesToSplitCurve[kcp, 2], + ] + ) + create_vertex(temp[0], temp[1], temp[2]) + idEnd = get_last_id("vertex") + + DPverticies = [i for i in range(idStart, idEnd + 1)] + + idStart = get_last_id("curve") 
+ 1 + cubit.cmd(f"split curve {tempCurveID} at vertex {l2s(DPverticies)}") + idEnd = get_last_id("curve") + keyCurves = [i for i in range(idStart, idEnd + 1)] return keyCurves -def splitKeyCurves(keyCurves,aftWebStack,foreWebStack,web_adhesive_width): + +def splitKeyCurves(keyCurves, aftWebStack, foreWebStack, web_adhesive_width): ###Do not split TE reinf - tempBaseCurveIDs=[keyCurves[0]] + tempBaseCurveIDs = [keyCurves[0]] ###split TE panel in half - cubit.cmd(f'split curve {keyCurves[1]} fraction 0.5') + cubit.cmd(f"split curve {keyCurves[1]} fraction 0.5") - tempBaseCurveIDs.append(get_last_id("curve")-1) + tempBaseCurveIDs.append(get_last_id("curve") - 1) tempBaseCurveIDs.append(get_last_id("curve")) ###Partition sparcap curve - vertexList=[] - webLayerThickness=0 - nStart=get_last_id("vertex")+1 + vertexList = [] + webLayerThickness = 0 + nStart = get_last_id("vertex") + 1 for iLayer in reversed(range(len(aftWebStack.plygroups))): - webLayerThickness+=aftWebStack.plygroups[iLayer].thickness*aftWebStack.plygroups[iLayer].nPlies/1000 - cubit.cmd(f'create vertex on curve {keyCurves[2]} distance {webLayerThickness} from start') - cubit.cmd(f'create vertex on curve {keyCurves[2]} distance {webLayerThickness+web_adhesive_width} from start') - - #get total foreweb thickness - webLayerThickness=sum(foreWebStack.layerThicknesses())/1000 - cubit.cmd(f'create vertex on curve {keyCurves[2]} distance {webLayerThickness+web_adhesive_width} from end') + webLayerThickness += ( + aftWebStack.plygroups[iLayer].thickness + * aftWebStack.plygroups[iLayer].nPlies + / 1000 + ) + cubit.cmd( + f"create vertex on curve {keyCurves[2]} distance {webLayerThickness} from start" + ) + cubit.cmd( + f"create vertex on curve {keyCurves[2]} distance {webLayerThickness+web_adhesive_width} from start" + ) + + # get total foreweb thickness + webLayerThickness = sum(foreWebStack.layerThicknesses()) / 1000 + cubit.cmd( + f"create vertex on curve {keyCurves[2]} distance {webLayerThickness+web_adhesive_width} from end" + ) for iLayer in reversed(range(len(foreWebStack.plygroups))): - cubit.cmd(f'create vertex on curve {keyCurves[2]} distance {webLayerThickness} from end') - webLayerThickness-=foreWebStack.plygroups[iLayer].thickness*foreWebStack.plygroups[iLayer].nPlies/1000 - - - nEnd=get_last_id("vertex") - vertexList+=list(range(nStart,nEnd+1)) - - nStart=get_last_id("curve")+1 - cubit.cmd(f'split curve {keyCurves[2]} at vertex {l2s(vertexList)}') - nEnd=get_last_id("curve") + cubit.cmd( + f"create vertex on curve {keyCurves[2]} distance {webLayerThickness} from end" + ) + webLayerThickness -= ( + foreWebStack.plygroups[iLayer].thickness + * foreWebStack.plygroups[iLayer].nPlies + / 1000 + ) + + nEnd = get_last_id("vertex") + vertexList += list(range(nStart, nEnd + 1)) + + nStart = get_last_id("curve") + 1 + cubit.cmd(f"split curve {keyCurves[2]} at vertex {l2s(vertexList)}") + nEnd = get_last_id("curve") tempBaseCurveIDs.append(nStart) tempBaseCurveIDs.append(nEnd) - sparCapBaseCurves=list(range(nStart+1,nEnd)) + sparCapBaseCurves = list(range(nStart + 1, nEnd)) ###split LE panel in half - cubit.cmd(f'split curve {keyCurves[3]} fraction 0.5') - tempBaseCurveIDs.append(get_last_id("curve")-1) + cubit.cmd(f"split curve {keyCurves[3]} fraction 0.5") + tempBaseCurveIDs.append(get_last_id("curve") - 1) tempBaseCurveIDs.append(get_last_id("curve")) ###Do not split LE reinf tempBaseCurveIDs.append(keyCurves[-1]) - return tempBaseCurveIDs,sparCapBaseCurves -def getMidLine(blade,iLE,iStation,geometryScaling): - 
X=blade.geometry[:,0,iStation]* geometryScaling - Y=blade.geometry[:,1,iStation]* geometryScaling - Z=blade.geometry[:,2,iStation]* geometryScaling + return tempBaseCurveIDs, sparCapBaseCurves + +def getMidLine(blade, iLE, iStation, geometryScaling): + X = blade.geometry[:, 0, iStation] * geometryScaling + Y = blade.geometry[:, 1, iStation] * geometryScaling + Z = blade.geometry[:, 2, iStation] * geometryScaling ###### Get averge line xHP = X[1:iLE] - xLP = np.flip(X[iLE-1:-1]) + xLP = np.flip(X[iLE - 1 : -1]) yHP = Y[1:iLE] - yLP = np.flip(Y[iLE-1:-1]) + yLP = np.flip(Y[iLE - 1 : -1]) zHP = Z[1:iLE] - zLP = np.flip(Z[iLE-1:-1]) - midline = np.zeros((len(xHP),3)) + zLP = np.flip(Z[iLE - 1 : -1]) + midline = np.zeros((len(xHP), 3)) for iPoint in range(len(xHP)): - midline[iPoint,0] = (xHP[iPoint] + xLP[iPoint]) / 2 - midline[iPoint,1] = (yHP[iPoint] + yLP[iPoint]) / 2 - midline[iPoint,2] = (zHP[iPoint] + zLP[iPoint]) / 2 + midline[iPoint, 0] = (xHP[iPoint] + xLP[iPoint]) / 2 + midline[iPoint, 1] = (yHP[iPoint] + yLP[iPoint]) / 2 + midline[iPoint, 2] = (zHP[iPoint] + zLP[iPoint]) / 2 return midline -def getAdjustmentCurve(curveIDs,layerOffsetDist,curveStartOrEnd,endLayerTaperCurve): - nStart=get_last_id("vertex")+1 - curveFraction=1.0/3 - for iCurve,curveID in enumerate(curveIDs): - curveLength=cubit.curve(curveID).length() - if endLayerTaperCurve is not None and iCurve < endLayerTaperCurve-1: - if curveLength*curveFraction < layerOffsetDist: - cubit.cmd(f'create vertex on curve {curveID} fraction {curveFraction} from {curveStartOrEnd}') +def getAdjustmentCurve(curveIDs, layerOffsetDist, curveStartOrEnd, endLayerTaperCurve): + nStart = get_last_id("vertex") + 1 + curveFraction = 1.0 / 3 + for iCurve, curveID in enumerate(curveIDs): + curveLength = cubit.curve(curveID).length() + if endLayerTaperCurve is not None and iCurve < endLayerTaperCurve - 1: + if curveLength * curveFraction < layerOffsetDist: + cubit.cmd( + f"create vertex on curve {curveID} fraction {curveFraction} from {curveStartOrEnd}" + ) else: - cubit.cmd(f'create vertex on curve {curveID} distance {layerOffsetDist} from {curveStartOrEnd}') + cubit.cmd( + f"create vertex on curve {curveID} distance {layerOffsetDist} from {curveStartOrEnd}" + ) else: - cubit.cmd(f'create vertex on curve {curveID} distance {layerOffsetDist} from {curveStartOrEnd}') - - - nEnd=get_last_id("vertex") - vertexList=list(range(nStart,nEnd+1)) - cubit.cmd(f'create curve spline vertex {l2s(vertexList)}') - adjustmentCurve=get_last_id("curve") - cubit.cmd(f'delete vertex {l2s(vertexList[1:-1])}') + cubit.cmd( + f"create vertex on curve {curveID} distance {layerOffsetDist} from {curveStartOrEnd}" + ) + + nEnd = get_last_id("vertex") + vertexList = list(range(nStart, nEnd + 1)) + cubit.cmd(f"create curve spline vertex {l2s(vertexList)}") + adjustmentCurve = get_last_id("curve") + cubit.cmd(f"delete vertex {l2s(vertexList[1:-1])}") return adjustmentCurve -def makeCrossSectionLayerAreas_perimeter(surfaceDict,iStation,stationStacks,params,thicknessScaling,lpHpside,isFlatback,TEangle,lastRoundStation,partNameID, nModeledLayers,crossSectionNormal,lpHpCurveDict,materialsUsed): - partName=lpHpside+'shell' - #Assumes that #HP side is made first - if lpHpside.lower() == 'hp': - lpHpsideIndex=0 - camberOffsetSign=1 + +def makeCrossSectionLayerAreas_perimeter( + surfaceDict, + iStation, + stationStacks, + params, + thicknessScaling, + lpHpside, + isFlatback, + TEangle, + lastRoundStation, + partNameID, + nModeledLayers, + crossSectionNormal, + lpHpCurveDict, + 
materialsUsed, +): + partName = lpHpside + "shell" + + # Assumes that #HP side is made first + if lpHpside.lower() == "hp": + lpHpsideIndex = 0 + camberOffsetSign = 1 else: - lpHpsideIndex=1 - camberOffsetSign=-1 + lpHpsideIndex = 1 + camberOffsetSign = -1 + + offsetSign_camberID = printOffsetDirectionCheck( + lpHpCurveDict["camberID"], "LP", crossSectionNormal + ) - offsetSign_camberID=printOffsetDirectionCheck(lpHpCurveDict['camberID'],'LP',crossSectionNormal) - - baseCurveIndexCT=0 + baseCurveIndexCT = 0 stationStacks.shape - nStationLayups=len(stationStacks) + nStationLayups = len(stationStacks) stationStacks.shape - lastPerimeter=nStationLayups-2 + lastPerimeter = nStationLayups - 2 - for iPerimeter in range(nStationLayups-1): #Skip the last stack since the current and the next stack are generated at the same time. - with open('cubitBlade.log', 'a') as logFile: - logFile.write(f'\tlpHpside {lpHpside}, iPerimeter={iPerimeter}\n') + for iPerimeter in range( + nStationLayups - 1 + ): # Skip the last stack since the current and the next stack are generated at the same time. + with open("cubitBlade.log", "a") as logFile: + logFile.write(f"\tlpHpside {lpHpside}, iPerimeter={iPerimeter}\n") currentStack = stationStacks[iPerimeter] - nextStack = stationStacks[iPerimeter+1] - - currentStackLayerThicknesses=np.array(currentStack.layerThicknesses())/1000 - nextStackLayerThicknesses=np.array(nextStack.layerThicknesses())/1000 - - cubit.cmd(f'curve {lpHpCurveDict["baseCurveIDs"][lpHpsideIndex][baseCurveIndexCT]} copy') - currentBaseCurveID=get_last_id("curve") - baseCurveIndexCT+=1 - cubit.cmd(f'curve {lpHpCurveDict["baseCurveIDs"][lpHpsideIndex][baseCurveIndexCT]} copy') - nextBaseCurveID=get_last_id("curve") - baseCurveIndexCT+=1 - - currentStackSurfaceList=[] - transitionStackSurfaceList=[] - nextStackSurfaceList=[] + nextStack = stationStacks[iPerimeter + 1] + + currentStackLayerThicknesses = np.array(currentStack.layerThicknesses()) / 1000 + nextStackLayerThicknesses = np.array(nextStack.layerThicknesses()) / 1000 + + cubit.cmd( + f'curve {lpHpCurveDict["baseCurveIDs"][lpHpsideIndex][baseCurveIndexCT]} copy' + ) + currentBaseCurveID = get_last_id("curve") + baseCurveIndexCT += 1 + cubit.cmd( + f'curve {lpHpCurveDict["baseCurveIDs"][lpHpsideIndex][baseCurveIndexCT]} copy' + ) + nextBaseCurveID = get_last_id("curve") + baseCurveIndexCT += 1 + + currentStackSurfaceList = [] + transitionStackSurfaceList = [] + nextStackSurfaceList = [] currentStackLayerOffset = 0 nextStackLayerOffset = 0 layerThicknessTransitionLengths = [] - #Get all offsets and layerThicknessTransitionLengths - thinestLayerThicknessCurrentStack = 1e+22 #initialize to a large value - thinestLayerThicknessNextStack = 1e+22 + # Get all offsets and layerThicknessTransitionLengths + thinestLayerThicknessCurrentStack = 1e22 # initialize to a large value + thinestLayerThicknessNextStack = 1e22 for iModeledLayers in range(nModeledLayers): - currentStackLayerOffset+=currentStackLayerThicknesses[iModeledLayers] - nextStackLayerOffset+=nextStackLayerThicknesses[iModeledLayers] + currentStackLayerOffset += currentStackLayerThicknesses[iModeledLayers] + nextStackLayerOffset += nextStackLayerThicknesses[iModeledLayers] - adjacentLayerMissmatch=abs(currentStackLayerOffset-nextStackLayerOffset) + adjacentLayerMissmatch = abs(currentStackLayerOffset - nextStackLayerOffset) - - if adjacentLayerMissmatch > params['minimumTransitionLength'][iStation]: - layerThicknessTransitionLengths.append(adjacentLayerMissmatch / 
tan(math.radians(params['transitionTaperAngle']))) + if adjacentLayerMissmatch > params["minimumTransitionLength"][iStation]: + layerThicknessTransitionLengths.append( + adjacentLayerMissmatch + / tan(math.radians(params["transitionTaperAngle"])) + ) else: - layerThicknessTransitionLengths.append(params['minimumTransitionLength'][iStation]) - - #Also find the thinest layer in stack for meshing purposes - if currentStackLayerThicknesses[iModeledLayers]>0 and currentStackLayerThicknesses[iModeledLayers] < thinestLayerThicknessCurrentStack: - thinestLayerThicknessCurrentStack=currentStackLayerThicknesses[iModeledLayers] - - if nextStackLayerThicknesses[iModeledLayers]>0 and nextStackLayerThicknesses[iModeledLayers] < thinestLayerThicknessNextStack: - thinestLayerThicknessNextStack=nextStackLayerThicknesses[iModeledLayers] - - maxLayerThicknessTransitionLength=max(layerThicknessTransitionLengths) - - - if iPerimeter in [0,2]: - leftBottomCurve=currentBaseCurveID - cubit.cmd(f'split curve {nextBaseCurveID} distance {maxLayerThicknessTransitionLength} from start ') - transitionBottomCurve=get_last_id("curve")-1 - rightBottomCurve=get_last_id("curve") + layerThicknessTransitionLengths.append( + params["minimumTransitionLength"][iStation] + ) + + # Also find the thinest layer in stack for meshing purposes + if ( + currentStackLayerThicknesses[iModeledLayers] > 0 + and currentStackLayerThicknesses[iModeledLayers] + < thinestLayerThicknessCurrentStack + ): + thinestLayerThicknessCurrentStack = currentStackLayerThicknesses[ + iModeledLayers + ] + + if ( + nextStackLayerThicknesses[iModeledLayers] > 0 + and nextStackLayerThicknesses[iModeledLayers] + < thinestLayerThicknessNextStack + ): + thinestLayerThicknessNextStack = nextStackLayerThicknesses[ + iModeledLayers + ] + + maxLayerThicknessTransitionLength = max(layerThicknessTransitionLengths) + + if iPerimeter in [0, 2]: + leftBottomCurve = currentBaseCurveID + cubit.cmd( + f"split curve {nextBaseCurveID} distance {maxLayerThicknessTransitionLength} from start " + ) + transitionBottomCurve = get_last_id("curve") - 1 + rightBottomCurve = get_last_id("curve") transitionStack = nextStack - elif iPerimeter in [1,3]: - rightBottomCurve=nextBaseCurveID - cubit.cmd(f'split curve {currentBaseCurveID} distance {maxLayerThicknessTransitionLength} from end ') - leftBottomCurve=get_last_id("curve")-1 - transitionBottomCurve=get_last_id("curve") + elif iPerimeter in [1, 3]: + rightBottomCurve = nextBaseCurveID + cubit.cmd( + f"split curve {currentBaseCurveID} distance {maxLayerThicknessTransitionLength} from end " + ) + leftBottomCurve = get_last_id("curve") - 1 + transitionBottomCurve = get_last_id("curve") transitionStack = currentStack else: - raise ValueError(f'iPerimeter {iPerimeter} not recognized') - - bottomLeftVertexCurveLeft,bottomRightVertexCurveLeft=selCurveVerts(leftBottomCurve) - bottomLeftVertexCurveRight,bottomRightVertexCurveRight=selCurveVerts(rightBottomCurve) + raise ValueError(f"iPerimeter {iPerimeter} not recognized") + + bottomLeftVertexCurveLeft, bottomRightVertexCurveLeft = selCurveVerts( + leftBottomCurve + ) + bottomLeftVertexCurveRight, bottomRightVertexCurveRight = selCurveVerts( + rightBottomCurve + ) + + # This if statement prepares all layer curves such that they taper at the TE + if iPerimeter == 0: + currentStackRightCurves = [] + currentStackLeftCurves = [] + + # Base curve copy + cubit.cmd(f"curve {leftBottomCurve} copy") + baseCurveIDCopy = get_last_id("curve") + offsetSign_baseCurveIDCopy = printOffsetDirectionCheck( + 
baseCurveIDCopy, lpHpside, crossSectionNormal + ) + + # offset camber to make gap + cubit.cmd( + f'create curve offset curve {lpHpCurveDict["camberID"]} distance {camberOffsetSign*offsetSign_camberID*params["TE_adhesive"][iStation]/2} extended' + ) + camberOffset = get_last_id("curve") + + # Top Bounding Curve + offsetDistance = ( + 1 * offsetSign_baseCurveIDCopy * sum(currentStackLayerThicknesses) + ) + offsetCurveAndCombineFragmentsIfNeeded(baseCurveIDCopy, offsetDistance) + topBoundingCurve = get_last_id("curve") - - - #This if statement prepares all layer curves such that they taper at the TE - if iPerimeter==0: - currentStackRightCurves=[] - currentStackLeftCurves=[] - - #Base curve copy - cubit.cmd(f'curve {leftBottomCurve} copy') - baseCurveIDCopy=get_last_id("curve") - offsetSign_baseCurveIDCopy=printOffsetDirectionCheck(baseCurveIDCopy,lpHpside,crossSectionNormal) - - #offset camber to make gap - cubit.cmd(f'create curve offset curve {lpHpCurveDict["camberID"]} distance {camberOffsetSign*offsetSign_camberID*params["TE_adhesive"][iStation]/2} extended') - camberOffset=get_last_id("curve") - - #Top Bounding Curve - offsetDistance=1*offsetSign_baseCurveIDCopy*sum(currentStackLayerThicknesses) - offsetCurveAndCombineFragmentsIfNeeded(baseCurveIDCopy,offsetDistance) - topBoundingCurve=get_last_id("curve") - if isFlatback: - - curveStartOrEnd='start' - extensionLength=1*cubit.curve(topBoundingCurve).length() - topBoundingCurve=extendCurveAtVertexToLength(topBoundingCurve,extensionLength,curveStartOrEnd) - keepCurve=2 - - topBoundingCurve,beginLayerTaperVertexID=streamlineCurveIntersections(camberOffset,topBoundingCurve,keepCurve) + curveStartOrEnd = "start" + extensionLength = 1 * cubit.curve(topBoundingCurve).length() + topBoundingCurve = extendCurveAtVertexToLength( + topBoundingCurve, extensionLength, curveStartOrEnd + ) + keepCurve = 2 + + ( + topBoundingCurve, + beginLayerTaperVertexID, + ) = streamlineCurveIntersections( + camberOffset, topBoundingCurve, keepCurve + ) else: - lpHpCurveDict['flatBackCurveID']=camberOffset - - curveStartOrEnd='start' - extensionLength=1*cubit.curve(baseCurveIDCopy).length() - baseCurveIDCopy=extendCurveAtVertexToLength(baseCurveIDCopy,extensionLength,curveStartOrEnd) - - curveStartOrEnd='start' - extensionLength=1*cubit.curve(topBoundingCurve).length() - topBoundingCurve=extendCurveAtVertexToLength(topBoundingCurve,extensionLength,curveStartOrEnd) - - _,v1=selCurveVerts(baseCurveIDCopy) - cubit.cmd(f'trim curve {baseCurveIDCopy} atintersection curve {lpHpCurveDict["flatBackCurveID"]} keepside vertex {v1}') - baseCurveIDCopy=get_last_id("curve") - - #Trim curve at TE.adhesive - _,v1=selCurveVerts(topBoundingCurve) - cubit.cmd(f'trim curve {topBoundingCurve} atintersection curve {lpHpCurveDict["flatBackCurveID"]} keepside vertex {v1}') - topBoundingCurve=get_last_id("curve") - offsetSign_topBoundingCurve=printOffsetDirectionCheck(topBoundingCurve,lpHpside,crossSectionNormal) - - if isFlatback:# and beginLayerTaperVertexID is not None: - - #Make list of curves that will be used to taper each layer - npts=30 - nStart=get_last_id("curve")+1 - - if lpHpside.lower()=='hp': - signCorrection=1 + lpHpCurveDict["flatBackCurveID"] = camberOffset + + curveStartOrEnd = "start" + extensionLength = 1 * cubit.curve(baseCurveIDCopy).length() + baseCurveIDCopy = extendCurveAtVertexToLength( + baseCurveIDCopy, extensionLength, curveStartOrEnd + ) + + curveStartOrEnd = "start" + extensionLength = 1 * cubit.curve(topBoundingCurve).length() + topBoundingCurve = 
extendCurveAtVertexToLength( + topBoundingCurve, extensionLength, curveStartOrEnd + ) + + _, v1 = selCurveVerts(baseCurveIDCopy) + cubit.cmd( + f'trim curve {baseCurveIDCopy} atintersection curve {lpHpCurveDict["flatBackCurveID"]} keepside vertex {v1}' + ) + baseCurveIDCopy = get_last_id("curve") + + # Trim curve at TE.adhesive + _, v1 = selCurveVerts(topBoundingCurve) + cubit.cmd( + f'trim curve {topBoundingCurve} atintersection curve {lpHpCurveDict["flatBackCurveID"]} keepside vertex {v1}' + ) + topBoundingCurve = get_last_id("curve") + offsetSign_topBoundingCurve = printOffsetDirectionCheck( + topBoundingCurve, lpHpside, crossSectionNormal + ) + + if isFlatback: # and beginLayerTaperVertexID is not None: + # Make list of curves that will be used to taper each layer + npts = 30 + nStart = get_last_id("curve") + 1 + + if lpHpside.lower() == "hp": + signCorrection = 1 else: - signCorrection=-1 + signCorrection = -1 for iPoint in range(npts): - if iPoint==0: - cubit.cmd(f'create vertex on curve {baseCurveIDCopy} start') - cubit.cmd(f'create vertex on curve {topBoundingCurve} start') + if iPoint == 0: + cubit.cmd(f"create vertex on curve {baseCurveIDCopy} start") + cubit.cmd(f"create vertex on curve {topBoundingCurve} start") else: - cubit.cmd(f'create vertex on curve {baseCurveIDCopy} fraction {(iPoint)/(npts-1)} from start') - cubit.cmd(f'create vertex on curve {topBoundingCurve} fraction {(iPoint)/(npts-1)} from start') - cubit.cmd(f'create curve vertex {get_last_id("vertex")-1} {get_last_id("vertex")}') - - nEnd=get_last_id("curve") - curveIDs=list(range(nStart,nEnd+1)) - - #If layers are tapeded toward TE find the index in the list of curves (curveIDs) marks the end of the tapering + cubit.cmd( + f"create vertex on curve {baseCurveIDCopy} fraction {(iPoint)/(npts-1)} from start" + ) + cubit.cmd( + f"create vertex on curve {topBoundingCurve} fraction {(iPoint)/(npts-1)} from start" + ) + cubit.cmd( + f'create curve vertex {get_last_id("vertex")-1} {get_last_id("vertex")}' + ) + + nEnd = get_last_id("curve") + curveIDs = list(range(nStart, nEnd + 1)) + + # If layers are tapeded toward TE find the index in the list of curves (curveIDs) marks the end of the tapering if beginLayerTaperVertexID is not None: - foundFlag=False + foundFlag = False for curveID in curveIDs: - temp=cubit.curve(curveID).curve_center() - tangentDirection=cubit.curve(get_last_id("curve")).tangent(temp) - - print(f'beginLayerTaperVertexID {beginLayerTaperVertexID}') - print(f'baseCurveIDCopy{baseCurveIDCopy} topBoundingCurve {topBoundingCurve}') - - momentArm=np.array(temp)-np.array(cubit.vertex(beginLayerTaperVertexID).coordinates()) - - normalDirection=signCorrection*np.array(crossProd(tangentDirection,momentArm)) - - if not foundFlag and dotProd(normalDirection,crossSectionNormal)<0: - foundFlag=True - endLayerTaperCurve=iPoint + temp = cubit.curve(curveID).curve_center() + tangentDirection = cubit.curve(get_last_id("curve")).tangent( + temp + ) + + print(f"beginLayerTaperVertexID {beginLayerTaperVertexID}") + print( + f"baseCurveIDCopy{baseCurveIDCopy} topBoundingCurve {topBoundingCurve}" + ) + + momentArm = np.array(temp) - np.array( + cubit.vertex(beginLayerTaperVertexID).coordinates() + ) + + normalDirection = signCorrection * np.array( + crossProd(tangentDirection, momentArm) + ) + + if ( + not foundFlag + and dotProd(normalDirection, crossSectionNormal) < 0 + ): + foundFlag = True + endLayerTaperCurve = iPoint else: - endLayerTaperCurve=None + endLayerTaperCurve = None - - #TE Adhesive curve + # TE Adhesive 
curve if isFlatback: - cubit.cmd(f'curve {topBoundingCurve} copy') - cubit.cmd(f'split curve {get_last_id("curve")} distance {params["TE_adhesive_width"][iStation]} from start') - #HPTEadhesiveCurveID=get_last_id("curve")-1 - lpHpCurveDict['TEadhesiveCurveList'][lpHpsideIndex].append(get_last_id("curve")-1) + cubit.cmd(f"curve {topBoundingCurve} copy") + cubit.cmd( + f'split curve {get_last_id("curve")} distance {params["TE_adhesive_width"][iStation]} from start' + ) + # HPTEadhesiveCurveID=get_last_id("curve")-1 + lpHpCurveDict["TEadhesiveCurveList"][lpHpsideIndex].append( + get_last_id("curve") - 1 + ) cubit.cmd(f'delete curve {get_last_id("curve")}') else: - v1,_=selCurveVerts(baseCurveIDCopy) - v2,_=selCurveVerts(topBoundingCurve) - cubit.cmd(f'split curve {lpHpCurveDict["flatBackCurveID"]} at vertex {v1} {v2}') - cubit.cmd(f'delete curve {get_last_id("curve")} {get_last_id("curve")-2}') - lpHpCurveDict['flatBackCurveID']=get_last_id("curve")-1 + v1, _ = selCurveVerts(baseCurveIDCopy) + v2, _ = selCurveVerts(topBoundingCurve) + cubit.cmd( + f'split curve {lpHpCurveDict["flatBackCurveID"]} at vertex {v1} {v2}' + ) + cubit.cmd( + f'delete curve {get_last_id("curve")} {get_last_id("curve")-2}' + ) + lpHpCurveDict["flatBackCurveID"] = get_last_id("curve") - 1 cubit.cmd(f'curve {lpHpCurveDict["flatBackCurveID"]} copy') - #HPTEadhesiveCurveID=get_last_id("curve") - - iLayer=0 - offsetDistance=1*offsetSign_baseCurveIDCopy*currentStackLayerThicknesses[iLayer] - offsetCurveAndCombineFragmentsIfNeeded(baseCurveIDCopy,offsetDistance) - firstLayerOffset=get_last_id("curve") - - curveStartOrEnd='start' - firstLayerOffset=extendCurvePastCurveAndTrim(firstLayerOffset,curveStartOrEnd,lpHpCurveDict['flatBackCurveID']) - - #Only do the following if all layer thicknesses are unequal - - if isFlatback and abs(min(currentStack.layerThicknesses())-max(currentStack.layerThicknesses())) > 0.0001: - - layerOffsetDist=currentStackLayerThicknesses[0] - curveStartOrEnd='start' - firstLayerOffset=getAdjustmentCurve(curveIDs,layerOffsetDist,curveStartOrEnd,endLayerTaperCurve) - - - - cubit.cmd(f'create curve offset curve {lpHpCurveDict["camberID"]} distance {camberOffsetSign*offsetSign_camberID*params["TE_adhesive"][iStation]/2} extended') - camberOffset=get_last_id("curve") - - offsetSign_topBoundingCurve=printOffsetDirectionCheck(topBoundingCurve,lpHpside,crossSectionNormal) - - offsetDistance=-1*offsetSign_topBoundingCurve*currentStackLayerThicknesses[-1] - offsetCurveAndCombineFragmentsIfNeeded(topBoundingCurve,offsetDistance) - lastLayerOffset=get_last_id("curve") - - - curveStartOrEnd='start' - lastLayerOffset=extendCurvePastCurveAndTrim(lastLayerOffset,curveStartOrEnd,lpHpCurveDict['flatBackCurveID']) - - if isFlatback and abs(min(currentStack.layerThicknesses())-max(currentStack.layerThicknesses())) > 0.0001: - - layerOffsetDist=currentStackLayerThicknesses[-1] - curveStartOrEnd='end' - lastLayerOffset=getAdjustmentCurve(curveIDs,layerOffsetDist,curveStartOrEnd,endLayerTaperCurve) - - cubit.cmd(f'split curve {firstLayerOffset} distance {params["TE_adhesive_width"][iStation]} from start') - currentStackLeftCurves.append(get_last_id("curve")-1) + # HPTEadhesiveCurveID=get_last_id("curve") + + iLayer = 0 + offsetDistance = ( + 1 * offsetSign_baseCurveIDCopy * currentStackLayerThicknesses[iLayer] + ) + offsetCurveAndCombineFragmentsIfNeeded(baseCurveIDCopy, offsetDistance) + firstLayerOffset = get_last_id("curve") + + curveStartOrEnd = "start" + firstLayerOffset = extendCurvePastCurveAndTrim( + 
firstLayerOffset, curveStartOrEnd, lpHpCurveDict["flatBackCurveID"] + ) + + # Only do the following if all layer thicknesses are unequal + + if ( + isFlatback + and abs( + min(currentStack.layerThicknesses()) + - max(currentStack.layerThicknesses()) + ) + > 0.0001 + ): + layerOffsetDist = currentStackLayerThicknesses[0] + curveStartOrEnd = "start" + firstLayerOffset = getAdjustmentCurve( + curveIDs, layerOffsetDist, curveStartOrEnd, endLayerTaperCurve + ) + + cubit.cmd( + f'create curve offset curve {lpHpCurveDict["camberID"]} distance {camberOffsetSign*offsetSign_camberID*params["TE_adhesive"][iStation]/2} extended' + ) + camberOffset = get_last_id("curve") + + offsetSign_topBoundingCurve = printOffsetDirectionCheck( + topBoundingCurve, lpHpside, crossSectionNormal + ) + + offsetDistance = ( + -1 * offsetSign_topBoundingCurve * currentStackLayerThicknesses[-1] + ) + offsetCurveAndCombineFragmentsIfNeeded(topBoundingCurve, offsetDistance) + lastLayerOffset = get_last_id("curve") + + curveStartOrEnd = "start" + lastLayerOffset = extendCurvePastCurveAndTrim( + lastLayerOffset, curveStartOrEnd, lpHpCurveDict["flatBackCurveID"] + ) + + if ( + isFlatback + and abs( + min(currentStack.layerThicknesses()) + - max(currentStack.layerThicknesses()) + ) + > 0.0001 + ): + layerOffsetDist = currentStackLayerThicknesses[-1] + curveStartOrEnd = "end" + lastLayerOffset = getAdjustmentCurve( + curveIDs, layerOffsetDist, curveStartOrEnd, endLayerTaperCurve + ) + + cubit.cmd( + f'split curve {firstLayerOffset} distance {params["TE_adhesive_width"][iStation]} from start' + ) + currentStackLeftCurves.append(get_last_id("curve") - 1) currentStackRightCurves.append(get_last_id("curve")) - curveLen=cubit.curve(currentStackLeftCurves[0]).length() - cubit.cmd(f'split curve {baseCurveIDCopy} distance {curveLen} from start') - currentStackLeftCurves.insert(0,get_last_id("curve")-1) - currentStackRightCurves.insert(0,get_last_id("curve")) - cubit.cmd(f'split curve {lastLayerOffset} distance {curveLen} from start') - currentStackLeftCurves.append(get_last_id("curve")-1) + curveLen = cubit.curve(currentStackLeftCurves[0]).length() + cubit.cmd(f"split curve {baseCurveIDCopy} distance {curveLen} from start") + currentStackLeftCurves.insert(0, get_last_id("curve") - 1) + currentStackRightCurves.insert(0, get_last_id("curve")) + cubit.cmd(f"split curve {lastLayerOffset} distance {curveLen} from start") + currentStackLeftCurves.append(get_last_id("curve") - 1) currentStackRightCurves.append(get_last_id("curve")) - cubit.cmd(f'split curve {topBoundingCurve} distance {curveLen} from start') - currentStackLeftCurves.append(get_last_id("curve")-1) + cubit.cmd(f"split curve {topBoundingCurve} distance {curveLen} from start") + currentStackLeftCurves.append(get_last_id("curve") - 1) currentStackRightCurves.append(get_last_id("curve")) - - #### Next Stack (the panel might intersect the camberline so the following is needed - nextStackCurves=[] - cubit.cmd(f'curve {rightBottomCurve} copy') - baseCurveIDCopy=get_last_id("curve") - offsetSign_baseCurveIDCopy=printOffsetDirectionCheck(baseCurveIDCopy,lpHpside,crossSectionNormal) + #### Next Stack (the panel might intersect the camberline so the following is needed + nextStackCurves = [] + cubit.cmd(f"curve {rightBottomCurve} copy") + baseCurveIDCopy = get_last_id("curve") + offsetSign_baseCurveIDCopy = printOffsetDirectionCheck( + baseCurveIDCopy, lpHpside, crossSectionNormal + ) nextStackCurves.append(baseCurveIDCopy) - + ### Offset camber to make gap - #Offset is increased to create 
a larger clearance between HP LP shells so that the panels - #to not self intersect during a simulation (this may not be needed) - cubit.cmd(f'create curve offset curve {lpHpCurveDict["camberID"]} distance {camberOffsetSign*offsetSign_camberID*0.001*4} extended') - camberOffset=get_last_id("curve") - - iLayer=0 - offsetDistance=1*offsetSign_baseCurveIDCopy*nextStackLayerThicknesses[0] - offsetCurveAndCombineFragmentsIfNeeded(baseCurveIDCopy,offsetDistance) + # Offset is increased to create a larger clearance between HP LP shells so that the panels + # to not self intersect during a simulation (this may not be needed) + cubit.cmd( + f'create curve offset curve {lpHpCurveDict["camberID"]} distance {camberOffsetSign*offsetSign_camberID*0.001*4} extended' + ) + camberOffset = get_last_id("curve") + + iLayer = 0 + offsetDistance = ( + 1 * offsetSign_baseCurveIDCopy * nextStackLayerThicknesses[0] + ) + offsetCurveAndCombineFragmentsIfNeeded(baseCurveIDCopy, offsetDistance) nextStackCurves.append(get_last_id("curve")) + offsetDistance = ( + 1 * offsetSign_baseCurveIDCopy * sum(nextStackLayerThicknesses) + ) + offsetCurveAndCombineFragmentsIfNeeded(baseCurveIDCopy, offsetDistance) + topBoundingCurve = get_last_id("curve") - offsetDistance=1*offsetSign_baseCurveIDCopy*sum(nextStackLayerThicknesses) - offsetCurveAndCombineFragmentsIfNeeded(baseCurveIDCopy,offsetDistance) - topBoundingCurve=get_last_id("curve") + keepCurve = 2 + topBoundingCurve, intersectionVertex = streamlineCurveIntersections( + camberOffset, topBoundingCurve, keepCurve + ) - keepCurve=2 - topBoundingCurve,intersectionVertex=streamlineCurveIntersections(camberOffset,topBoundingCurve,keepCurve) - - v1,_=selCurveVerts(baseCurveIDCopy) - cubit.cmd(f'split curve {topBoundingCurve} at vertex {v1}') - topBoundingCurve=get_last_id("curve") + v1, _ = selCurveVerts(baseCurveIDCopy) + cubit.cmd(f"split curve {topBoundingCurve} at vertex {v1}") + topBoundingCurve = get_last_id("curve") cubit.cmd(f'delete curve {get_last_id("curve")-1}') nextStackCurves.append(topBoundingCurve) - offsetSign_topBoundingCurve=printOffsetDirectionCheck(topBoundingCurve,lpHpside,crossSectionNormal) - - - offsetDistance=-1*offsetSign_topBoundingCurve*nextStackLayerThicknesses[-1] - offsetCurveAndCombineFragmentsIfNeeded(topBoundingCurve,offsetDistance) - lastLayerOffset=get_last_id("curve") - nextStackCurves.insert(-1,lastLayerOffset) + offsetSign_topBoundingCurve = printOffsetDirectionCheck( + topBoundingCurve, lpHpside, crossSectionNormal + ) + offsetDistance = ( + -1 * offsetSign_topBoundingCurve * nextStackLayerThicknesses[-1] + ) + offsetCurveAndCombineFragmentsIfNeeded(topBoundingCurve, offsetDistance) + lastLayerOffset = get_last_id("curve") + nextStackCurves.insert(-1, lastLayerOffset) for iModeledLayers in range(nModeledLayers): - currentStackOffset=currentStackLayerThicknesses[iModeledLayers] - nextStackOffset=nextStackLayerThicknesses[iModeledLayers] - - - offsetSign_leftBottomCurve=printOffsetDirectionCheck(leftBottomCurve,lpHpside,crossSectionNormal) - offsetSign_rightBottomCurve=printOffsetDirectionCheck(rightBottomCurve,lpHpside,crossSectionNormal) - - #Special Treatment for TE - if iPerimeter==0: - #Create Left Areas Only + currentStackOffset = currentStackLayerThicknesses[iModeledLayers] + nextStackOffset = nextStackLayerThicknesses[iModeledLayers] + + offsetSign_leftBottomCurve = printOffsetDirectionCheck( + leftBottomCurve, lpHpside, crossSectionNormal + ) + offsetSign_rightBottomCurve = printOffsetDirectionCheck( + rightBottomCurve, lpHpside, 
crossSectionNormal + ) + + # Special Treatment for TE + if iPerimeter == 0: + # Create Left Areas Only + + materialName = currentStack.plygroups[iModeledLayers].materialid + plyAngle = currentStack.plygroups[iModeledLayers].angle + + partNameID, materialsUsed = makeCrossSectionSurface( + surfaceDict, + iStation, + partName, + currentStackLeftCurves[iModeledLayers + 1], + currentStackLeftCurves[iModeledLayers], + materialName, + plyAngle, + partNameID, + iModeledLayers, + materialsUsed, + ) - materialName=currentStack.plygroups[iModeledLayers].materialid - plyAngle=currentStack.plygroups[iModeledLayers].angle - - partNameID,materialsUsed=makeCrossSectionSurface(surfaceDict,iStation,partName,currentStackLeftCurves[iModeledLayers+1],currentStackLeftCurves[iModeledLayers],materialName,plyAngle,partNameID,iModeledLayers,materialsUsed) - if TEangle > 120: - lpHpCurveDict['TEadhesiveCurveList'][lpHpsideIndex].append(surfaceDict[get_last_id('surface')]['curves'][-1]) - - if iStation== lastRoundStation-1: - v1=surfaceDict[get_last_id("surface")]['verts'][0] + lpHpCurveDict["TEadhesiveCurveList"][lpHpsideIndex].append( + surfaceDict[get_last_id("surface")]["curves"][-1] + ) + + if iStation == lastRoundStation - 1: + v1 = surfaceDict[get_last_id("surface")]["verts"][0] cubit.cmd(f'vertex {v1} rename "linear"') - v1=surfaceDict[get_last_id("surface")]['verts'][-1] + v1 = surfaceDict[get_last_id("surface")]["verts"][-1] cubit.cmd(f'vertex {v1} rename "linear"') - leftBottomCurve=currentStackRightCurves[iModeledLayers] - leftTopCurve=currentStackRightCurves[iModeledLayers+1] - [bottomLeftVertexCurveLeft,bottomRightVertexCurveLeft]=selCurveVerts(currentStackRightCurves[iModeledLayers]) - [topLeftVertexCurveLeft,topRightVertexCurveLeft]=selCurveVerts(currentStackRightCurves[iModeledLayers+1]) - - #Create Left Areas Only - rightBottomCurve=nextStackCurves[iModeledLayers] - rightTopCurve=nextStackCurves[iModeledLayers+1] - [bottomLeftVertexCurveRight,bottomRightVertexCurveRight]=selCurveVerts(nextStackCurves[iModeledLayers]) - [topLeftVertexCurveRight,topRightVertexCurveRight]=selCurveVerts(nextStackCurves[iModeledLayers+1]) + leftBottomCurve = currentStackRightCurves[iModeledLayers] + leftTopCurve = currentStackRightCurves[iModeledLayers + 1] + [bottomLeftVertexCurveLeft, bottomRightVertexCurveLeft] = selCurveVerts( + currentStackRightCurves[iModeledLayers] + ) + [topLeftVertexCurveLeft, topRightVertexCurveLeft] = selCurveVerts( + currentStackRightCurves[iModeledLayers + 1] + ) + + # Create Left Areas Only + rightBottomCurve = nextStackCurves[iModeledLayers] + rightTopCurve = nextStackCurves[iModeledLayers + 1] + [ + bottomLeftVertexCurveRight, + bottomRightVertexCurveRight, + ] = selCurveVerts(nextStackCurves[iModeledLayers]) + [topLeftVertexCurveRight, topRightVertexCurveRight] = selCurveVerts( + nextStackCurves[iModeledLayers + 1] + ) else: - cubit.cmd(f'create curve offset curve {leftBottomCurve} distance {offsetSign_leftBottomCurve*currentStackOffset} extended') - leftTopCurve=get_last_id("curve") - [topLeftVertexCurveLeft,topRightVertexCurveLeft]=selCurveVerts(get_last_id("curve")) - - offsetCurveAndCombineFragmentsIfNeeded(rightBottomCurve,offsetSign_rightBottomCurve*nextStackOffset) - lastOffsetCurve=get_last_id("curve") - rightTopCurve=get_last_id("curve") - [topLeftVertexCurveRight,topRightVertexCurveRight]=selCurveVerts(get_last_id("curve")) - - - if iPerimeter==lastPerimeter: - curveStartOrEnd='end' - print(f'iPerimeter {iPerimeter}') - 
lastOffsetCurve=extendCurvePastCurveAndTrim(lastOffsetCurve,curveStartOrEnd,lpHpCurveDict['LEadhesiveCurveIDs'][lpHpsideIndex][1]) - rightTopCurve=lastOffsetCurve - [bottomLeftVertexCurveRight,bottomRightVertexCurveRight]=selCurveVerts(rightBottomCurve) - [topLeftVertexCurveRight,topRightVertexCurveRight]=selCurveVerts(rightTopCurve) - - - - cubit.cmd(f'create curve vertex {topRightVertexCurveLeft} {topLeftVertexCurveRight}') - transitionTopCurve=get_last_id("curve") - cubit.cmd(f'create curve vertex {bottomLeftVertexCurveLeft} {topLeftVertexCurveLeft}') - cubit.cmd(f'create curve vertex {bottomRightVertexCurveLeft} {topRightVertexCurveLeft}') - nCurvesFinal=get_last_id("curve") - leftSideCurves = [i for i in range(nCurvesFinal-1,nCurvesFinal+1)] - cubit.cmd(f'create curve vertex {bottomLeftVertexCurveRight} {topLeftVertexCurveRight}') - cubit.cmd(f'create curve vertex {bottomRightVertexCurveRight} {topRightVertexCurveRight}') - nCurvesFinal=get_last_id("curve") - rightSideCurves = [i for i in range(nCurvesFinal-1,nCurvesFinal+1)] - - if iPerimeter==lastPerimeter: + cubit.cmd( + f"create curve offset curve {leftBottomCurve} distance {offsetSign_leftBottomCurve*currentStackOffset} extended" + ) + leftTopCurve = get_last_id("curve") + [topLeftVertexCurveLeft, topRightVertexCurveLeft] = selCurveVerts( + get_last_id("curve") + ) + + offsetCurveAndCombineFragmentsIfNeeded( + rightBottomCurve, offsetSign_rightBottomCurve * nextStackOffset + ) + lastOffsetCurve = get_last_id("curve") + rightTopCurve = get_last_id("curve") + [topLeftVertexCurveRight, topRightVertexCurveRight] = selCurveVerts( + get_last_id("curve") + ) + + if iPerimeter == lastPerimeter: + curveStartOrEnd = "end" + print(f"iPerimeter {iPerimeter}") + lastOffsetCurve = extendCurvePastCurveAndTrim( + lastOffsetCurve, + curveStartOrEnd, + lpHpCurveDict["LEadhesiveCurveIDs"][lpHpsideIndex][1], + ) + rightTopCurve = lastOffsetCurve + [ + bottomLeftVertexCurveRight, + bottomRightVertexCurveRight, + ] = selCurveVerts(rightBottomCurve) + [topLeftVertexCurveRight, topRightVertexCurveRight] = selCurveVerts( + rightTopCurve + ) + + cubit.cmd( + f"create curve vertex {topRightVertexCurveLeft} {topLeftVertexCurveRight}" + ) + transitionTopCurve = get_last_id("curve") + cubit.cmd( + f"create curve vertex {bottomLeftVertexCurveLeft} {topLeftVertexCurveLeft}" + ) + cubit.cmd( + f"create curve vertex {bottomRightVertexCurveLeft} {topRightVertexCurveLeft}" + ) + nCurvesFinal = get_last_id("curve") + leftSideCurves = [i for i in range(nCurvesFinal - 1, nCurvesFinal + 1)] + cubit.cmd( + f"create curve vertex {bottomLeftVertexCurveRight} {topLeftVertexCurveRight}" + ) + cubit.cmd( + f"create curve vertex {bottomRightVertexCurveRight} {topRightVertexCurveRight}" + ) + nCurvesFinal = get_last_id("curve") + rightSideCurves = [i for i in range(nCurvesFinal - 1, nCurvesFinal + 1)] + + if iPerimeter == lastPerimeter: if isFlatback: - cubit.cmd(f'split curve {lpHpCurveDict["LEadhesiveCurveIDs"][lpHpsideIndex][1]} at vertex {topRightVertexCurveRight}') - rightSideCurves[-1]=get_last_id("curve")-1 - lpHpCurveDict['LEadhesiveCurveIDs'][lpHpsideIndex][1]=get_last_id("curve") - lpHpCurveDict['LEadhesiveCurveList'][lpHpsideIndex].append(rightSideCurves[-1]) - + cubit.cmd( + f'split curve {lpHpCurveDict["LEadhesiveCurveIDs"][lpHpsideIndex][1]} at vertex {topRightVertexCurveRight}' + ) + rightSideCurves[-1] = get_last_id("curve") - 1 + lpHpCurveDict["LEadhesiveCurveIDs"][lpHpsideIndex][1] = get_last_id( + "curve" + ) + 
lpHpCurveDict["LEadhesiveCurveList"][lpHpsideIndex].append( + rightSideCurves[-1] + ) ### Create Surfaces ### - #Sufaces for currentStack - materialName=currentStack.plygroups[iModeledLayers].materialid - plyAngle=currentStack.plygroups[iModeledLayers].angle - partNameID,materialsUsed=makeCrossSectionSurface(surfaceDict,iStation,partName,leftTopCurve,leftBottomCurve,materialName,plyAngle,partNameID,iModeledLayers,materialsUsed) + # Sufaces for currentStack + materialName = currentStack.plygroups[iModeledLayers].materialid + plyAngle = currentStack.plygroups[iModeledLayers].angle + partNameID, materialsUsed = makeCrossSectionSurface( + surfaceDict, + iStation, + partName, + leftTopCurve, + leftBottomCurve, + materialName, + plyAngle, + partNameID, + iModeledLayers, + materialsUsed, + ) currentStackSurfaceList.append(get_last_id("surface")) - #Surfaces for transitionStack - materialName=transitionStack.plygroups[iModeledLayers].materialid - plyAngle=transitionStack.plygroups[iModeledLayers].angle - partNameID,materialsUsed=makeCrossSectionSurface(surfaceDict,iStation,partName,transitionTopCurve,transitionBottomCurve,materialName,plyAngle,partNameID,iModeledLayers,materialsUsed) + # Surfaces for transitionStack + materialName = transitionStack.plygroups[iModeledLayers].materialid + plyAngle = transitionStack.plygroups[iModeledLayers].angle + partNameID, materialsUsed = makeCrossSectionSurface( + surfaceDict, + iStation, + partName, + transitionTopCurve, + transitionBottomCurve, + materialName, + plyAngle, + partNameID, + iModeledLayers, + materialsUsed, + ) transitionStackSurfaceList.append(get_last_id("surface")) - - #Surfaces for nextStack - materialName=nextStack.plygroups[iModeledLayers].materialid - plyAngle=nextStack.plygroups[iModeledLayers].angle - partNameID,materialsUsed=makeCrossSectionSurface(surfaceDict,iStation,partName,rightTopCurve,rightBottomCurve,materialName,plyAngle,partNameID,iModeledLayers,materialsUsed) + + # Surfaces for nextStack + materialName = nextStack.plygroups[iModeledLayers].materialid + plyAngle = nextStack.plygroups[iModeledLayers].angle + partNameID, materialsUsed = makeCrossSectionSurface( + surfaceDict, + iStation, + partName, + rightTopCurve, + rightBottomCurve, + materialName, + plyAngle, + partNameID, + iModeledLayers, + materialsUsed, + ) nextStackSurfaceList.append(get_last_id("surface")) - ### Reset ### - #Reset curves - leftBottomCurve=leftTopCurve - transitionBottomCurve=transitionTopCurve - rightBottomCurve=rightTopCurve - - #Reset vertices - bottomLeftVertexCurveLeft=topLeftVertexCurveLeft - bottomRightVertexCurveLeft=topRightVertexCurveLeft - bottomLeftVertexCurveRight=topLeftVertexCurveRight - bottomRightVertexCurveRight=topRightVertexCurveRight - - - #Build spar caps - if iPerimeter==1: - lpHpCurveDict['webInterfaceCurves'][lpHpsideIndex]=[rightTopCurve] - for ic, currentCurveID in enumerate(lpHpCurveDict['sparCapBaseCurves'][lpHpsideIndex]): - bottomCurve=currentCurveID - offSetSign=printOffsetDirectionCheck(bottomCurve,lpHpside,crossSectionNormal) - + # Reset curves + leftBottomCurve = leftTopCurve + transitionBottomCurve = transitionTopCurve + rightBottomCurve = rightTopCurve + + # Reset vertices + bottomLeftVertexCurveLeft = topLeftVertexCurveLeft + bottomRightVertexCurveLeft = topRightVertexCurveLeft + bottomLeftVertexCurveRight = topLeftVertexCurveRight + bottomRightVertexCurveRight = topRightVertexCurveRight + + # Build spar caps + if iPerimeter == 1: + lpHpCurveDict["webInterfaceCurves"][lpHpsideIndex] = [rightTopCurve] + for ic, 
currentCurveID in enumerate( + lpHpCurveDict["sparCapBaseCurves"][lpHpsideIndex] + ): + bottomCurve = currentCurveID + offSetSign = printOffsetDirectionCheck( + bottomCurve, lpHpside, crossSectionNormal + ) + for it, thickness in enumerate(nextStackLayerThicknesses): - cubit.cmd(f'create curve offset curve {bottomCurve} distance {offSetSign*thickness} extended') - topCurve=get_last_id("curve") - - materialName=nextStack.plygroups[it].materialid - plyAngle=nextStack.plygroups[it].angle - partNameID,materialsUsed=makeCrossSectionSurface(surfaceDict,iStation,partName,topCurve,bottomCurve,materialName,plyAngle,partNameID,it,materialsUsed) - nextStackSurfaceList.append(get_last_id("surface")) - - if it==2 and ic!=3: - lpHpCurveDict['webInterfaceCurves'][lpHpsideIndex].append(topCurve) - bottomCurve=topCurve - elif iPerimeter==2: - lpHpCurveDict['webInterfaceCurves'][lpHpsideIndex].append(leftTopCurve) - return partNameID,lpHpCurveDict + cubit.cmd( + f"create curve offset curve {bottomCurve} distance {offSetSign*thickness} extended" + ) + topCurve = get_last_id("curve") + + materialName = nextStack.plygroups[it].materialid + plyAngle = nextStack.plygroups[it].angle + partNameID, materialsUsed = makeCrossSectionSurface( + surfaceDict, + iStation, + partName, + topCurve, + bottomCurve, + materialName, + plyAngle, + partNameID, + it, + materialsUsed, + ) + nextStackSurfaceList.append(get_last_id("surface")) + + if it == 2 and ic != 3: + lpHpCurveDict["webInterfaceCurves"][lpHpsideIndex].append( + topCurve + ) + bottomCurve = topCurve + elif iPerimeter == 2: + lpHpCurveDict["webInterfaceCurves"][lpHpsideIndex].append(leftTopCurve) + return partNameID, lpHpCurveDict + + #################################################### #################################################### #################################################### @@ -951,129 +1320,225 @@ def makeCrossSectionLayerAreas_perimeter(surfaceDict,iStation,stationStacks,para #################################################### -def createSimplistSurfaceForTEorLEadhesive(iStation,surfaceDict,partName,adhesiveCurveList,adhesiveMatID,partNameID,nModeledLayers,materialsUsed): +def createSimplistSurfaceForTEorLEadhesive( + iStation, + surfaceDict, + partName, + adhesiveCurveList, + adhesiveMatID, + partNameID, + nModeledLayers, + materialsUsed, +): for iCurve in range(len(adhesiveCurveList[0])): - plyAngle=0 #Ply angle is always zero since adhesive is always assumed as isotropic - partNameID,materialsUsed=makeCrossSectionSurface(surfaceDict,iStation,partName,adhesiveCurveList[1][iCurve],adhesiveCurveList[0][iCurve],adhesiveMatID,plyAngle,partNameID,nModeledLayers+1,materialsUsed) - + plyAngle = ( + 0 # Ply angle is always zero since adhesive is always assumed as isotropic + ) + partNameID, materialsUsed = makeCrossSectionSurface( + surfaceDict, + iStation, + partName, + adhesiveCurveList[1][iCurve], + adhesiveCurveList[0][iCurve], + adhesiveMatID, + plyAngle, + partNameID, + nModeledLayers + 1, + materialsUsed, + ) + return partNameID - -def printSineCurveBetweenTwoVerts(vBot,vTop,amplitude,direction): - nSineCurveSamplePoints=7 - cubit.cmd(f'create curve vertex {vBot} {vTop}') - - idCurve=get_last_id("curve") - - if round(amplitude,3)>0: - nStart=get_last_id("vertex")+1 - cubit.cmd(f'create vertex on curve {get_last_id("curve")} segment {nSineCurveSamplePoints-1}') - nEnd=get_last_id("vertex") - sineCurveSamplePoints=np.linspace(0,pi,nSineCurveSamplePoints) - vertexOffsets=amplitude*np.sin(sineCurveSamplePoints) - 
vertexList=list(range(nStart,nEnd+1)) - for iVert, vertexOffset in enumerate(vertexOffsets[1:-1]): #skip first and last point since those are considered fixed and the offset is zero anyway - cubit.cmd(f'move vertex {vertexList[iVert]} {direction} {vertexOffset}') - vertexList.insert(0,vBot) + + +def printSineCurveBetweenTwoVerts(vBot, vTop, amplitude, direction): + nSineCurveSamplePoints = 7 + cubit.cmd(f"create curve vertex {vBot} {vTop}") + + idCurve = get_last_id("curve") + + if round(amplitude, 3) > 0: + nStart = get_last_id("vertex") + 1 + cubit.cmd( + f'create vertex on curve {get_last_id("curve")} segment {nSineCurveSamplePoints-1}' + ) + nEnd = get_last_id("vertex") + sineCurveSamplePoints = np.linspace(0, pi, nSineCurveSamplePoints) + vertexOffsets = amplitude * np.sin(sineCurveSamplePoints) + vertexList = list(range(nStart, nEnd + 1)) + for iVert, vertexOffset in enumerate( + vertexOffsets[1:-1] + ): # skip first and last point since those are considered fixed and the offset is zero anyway + cubit.cmd(f"move vertex {vertexList[iVert]} {direction} {vertexOffset}") + vertexList.insert(0, vBot) vertexList.append(vTop) - cubit.cmd(f'create curve spline vertex {l2s(vertexList)}') - cubit.cmd(f'delete curve {idCurve}') + cubit.cmd(f"create curve spline vertex {l2s(vertexList)}") + cubit.cmd(f"delete curve {idCurve}") return get_last_id("curve") -def makeCrossSectionLayerAreas_web(surfaceDict,iStation,aftWebStack,foreWebStack,webInterfaceCurves,crosssectionParams,partNameID,crossSectionNormal,nModeledLayers,materialsUsed): - aftWebOverwrapThickness=(aftWebStack.layerThicknesses()[0]+aftWebStack.layerThicknesses()[-1])/1000 - foreWebOverwrapThickness=(foreWebStack.layerThicknesses()[0]+foreWebStack.layerThicknesses()[-1])/1000 - partName='web' - ### First create the first two layers. The first layer is the adhesive. The second layer is the web overwrap layer - for iCurveList,curveList in enumerate(webInterfaceCurves): - nBaseCurvesWeb=len(curveList) + +def makeCrossSectionLayerAreas_web( + surfaceDict, + iStation, + aftWebStack, + foreWebStack, + webInterfaceCurves, + crosssectionParams, + partNameID, + crossSectionNormal, + nModeledLayers, + materialsUsed, +): + aftWebOverwrapThickness = ( + aftWebStack.layerThicknesses()[0] + aftWebStack.layerThicknesses()[-1] + ) / 1000 + foreWebOverwrapThickness = ( + foreWebStack.layerThicknesses()[0] + foreWebStack.layerThicknesses()[-1] + ) / 1000 + partName = "web" + ### First create the first two layers. The first layer is the adhesive. 
The second layer is the web overwrap layer + for iCurveList, curveList in enumerate(webInterfaceCurves): + nBaseCurvesWeb = len(curveList) if iCurveList == 0: - lpHpside='HP' + lpHpside = "HP" else: - lpHpside='LP' - for iCurve,bottomCurve in enumerate(curveList): - - offSetSign=printOffsetDirectionCheck(bottomCurve,lpHpside,crossSectionNormal) - - - if iCurvelastRoundStation: - partName='flatTEadhesive' + ################### + ################### + + # Extend + curveStartOrEnd = "start" + extensionLength = 0.5 * cubit.curve(camberID).length() + camberID = extendCurveAtVertexToLength(camberID, extensionLength, curveStartOrEnd) + + lpHpCurveDict["camberID"] = camberID + lpHpCurveDict["flatBackCurveID"] = flatBackCurveID + + nModeledLayers = 3 + + lpHpside = "HP" + + partNameID, lpHpCurveDict = makeCrossSectionLayerAreas_perimeter( + surfaceDict, + iStation, + blade.stacks[1:6, iStation], + crosssectionParams, + thicknessScaling, + lpHpside, + isFlatback, + TEangle, + lastRoundStation, + partNameID, + nModeledLayers, + crossSectionNormal, + lpHpCurveDict, + materialsUsed, + ) + + lpHpside = "LP" + temp = blade.stacks[:, iStation] + temp = np.flip(temp) + partNameID, lpHpCurveDict = makeCrossSectionLayerAreas_perimeter( + surfaceDict, + iStation, + temp[1:6], + crosssectionParams, + thicknessScaling, + lpHpside, + isFlatback, + TEangle, + lastRoundStation, + partNameID, + nModeledLayers, + crossSectionNormal, + lpHpCurveDict, + materialsUsed, + ) + + partName = "shell" + partNameID = createSimplistSurfaceForTEorLEadhesive( + iStation, + surfaceDict, + partName, + lpHpCurveDict["LEadhesiveCurveList"], + crosssectionParams["adhesiveMatID"], + partNameID, + nModeledLayers, + materialsUsed, + ) + + partNameID = 0 # Reset since outer areoshell is complete (LE adhesive is accouted for as aeroshell) + if iStation > lastRoundStation: + partName = "flatTEadhesive" else: - partName='roundTEadhesive' - - partNameID=createSimplistSurfaceForTEorLEadhesive(iStation,surfaceDict,partName,lpHpCurveDict['TEadhesiveCurveList'],crosssectionParams['adhesiveMatID'],partNameID,nModeledLayers,materialsUsed) - - - - birdsMouthVerts=[] + partName = "roundTEadhesive" + + partNameID = createSimplistSurfaceForTEorLEadhesive( + iStation, + surfaceDict, + partName, + lpHpCurveDict["TEadhesiveCurveList"], + crosssectionParams["adhesiveMatID"], + partNameID, + nModeledLayers, + materialsUsed, + ) + + birdsMouthVerts = [] if hasWebs: - partNameID=0 #Reset since outer areoshell is complete (LE adhesive is accouted for as aeroshell) - - partNameID,birdsMouthVerts=makeCrossSectionLayerAreas_web(surfaceDict,iStation,aftWebStack,foreWebStack,lpHpCurveDict['webInterfaceCurves'],crosssectionParams,partNameID,crossSectionNormal,nModeledLayers,materialsUsed) - - - - - parseString=f'with name "*station{iStation}*"' - crossSectionalSurfaces=parse_cubit_list('surface', parseString) + partNameID = 0 # Reset since outer areoshell is complete (LE adhesive is accouted for as aeroshell) + + partNameID, birdsMouthVerts = makeCrossSectionLayerAreas_web( + surfaceDict, + iStation, + aftWebStack, + foreWebStack, + lpHpCurveDict["webInterfaceCurves"], + crosssectionParams, + partNameID, + crossSectionNormal, + nModeledLayers, + materialsUsed, + ) + + parseString = f'with name "*station{iStation}*"' + crossSectionalSurfaces = parse_cubit_list("surface", parseString) for surfaceID in crossSectionalSurfaces: - n=get_surface_normal(surfaceID) - if n[2]<0: - cubit.cmd(f'reverse surface {surfaceID}') + n = get_surface_normal(surfaceID) + if n[2] < 0: + 
cubit.cmd(f"reverse surface {surfaceID}") - cubit.cmd(f'delete vertex all with Is_Free') + cubit.cmd(f"delete vertex all with Is_Free") return birdsMouthVerts - -def writeVABSinput(surfaceDict,blade,crosssectionParams,directory,fileName, surfaceIDs,materialsUsed): - #Write VABS inputfile - if crosssectionParams['elementShape'].lower() == 'quad': - expandedConnectivityString='face' - elif crosssectionParams['elementShape'].lower() == 'tri': - expandedConnectivityString='tri' +def writeVABSinput( + surfaceDict, + blade, + crosssectionParams, + directory, + fileName, + surfaceIDs, + materialsUsed, +): + # Write VABS inputfile + if crosssectionParams["elementShape"].lower() == "quad": + expandedConnectivityString = "face" + elif crosssectionParams["elementShape"].lower() == "tri": + expandedConnectivityString = "tri" else: - raise NameError(f'Element type: {crosssectionParams["elementShape"]} not supported' ) - - - + raise NameError( + f'Element type: {crosssectionParams["elementShape"]} not supported' + ) ######Write VABS input file - nnodes=get_node_count() - nelem=get_element_count() - nmate=len(materialsUsed) - nlayer=len(surfaceIDs) #One VABS layer is defined for each surface - + nnodes = get_node_count() + nelem = get_element_count() + nmate = len(materialsUsed) + nlayer = len(surfaceIDs) # One VABS layer is defined for each surface - pathName=directory+'/'+fileName + pathName = directory + "/" + fileName if not os.path.exists(directory): os.makedirs(directory) - - with open(pathName, 'w') as f: - f.write(f'1 {nlayer}\n') #New format. One layer definiton for each element. - f.write('1 0 0\n') - f.write('0 0 0 0\n') - f.write(f'{nnodes} {nelem} {nmate}\n') - #Write Nodes + + with open(pathName, "w") as f: + f.write(f"1 {nlayer}\n") # New format. One layer definiton for each element. 
+ f.write("1 0 0\n") + f.write("0 0 0 0\n") + f.write(f"{nnodes} {nelem} {nmate}\n") + # Write Nodes for iNd in range(nnodes): - nodeID=iNd+1 - coords=list(get_nodal_coordinates(nodeID)) - f.write(f'{nodeID} {coords[0]} {coords[1]}\n') - f.write('\n\n') - #Write Elements - maxNumberOfPossibleNodes=9 + nodeID = iNd + 1 + coords = list(get_nodal_coordinates(nodeID)) + f.write(f"{nodeID} {coords[0]} {coords[1]}\n") + f.write("\n\n") + # Write Elements + maxNumberOfPossibleNodes = 9 for iEl in range(nelem): - elementID=iEl+1 - nodesIDs=cubit.get_expanded_connectivity(crosssectionParams['elementShape'],elementID) - - if nodesIDs[0]==0 or nodesIDs[0]==0.0: + elementID = iEl + 1 + nodesIDs = cubit.get_expanded_connectivity( + crosssectionParams["elementShape"], elementID + ) + + if nodesIDs[0] == 0 or nodesIDs[0] == 0.0: foo - tempStr=str(nodesIDs) #convert tuple to string - tempStr=tempStr.replace('(','') - tempStr=tempStr.replace(')','') - tempStr=tempStr.replace(',',' ') - nZeros=maxNumberOfPossibleNodes-len(nodesIDs) - tempStr2=str([0]*nZeros) - tempStr2=tempStr2.replace('[','') - tempStr2=tempStr2.replace(']','') - tempStr2=tempStr2.replace(',',' ') - f.write(f'{elementID} {tempStr} {tempStr2}\n') - #Write ply angle for all but the TE adhesive + tempStr = str(nodesIDs) # convert tuple to string + tempStr = tempStr.replace("(", "") + tempStr = tempStr.replace(")", "") + tempStr = tempStr.replace(",", " ") + nZeros = maxNumberOfPossibleNodes - len(nodesIDs) + tempStr2 = str([0] * nZeros) + tempStr2 = tempStr2.replace("[", "") + tempStr2 = tempStr2.replace("]", "") + tempStr2 = tempStr2.replace(",", " ") + f.write(f"{elementID} {tempStr} {tempStr2}\n") + # Write ply angle for all but the TE adhesive for iSurface, surfaceID in enumerate(surfaceIDs): - for iEl,elementID in enumerate(get_surface_quads(surfaceID)): - nodesIDs=cubit.get_expanded_connectivity('face',elementID) - coords=[] - for iNd,nodeID in enumerate(nodesIDs): + for iEl, elementID in enumerate(get_surface_quads(surfaceID)): + nodesIDs = cubit.get_expanded_connectivity("face", elementID) + coords = [] + for iNd, nodeID in enumerate(nodesIDs): coords.append(list(get_nodal_coordinates(nodeID))) - coords=np.array(coords) + coords = np.array(coords) # #######For Plotting - find the larges element side length ####### # distances=[] # for iNd,nodeID in enumerate(nodesIDs): - # for jNd,nodeIDj in enumerate(nodesIDs): - # distances.append(norm(vectSub(coords[iNd],coords[jNd]))) + # for jNd,nodeIDj in enumerate(nodesIDs): + # distances.append(norm(vectSub(coords[iNd],coords[jNd]))) # length=max(distances) # #######For Plotting - find the larges element side length ####### - coords=np.mean(coords,0) - #coords=cubit.get_center_point(crosssectionParams['elementShape'], elementID) - - - # minDist=inf #initialize - # closestCurveID=nan #initialize - # #Since there are possibly many curves due to the offset operation, see which curve is closeset to element center - # for iCurve, curveID in enumerate(curves): - # temp=cubit.curve(curveID).closest_point(coords) - # distance=getDist(coords,temp)[0] - # if distance < minDist: - # minDist=distance - # closestCurveID=curveID - - curveIDforMatOri=cubit.surface(surfaceID).curves()[0] - curveLocationForTangent=curveIDforMatOri.closest_point(coords) - x=curveIDforMatOri.tangent(curveLocationForTangent)[0] - y=curveIDforMatOri.tangent(curveLocationForTangent)[1] - z=curveIDforMatOri.tangent(curveLocationForTangent)[2] - tangentDirection=vectNorm([x,y,z]) #Unit vector of tangent - 
theta1=math.atan2(tangentDirection[1],tangentDirection[0])*180/pi - f.write(f'{elementID} {iSurface+1} {theta1}\n') + coords = np.mean(coords, 0) + # coords=cubit.get_center_point(crosssectionParams['elementShape'], elementID) + + # minDist=inf #initialize + # closestCurveID=nan #initialize + # #Since there are possibly many curves due to the offset operation, see which curve is closeset to element center + # for iCurve, curveID in enumerate(curves): + # temp=cubit.curve(curveID).closest_point(coords) + # distance=getDist(coords,temp)[0] + # if distance < minDist: + # minDist=distance + # closestCurveID=curveID + + curveIDforMatOri = cubit.surface(surfaceID).curves()[0] + curveLocationForTangent = curveIDforMatOri.closest_point(coords) + x = curveIDforMatOri.tangent(curveLocationForTangent)[0] + y = curveIDforMatOri.tangent(curveLocationForTangent)[1] + z = curveIDforMatOri.tangent(curveLocationForTangent)[2] + tangentDirection = vectNorm([x, y, z]) # Unit vector of tangent + theta1 = math.atan2(tangentDirection[1], tangentDirection[0]) * 180 / pi + f.write(f"{elementID} {iSurface+1} {theta1}\n") # #######Only needed For Plotting Orientation Check####### # cubit.create_vertex(coords[0],coords[1],coords[2]) # iVert1=get_last_id("vertex") @@ -1430,39 +1974,45 @@ def writeVABSinput(surfaceDict,blade,crosssectionParams,directory,fileName, surf # cubit.cmd(f'create curve vertex {iVert1} {iVert2}') # #######Only needed For Plotting Orientation Check####### ####Normal to curve - # axialDirection=crossSectionNormal #There will be a slight error here for highly tapeded regions - # normalDirection=crossProd(axialDirection,tangentDirection) - # #######Only needed For Plotting Orientation Check####### - # cubit.create_vertex(coords[0]+length*normalDirection[0],coords[1]+length*normalDirection[1],coords[2]+length*normalDirection[2]) - # c - # cubit.cmd(f'create curve vertex {iVert1} {iVert2}') - # #######Only needed For Plotting Orientation Check####### - #Define Plies + # axialDirection=crossSectionNormal #There will be a slight error here for highly tapeded regions + # normalDirection=crossProd(axialDirection,tangentDirection) + # #######Only needed For Plotting Orientation Check####### + # cubit.create_vertex(coords[0]+length*normalDirection[0],coords[1]+length*normalDirection[1],coords[2]+length*normalDirection[2]) + # c + # cubit.cmd(f'create curve vertex {iVert1} {iVert2}') + # #######Only needed For Plotting Orientation Check####### + # Define Plies for iSurface, surfaceID in enumerate(surfaceIDs): - materialID=list(materialsUsed).index(surfaceDict[surfaceID]['materialName'])+1 - plyAngle=surfaceDict[surfaceID]['plyAngle'] - f.write(f'{iSurface+1} {materialID} {plyAngle}\n') - #Define Materials - for imat,matName in enumerate(materialsUsed): - materialID=imat+1 - material=blade.materials[matName] - f.write(f'{materialID} {1} \n') - f.write(f'{material.ex} {material.ey} {material.ez}\n') - f.write(f'{material.gxy} {material.gxz} {material.gyz}\n') - f.write(f'{material.prxy} {material.prxz} {material.pryz}\n') - f.write(f'{material.density}\n') - print('Done writing VABS input') + materialID = ( + list(materialsUsed).index(surfaceDict[surfaceID]["materialName"]) + 1 + ) + plyAngle = surfaceDict[surfaceID]["plyAngle"] + f.write(f"{iSurface+1} {materialID} {plyAngle}\n") + # Define Materials + for imat, matName in enumerate(materialsUsed): + materialID = imat + 1 + material = blade.materials[matName] + f.write(f"{materialID} {1} \n") + f.write(f"{material.ex} {material.ey} {material.ez}\n") + 
f.write(f"{material.gxy} {material.gxz} {material.gyz}\n") + f.write(f"{material.prxy} {material.prxz} {material.pryz}\n") + f.write(f"{material.density}\n") + print("Done writing VABS input") return -#Main script fuctions -def getTEangle(hpKeyCurve,lpKeyCurve,fraction): - c1=cubit.curve(hpKeyCurve) - c2=cubit.curve(lpKeyCurve) - coords=list(c1.position_from_fraction(fraction)) - v1=np.array(c1.tangent(coords)) - coords=list(c2.position_from_fraction(fraction)) - v2=np.array(c2.tangent(coords)) - - return math.degrees(math.acos(v1.dot(np.transpose(v2)) / (np.linalg.norm(v1) * np.linalg.norm(v2)))) +# Main script fuctions + + +def getTEangle(hpKeyCurve, lpKeyCurve, fraction): + c1 = cubit.curve(hpKeyCurve) + c2 = cubit.curve(lpKeyCurve) + + coords = list(c1.position_from_fraction(fraction)) + v1 = np.array(c1.tangent(coords)) + coords = list(c2.position_from_fraction(fraction)) + v2 = np.array(c2.tangent(coords)) + return math.degrees( + math.acos(v1.dot(np.transpose(v2)) / (np.linalg.norm(v1) * np.linalg.norm(v2))) + ) diff --git a/src/pynumad/analysis/cubit/solidModelUtils.py b/src/pynumad/analysis/cubit/solidModelUtils.py index 33189d3..c1fb959 100644 --- a/src/pynumad/analysis/cubit/solidModelUtils.py +++ b/src/pynumad/analysis/cubit/solidModelUtils.py @@ -1,295 +1,350 @@ from cubit import * -from PyCubed_Main import * +from PyCubed_Main import * from pynumad.analysis.cubit.cubitUtils import printSineCurveBetweenTwoVerts import numpy as np import re -def getOrderedList(partName): - - orderedList=[] - surfacesToConnect=[1] #Initialize to enter loop - iSurface=-1 #Initialize +def getOrderedList(partName): + orderedList = [] + surfacesToConnect = [1] # Initialize to enter loop + iSurface = -1 # Initialize while surfacesToConnect: - iSurface+=1 - parseString=f'with name "*{partName}*surface{iSurface+1}"' - surfacesToConnect=parse_cubit_list('surface', parseString) - + iSurface += 1 + parseString = f'with name "*{partName}*surface{iSurface+1}"' + surfacesToConnect = parse_cubit_list("surface", parseString) + if surfacesToConnect: orderedList.append(surfacesToConnect) return orderedList -def makeSpanwiseSplines(surfaceDict,orderedList): - spanwiseSplines=[] + + +def makeSpanwiseSplines(surfaceDict, orderedList): + spanwiseSplines = [] for alignedSurfaces in orderedList: - tempList=[] + tempList = [] for iPoint in range(4): - vertexList=[] + vertexList = [] for index, iSurface in enumerate(alignedSurfaces): - vertexID=surfaceDict[iSurface]['verts'][iPoint] + vertexID = surfaceDict[iSurface]["verts"][iPoint] vertexList.append(vertexID) - vertexName=cubit.get_entity_name("vertex",vertexID) + vertexName = cubit.get_entity_name("vertex", vertexID) - curve=cubit.cmd(f'create curve spline vertex {l2s(vertexList)}') + curve = cubit.cmd(f"create curve spline vertex {l2s(vertexList)}") tempList.append(get_last_id("curve")) spanwiseSplines.append(tempList) return spanwiseSplines - -def makeOneVolume(currentSurfaceID,nextSurfaceID,spanwiseSplinesForAvolume,surfaceDict,iStationEnd): - cubit.cmd(f'surface {currentSurfaceID} copy') - currentSurfaceIDcopy=get_last_id("surface") - - cubit.cmd(f'surface {nextSurfaceID} copy') - nextSurfaceIDcopy=get_last_id("surface") - - currentSurface=cubit.surface(currentSurfaceID) - nextSurface=cubit.surface(nextSurfaceID) - - currentSurfaceCurves=surfaceDict[currentSurfaceID]['curves'] - nextSurfaceCurves=surfaceDict[nextSurfaceID]['curves'] - - currentSurfaceVerteces=surfaceDict[currentSurfaceID]['verts'] - nextSurfaceVerteces=surfaceDict[nextSurfaceID]['verts'] - - 
spanwiseSplinesForAvolume.append(spanwiseSplinesForAvolume[0]) #Make list circle back - - transverseSurfaceIDs=[] + + +def makeOneVolume( + currentSurfaceID, nextSurfaceID, spanwiseSplinesForAvolume, surfaceDict, iStationEnd +): + cubit.cmd(f"surface {currentSurfaceID} copy") + currentSurfaceIDcopy = get_last_id("surface") + + cubit.cmd(f"surface {nextSurfaceID} copy") + nextSurfaceIDcopy = get_last_id("surface") + + currentSurface = cubit.surface(currentSurfaceID) + nextSurface = cubit.surface(nextSurfaceID) + + currentSurfaceCurves = surfaceDict[currentSurfaceID]["curves"] + nextSurfaceCurves = surfaceDict[nextSurfaceID]["curves"] + + currentSurfaceVerteces = surfaceDict[currentSurfaceID]["verts"] + nextSurfaceVerteces = surfaceDict[nextSurfaceID]["verts"] + + spanwiseSplinesForAvolume.append( + spanwiseSplinesForAvolume[0] + ) # Make list circle back + + transverseSurfaceIDs = [] for iCurve in range(len(currentSurfaceCurves)): - cubit.cmd(f'create surface curve {currentSurfaceCurves[iCurve]} {spanwiseSplinesForAvolume[iCurve]} {nextSurfaceCurves[iCurve]} {spanwiseSplinesForAvolume[iCurve+1]}') + cubit.cmd( + f"create surface curve {currentSurfaceCurves[iCurve]} {spanwiseSplinesForAvolume[iCurve]} {nextSurfaceCurves[iCurve]} {spanwiseSplinesForAvolume[iCurve+1]}" + ) transverseSurfaceIDs.append(get_last_id("surface")) - - surfName=cubit.get_entity_name("surface", currentSurface.id()).split('_') - regex = re.compile('layer') + + surfName = cubit.get_entity_name("surface", currentSurface.id()).split("_") + regex = re.compile("layer") layerName = [string for string in surfName if re.match(regex, string)][0] - stringName=layerName+'_bottomFace' + stringName = layerName + "_bottomFace" cubit.cmd(f'surface {transverseSurfaceIDs[0]} rename "{stringName}"') - stringName=layerName+'_topFace' + stringName = layerName + "_topFace" cubit.cmd(f'surface {transverseSurfaceIDs[2]} rename "{stringName}"') - - #cubit.cmd(f'save as "python1.cub" overwrite') - #raise Exception(f'Volume "{volumeName}" creation failed') - #Create Volume - #nStart=get_last_id("volume") - cubit.cmd(f'create volume surface {currentSurfaceIDcopy} {nextSurfaceIDcopy} {l2s(transverseSurfaceIDs)} noheal') - #nEnd=get_last_id("volume") - #print(f'nStart: {nStart}, nEnd: {nEnd}') - - if 'Station'+str(iStationEnd) in cubit.get_entity_name("surface",nextSurfaceID): #This if statement is needed for componets that may have been droped between the last station and the second to last station - volumeName=cubit.get_entity_name("surface",nextSurfaceID) + + # cubit.cmd(f'save as "python1.cub" overwrite') + # raise Exception(f'Volume "{volumeName}" creation failed') + # Create Volume + # nStart=get_last_id("volume") + cubit.cmd( + f"create volume surface {currentSurfaceIDcopy} {nextSurfaceIDcopy} {l2s(transverseSurfaceIDs)} noheal" + ) + # nEnd=get_last_id("volume") + # print(f'nStart: {nStart}, nEnd: {nEnd}') + + if "Station" + str(iStationEnd) in cubit.get_entity_name( + "surface", nextSurfaceID + ): # This if statement is needed for componets that may have been droped between the last station and the second to last station + volumeName = cubit.get_entity_name("surface", nextSurfaceID) else: - volumeName=cubit.get_entity_name("surface",currentSurfaceID) - if len(cubit.volume(get_last_id("volume")).surfaces())<6: - print(f'\n\n ERROR with:\n\n create volume surface {currentSurfaceIDcopy} {nextSurfaceIDcopy} {l2s(transverseSurfaceIDs)} ') - print(f'currentSurfaceIDcopy: {currentSurfaceIDcopy}') - print(f'nextSurfaceIDcopy: {nextSurfaceIDcopy}') 
- print(f'spanwiseSplinesForAvolume: {spanwiseSplinesForAvolume}') - cubit.cmd(f'save as "python1.cub" overwrite') + volumeName = cubit.get_entity_name("surface", currentSurfaceID) + if len(cubit.volume(get_last_id("volume")).surfaces()) < 6: + print( + f"\n\n ERROR with:\n\n create volume surface {currentSurfaceIDcopy} {nextSurfaceIDcopy} {l2s(transverseSurfaceIDs)} " + ) + print(f"currentSurfaceIDcopy: {currentSurfaceIDcopy}") + print(f"nextSurfaceIDcopy: {nextSurfaceIDcopy}") + print(f"spanwiseSplinesForAvolume: {spanwiseSplinesForAvolume}") + cubit.cmd(f'save as "python1.cub" overwrite') raise Exception(f'Volume "{volumeName}" creation failed') - - volumeName=volumeName.replace('surface','volume') + + volumeName = volumeName.replace("surface", "volume") cubit.cmd(f'volume {get_last_id("volume")} rename "{volumeName}"') - -def getspanwiseSplinesForAvolume(iSpan,nCrossSections,spanwiseSplinesForOneSurface,nextSurfaceVerteces): - #Split off spanwise curves for a single volume and store them - if iSpan1: - for iSpan in range(nCrossSections-1): + + +def makeAeroshell(surfaceDict, orderedList, meshVolList, iStationEnd): + # nIntervals=3 + spanwiseSplines = makeSpanwiseSplines(surfaceDict, orderedList) + nCrossSections = len(orderedList[0]) + nPartSurfaceIDs = len(orderedList) + if nCrossSections > 1: + for iSpan in range(nCrossSections - 1): for partSurfaceIDs in range(nPartSurfaceIDs): - currentSurfaceID=orderedList[partSurfaceIDs][iSpan] - nextSurfaceID=orderedList[partSurfaceIDs][iSpan+1] - spanwiseSplinesForAvolume,spanwiseSplines[partSurfaceIDs]=getspanwiseSplinesForAvolume(iSpan,nCrossSections,spanwiseSplines[partSurfaceIDs],surfaceDict[nextSurfaceID]['verts']) - makeOneVolume(currentSurfaceID,nextSurfaceID,spanwiseSplinesForAvolume,surfaceDict,iStationEnd) + currentSurfaceID = orderedList[partSurfaceIDs][iSpan] + nextSurfaceID = orderedList[partSurfaceIDs][iSpan + 1] + ( + spanwiseSplinesForAvolume, + spanwiseSplines[partSurfaceIDs], + ) = getspanwiseSplinesForAvolume( + iSpan, + nCrossSections, + spanwiseSplines[partSurfaceIDs], + surfaceDict[nextSurfaceID]["verts"], + ) + makeOneVolume( + currentSurfaceID, + nextSurfaceID, + spanwiseSplinesForAvolume, + surfaceDict, + iStationEnd, + ) meshVolList.append(get_last_id("volume")) - #assignIntervals(get_last_id("volume"),nIntervals) - + # assignIntervals(get_last_id("volume"),nIntervals) return meshVolList -def verifyWebCuttingAmplitude(blade,amplitude,tolerance,iStationFirstWeb,iStationLastWeb): - #Check to make sure that the amplitude does not result sharp volumes by cutting near a station location - for iStationCheck in range(iStationFirstWeb+1,iStationLastWeb+1): - bladeSegmentLength=blade.ispan[iStationCheck]-blade.ispan[iStationFirstWeb] - gap=bladeSegmentLength-amplitude - #print(f'bladeSegmentLength: {bladeSegmentLength}\ngap {gap}') +def verifyWebCuttingAmplitude( + blade, amplitude, tolerance, iStationFirstWeb, iStationLastWeb +): + # Check to make sure that the amplitude does not result sharp volumes by cutting near a station location + for iStationCheck in range(iStationFirstWeb + 1, iStationLastWeb + 1): + bladeSegmentLength = blade.ispan[iStationCheck] - blade.ispan[iStationFirstWeb] + gap = bladeSegmentLength - amplitude + # print(f'bladeSegmentLength: {bladeSegmentLength}\ngap {gap}') if abs(gap) > tolerance: break else: if gap > 0: - amplitude=bladeSegmentLength-tolerance + amplitude = bladeSegmentLength - tolerance else: - amplitude=bladeSegmentLength+tolerance + amplitude = bladeSegmentLength + tolerance break - 
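# Aside: the check in verifyWebCuttingAmplitude keeps the birds-mouth cut from
# landing within `tolerance` of a station plane.  The same nudge in isolation,
# with made-up numbers:
def nudge_amplitude(amplitude, segment_length, tolerance):
    gap = segment_length - amplitude
    if abs(gap) > tolerance:           # far enough from the station plane: keep it
        return amplitude
    # otherwise move the cut `tolerance` short of (or past) the station plane
    return segment_length - tolerance if gap > 0 else segment_length + tolerance

print(nudge_amplitude(0.98, 1.00, 0.05))   # too close -> nudged to 0.95
print(nudge_amplitude(0.70, 1.00, 0.05))   # fine      -> stays 0.70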
#print(f'new amplitude {amplitude} \nnew gap = {bladeSegmentLength-amplitude}') + # print(f'new amplitude {amplitude} \nnew gap = {bladeSegmentLength-amplitude}') return amplitude -def makeBirdsMouth(blade,birdsMouthVerts,amplitudeFraction,iStationFirstWeb,iStationLastWeb): + +def makeBirdsMouth( + blade, birdsMouthVerts, amplitudeFraction, iStationFirstWeb, iStationLastWeb +): ### Make birds mouth volume that will cut the web volumes ### ############################################################# - - #This function must be ran before merging the volumes since "birdsMouthVerts" will change during mergeing - - v1=cubit.vertex(birdsMouthVerts[0]) - v2=cubit.vertex(birdsMouthVerts[1]) - distance=getDist(v1.coordinates(),v2.coordinates())[0] - create_curve(v1,v2) - #Make the birds mouth cut-out start 5% from where the web meets the aeroshell - cubit.cmd(f'create vertex on curve {get_last_id("curve")} distance {0.05*distance} from start') - cubit.cmd(f'create vertex on curve {get_last_id("curve")} distance {0.05*distance} from end') - v1=cubit.vertex(get_last_id("vertex")-1) - v2=cubit.vertex(get_last_id("vertex")) - straightLine=create_curve(v1,v2) - - - - amplitude=amplitudeFraction*distance - tolerance=distance*0.05 - - amplitude=verifyWebCuttingAmplitude(blade,amplitude,tolerance,iStationFirstWeb,iStationLastWeb) - - - - curvedLine=cubit.curve(printSineCurveBetweenTwoVerts(v1.id(),v2.id(),amplitude,'z')) - cubit.cmd(f'create surface skin curve {curvedLine.id()} {straightLine.id()}') - baseSurface=get_last_id("surface") - - midPoint=list(curvedLine.position_from_fraction(0.5)) - tangent=straightLine.tangent(midPoint) - - #Get the cross-section normal - parseString=f'in surface with name "*webStation{iStationFirstWeb}*"' - surfaceID=parse_cubit_list('surface', parseString)[0] #Pick the first surface in this list since all on same plane - coords=cubit.get_center_point("surface", surfaceID) - surfaceNormal=cubit.surface(surfaceID).normal_at(coords) - cutBlockLength=5*max(blade.ichord) - sweepDirection=np.array(vectNorm(crossProd(list(tangent),list(surfaceNormal)))) - - cubit.cmd(f'sweep surface {baseSurface} direction {l2s(sweepDirection)} distance {cutBlockLength}') - cubit.cmd(f'move volume {get_last_id("volume")} x {-cutBlockLength/2*sweepDirection[0]} y {-cutBlockLength/2*sweepDirection[1]} z {-cutBlockLength/2*sweepDirection[2]}') - - - cuttingVolume=get_last_id("volume") - - - parseString=f'with name "*webStation*"' - webVolumes=parse_cubit_list('volume', parseString) - - cubit.cmd(f'subtract volume {cuttingVolume} from volume {l2s(webVolumes)}') + # This function must be ran before merging the volumes since "birdsMouthVerts" will change during mergeing + + v1 = cubit.vertex(birdsMouthVerts[0]) + v2 = cubit.vertex(birdsMouthVerts[1]) + distance = getDist(v1.coordinates(), v2.coordinates())[0] + create_curve(v1, v2) + + # Make the birds mouth cut-out start 5% from where the web meets the aeroshell + cubit.cmd( + f'create vertex on curve {get_last_id("curve")} distance {0.05*distance} from start' + ) + cubit.cmd( + f'create vertex on curve {get_last_id("curve")} distance {0.05*distance} from end' + ) + v1 = cubit.vertex(get_last_id("vertex") - 1) + v2 = cubit.vertex(get_last_id("vertex")) + straightLine = create_curve(v1, v2) + + amplitude = amplitudeFraction * distance + tolerance = distance * 0.05 + + amplitude = verifyWebCuttingAmplitude( + blade, amplitude, tolerance, iStationFirstWeb, iStationLastWeb + ) + + curvedLine = cubit.curve( + printSineCurveBetweenTwoVerts(v1.id(), v2.id(), 
amplitude, "z") + ) + cubit.cmd(f"create surface skin curve {curvedLine.id()} {straightLine.id()}") + baseSurface = get_last_id("surface") + + midPoint = list(curvedLine.position_from_fraction(0.5)) + tangent = straightLine.tangent(midPoint) + + # Get the cross-section normal + parseString = f'in surface with name "*webStation{iStationFirstWeb}*"' + surfaceID = parse_cubit_list("surface", parseString)[ + 0 + ] # Pick the first surface in this list since all on same plane + coords = cubit.get_center_point("surface", surfaceID) + surfaceNormal = cubit.surface(surfaceID).normal_at(coords) + cutBlockLength = 5 * max(blade.ichord) + sweepDirection = np.array(vectNorm(crossProd(list(tangent), list(surfaceNormal)))) + + cubit.cmd( + f"sweep surface {baseSurface} direction {l2s(sweepDirection)} distance {cutBlockLength}" + ) + cubit.cmd( + f'move volume {get_last_id("volume")} x {-cutBlockLength/2*sweepDirection[0]} y {-cutBlockLength/2*sweepDirection[1]} z {-cutBlockLength/2*sweepDirection[2]}' + ) + + cuttingVolume = get_last_id("volume") + + parseString = f'with name "*webStation*"' + webVolumes = parse_cubit_list("volume", parseString) + + cubit.cmd(f"subtract volume {cuttingVolume} from volume {l2s(webVolumes)}") return +# cubit.cmd('open "/home/ecamare/myprojects/bar/cubitDev/python/python0.cub"') -#cubit.cmd('open "/home/ecamare/myprojects/bar/cubitDev/python/python0.cub"') def getApproximateThicknessDirectionForVolume(volumeID): - #This function is used when assigning material orientations - - #Get thickness direction tangents - approximateThicknessDirection=[] + # This function is used when assigning material orientations + + # Get thickness direction tangents + approximateThicknessDirection = [] for currentCurve in cubit.volume(volumeID).curves(): - curveName=cubit.get_entity_name("curve", currentCurve.id()) - if 'layerThickness' in curveName: - coords=currentCurve.position_from_fraction(0.5) + curveName = cubit.get_entity_name("curve", currentCurve.id()) + if "layerThickness" in curveName: + coords = currentCurve.position_from_fraction(0.5) approximateThicknessDirection.append(currentCurve.tangent(coords)) - approximateThicknessDirection=np.array(approximateThicknessDirection) - nThicknessCurves,_ =approximateThicknessDirection.shape + approximateThicknessDirection = np.array(approximateThicknessDirection) + nThicknessCurves, _ = approximateThicknessDirection.shape - if nThicknessCurves==4: #All other cases - return np.mean(approximateThicknessDirection,0) - elif nThicknessCurves ==8: #LE adhesive case and round TE adhesive + if nThicknessCurves == 4: # All other cases + return np.mean(approximateThicknessDirection, 0) + elif nThicknessCurves == 8: # LE adhesive case and round TE adhesive return 0 - elif nThicknessCurves ==6: #Web overwrap + elif nThicknessCurves == 6: # Web overwrap # Take the mean of all curves with name 'layerThickness' - mean=np.mean(approximateThicknessDirection,0) - - errorList=[] + mean = np.mean(approximateThicknessDirection, 0) + + errorList = [] for i in range(nThicknessCurves): - diff=approximateThicknessDirection[i]-mean + diff = approximateThicknessDirection[i] - mean - errorList.append(sqrt(dotProd(diff,diff))) - sortIndex=np.argsort(errorList)[:4] #Take the first four. This discards the two directions with the largest deviation from the average + errorList.append(sqrt(dotProd(diff, diff))) + sortIndex = np.argsort(errorList)[ + :4 + ] # Take the first four. 
This discards the two directions with the largest deviation from the average - return np.mean(approximateThicknessDirection[sortIndex,:],0) + return np.mean(approximateThicknessDirection[sortIndex, :], 0) else: - raise ValueError('The number of thickness curves in volume is unexpected. Cannot assign material orientation' ) - + raise ValueError( + "The number of thickness curves in volume is unexpected. Cannot assign material orientation" + ) + return -def getMatOriSurface(volumeID,spanwiseMatOriCurve): - #This function is used when assigning material orientations - #This gets returns the surface within a volume that will be used to get surface normals. - #The sign +-1 is also returned since some of the surfaces are oriented the wrong way - - approximateThicknessDirection=getApproximateThicknessDirectionForVolume(volumeID) - - #Create a list of surface IDs in the given volume - surfaceIDs=[] - volumeSurfaces=cubit.volume(volumeID).surfaces() + +def getMatOriSurface(volumeID, spanwiseMatOriCurve): + # This function is used when assigning material orientations + # This gets returns the surface within a volume that will be used to get surface normals. + # The sign +-1 is also returned since some of the surfaces are oriented the wrong way + + approximateThicknessDirection = getApproximateThicknessDirectionForVolume(volumeID) + + # Create a list of surface IDs in the given volume + surfaceIDs = [] + volumeSurfaces = cubit.volume(volumeID).surfaces() for currentSurface in volumeSurfaces: surfaceIDs.append(currentSurface.id()) - - #Eliminate surfaces that have two curves named thickness: - surfaceCT=0 + + # Eliminate surfaces that have two curves named thickness: + surfaceCT = 0 for currentSurface in volumeSurfaces: - curveCT=0 #Counts the number of curves in the surface with name 'layerThickness' + curveCT = ( + 0 # Counts the number of curves in the surface with name 'layerThickness' + ) for currentCurve in currentSurface.curves(): - curveName=cubit.get_entity_name("curve", currentCurve.id()) - if 'layerThickness' in curveName: - curveCT+=1 + curveName = cubit.get_entity_name("curve", currentCurve.id()) + if "layerThickness" in curveName: + curveCT += 1 - if curveCT>=2: - surfaceCT+=1 + if curveCT >= 2: + surfaceCT += 1 surfaceIDs.remove(currentSurface.id()) - - - #surfaceIDs now has the list of surfaces w/o thickness curves - if len(surfaceIDs)==2 or len(surfaceIDs)==1: - if len(surfaceIDs)==2: - surfaceName=cubit.get_entity_name("surface", surfaceIDs[0]) - if 'topFace' in surfaceName: - surfaceID=surfaceIDs[0] + + # surfaceIDs now has the list of surfaces w/o thickness curves + if len(surfaceIDs) == 2 or len(surfaceIDs) == 1: + if len(surfaceIDs) == 2: + surfaceName = cubit.get_entity_name("surface", surfaceIDs[0]) + if "topFace" in surfaceName: + surfaceID = surfaceIDs[0] else: - surfaceID=surfaceIDs[-1] - elif len(surfaceIDs)==1: #Web overwrap - surfaceID=surfaceIDs[0] - - coords=cubit.get_center_point("surface", surfaceID) - surfaceNormal=cubit.surface(surfaceID).normal_at(coords) - - if dotProd(surfaceNormal,approximateThicknessDirection) >0: - sign=1.0 + surfaceID = surfaceIDs[-1] + elif len(surfaceIDs) == 1: # Web overwrap + surfaceID = surfaceIDs[0] + + coords = cubit.get_center_point("surface", surfaceID) + surfaceNormal = cubit.surface(surfaceID).normal_at(coords) + + if dotProd(surfaceNormal, approximateThicknessDirection) > 0: + sign = 1.0 else: - sign=-1.0 - elif len(surfaceIDs)==0: #LE adhesive and/or TE adhesive for round cross-sections - 
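# Aside: for the six-curve (web overwrap) case in
# getApproximateThicknessDirectionForVolume, the thickness direction is the mean
# of the four tangents closest to the overall mean; the two largest outliers are
# discarded.  The same reduction on made-up unit vectors:
import numpy as np

tangents = np.array([
    [0.00, 1.00, 0.00], [ 0.05, 0.99, 0.00], [-0.05, 0.99, 0.00],
    [0.00, 0.98, 0.02], [ 0.90, 0.10, 0.00], [-0.90, 0.10, 0.00],  # last two deviate most
])
mean = tangents.mean(axis=0)
errors = np.linalg.norm(tangents - mean, axis=1)   # deviation of each tangent
keep = np.argsort(errors)[:4]                      # four closest to the mean
thickness_direction = tangents[keep].mean(axis=0)
print(thickness_direction)                         # points roughly along +y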
#print(f'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~volumeID {volumeID}') - surfaceID=False - sign=1.0 + sign = -1.0 + elif ( + len(surfaceIDs) == 0 + ): # LE adhesive and/or TE adhesive for round cross-sections + # print(f'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~volumeID {volumeID}') + surfaceID = False + sign = 1.0 else: - raise ValueError('The number of thickness curves in volume is unexpected. Cannot assign material orientation' ) - + raise ValueError( + "The number of thickness curves in volume is unexpected. Cannot assign material orientation" + ) return surfaceID, sign diff --git a/src/pynumad/analysis/makeModels.py b/src/pynumad/analysis/makeModels.py index b1512b1..d4032d1 100644 --- a/src/pynumad/analysis/makeModels.py +++ b/src/pynumad/analysis/makeModels.py @@ -1,95 +1,113 @@ -#import logging +# import logging import subprocess -#import os + +# import os import glob import numpy as np -def writeBeamModel(wt_name,settings,blade,mu,log,directory='.'): +def writeBeamModel(wt_name, settings, blade, mu, log, directory="."): import pynumad.analysis.beamUtils as beamUtils -# #Runs VABS or OpenSG to homogenize -# #Makes beamDyn or GEBT files - - + # #Runs VABS or OpenSG to homogenize + # #Makes beamDyn or GEBT files - radial_stations=blade.ispan/blade.ispan[-1] - nStations=len(radial_stations) + radial_stations = blade.ispan / blade.ispan[-1] + nStations = len(radial_stations) # #Run input files - if 'vabs' in settings['solver'].lower(): - - log.info(f'\n\n\nRunning VABS homogenization.') - - fileCount=0 - #First remove any lck files - pattern=directory+'/'+wt_name+'*.in' - if len(glob.glob(pattern))==0: - raise RuntimeError(f'Could not find files with pattern: {pattern}. Beam model generation failed') - MAXnLicenceTries=100 - for filePath in glob.glob(directory+'/'+wt_name+'*.in'): - fileCount+=1 + if "vabs" in settings["solver"].lower(): + log.info(f"\n\n\nRunning VABS homogenization.") + + fileCount = 0 + # First remove any lck files + pattern = directory + "/" + wt_name + "*.in" + if len(glob.glob(pattern)) == 0: + raise RuntimeError( + f"Could not find files with pattern: {pattern}. Beam model generation failed" + ) + MAXnLicenceTries = 100 + for filePath in glob.glob(directory + "/" + wt_name + "*.in"): + fileCount += 1 try: - this_cmd = 'VABS ' +filePath - log.info(f' running: {this_cmd}') + this_cmd = "VABS " + filePath + log.info(f" running: {this_cmd}") - licenseAvailable=False - nLicenceTries=0 - while not licenseAvailable and nLicenceTries <=MAXnLicenceTries-1: - subprocess.run(this_cmd, shell=True, check=True, capture_output=True) + licenseAvailable = False + nLicenceTries = 0 + while not licenseAvailable and nLicenceTries <= MAXnLicenceTries - 1: + subprocess.run( + this_cmd, shell=True, check=True, capture_output=True + ) - with open(filePath+'.ech', 'r') as f: + with open(filePath + ".ech", "r") as f: lines = f.readlines() - #log the last line of .ech file: - - if 'Congratulations! No errors' in lines[-1]: - log.info(f'****************************\n{lines[-1]}\n******************************') - licenseAvailable=True - nLicenceTries=0 - elif 'license' in lines[-1].lower(): - nLicenceTries+=1 - log.info(f'****************************\nnLicenceTries: {nLicenceTries}, {lines[-1]}\n******************************') + # log the last line of .ech file: + + if "Congratulations! 
No errors" in lines[-1]: + log.info( + f"****************************\n{lines[-1]}\n******************************" + ) + licenseAvailable = True + nLicenceTries = 0 + elif "license" in lines[-1].lower(): + nLicenceTries += 1 + log.info( + f"****************************\nnLicenceTries: {nLicenceTries}, {lines[-1]}\n******************************" + ) else: - log.error(f'****************************\n{lines[-1]}\n******************************') - raise Exception(f'Cross-sectional homogenization for file {filePath} failed due to: \n {lines[-1]} \n Beam model creation failed.') - if nLicenceTries ==MAXnLicenceTries: - string=f'License failed to be obtained after {MAXnLicenceTries} tries. Beam model creation failed.' - log.error(string) - raise Exception(string) + log.error( + f"****************************\n{lines[-1]}\n******************************" + ) + raise Exception( + f"Cross-sectional homogenization for file {filePath} failed due to: \n {lines[-1]} \n Beam model creation failed." + ) + if nLicenceTries == MAXnLicenceTries: + string = f"License failed to be obtained after {MAXnLicenceTries} tries. Beam model creation failed." + log.error(string) + raise Exception(string) except subprocess.CalledProcessError as e: - log.error(f'Error running {this_cmd}: {e}') - + log.error(f"Error running {this_cmd}: {e}") + # if fileCount != nStations: # raise Exception('Error. Not enough VABS input files:') - elif 'anba' in settings['solver'].lower(): - raise ValueError('ANBA currently not supported') - + elif "anba" in settings["solver"].lower(): + raise ValueError("ANBA currently not supported") + ### Read inputs + extension = "K" -### Read inputs - extension='K' + blade.ispan = np.delete( + blade.ispan, -2 + ) # TEMP Need to delete the added station near tip + blade.idegreestwist = np.delete( + blade.idegreestwist, -2 + ) # TEMP Need to delete the added station near tip - blade.ispan=np.delete(blade.ispan,-2) #TEMP Need to delete the added station near tip - blade.idegreestwist=np.delete(blade.idegreestwist,-2) #TEMP Need to delete the added station near tip - - radial_stations=blade.ispan/blade.ispan[-1] + radial_stations = blade.ispan / blade.ispan[-1] beam_stiff = np.zeros([len(radial_stations), 6, 6]) beam_inertia = np.zeros([len(radial_stations), 6, 6]) - - - for fileName in glob.glob(directory+'/'+wt_name+"*." +extension): - iStation=int(fileName.split('-')[-3].split('.')[0]) - print(f'fileName {fileName} iStation {iStation}') - - beam_stiff[iStation,:,:],beam_inertia[iStation,:,:]=beamUtils.readVABShomogenization(fileName) - - - if 'beamdyn' in settings['beamSolver'].lower(): - beam_stiff,beam_inertia=beamUtils.transformMatrixToBeamDyn(beam_stiff,beam_inertia) - axisFileName=beamUtils.write_beamdyn_axis(directory, wt_name, blade,radial_stations) - propFileName=beamUtils.write_beamdyn_prop(directory, wt_name, radial_stations, beam_stiff, beam_inertia, mu) - return [axisFileName,propFileName] \ No newline at end of file + for fileName in glob.glob(directory + "/" + wt_name + "*." 
+ extension): + iStation = int(fileName.split("-")[-3].split(".")[0]) + print(f"fileName {fileName} iStation {iStation}") + + ( + beam_stiff[iStation, :, :], + beam_inertia[iStation, :, :], + ) = beamUtils.readVABShomogenization(fileName) + + if "beamdyn" in settings["beamSolver"].lower(): + beam_stiff, beam_inertia = beamUtils.transformMatrixToBeamDyn( + beam_stiff, beam_inertia + ) + axisFileName = beamUtils.write_beamdyn_axis( + directory, wt_name, blade, radial_stations + ) + propFileName = beamUtils.write_beamdyn_prop( + directory, wt_name, radial_stations, beam_stiff, beam_inertia, mu + ) + return [axisFileName, propFileName] diff --git a/src/pynumad/io/excel_to_blade.py b/src/pynumad/io/excel_to_blade.py index e951f37..6bd72c4 100644 --- a/src/pynumad/io/excel_to_blade.py +++ b/src/pynumad/io/excel_to_blade.py @@ -9,98 +9,101 @@ data_dir = DATA_PATH + def excel_to_blade(blade, filename): """ - xlsBlade Construct BladeDef object with inputs from spreadsheet. + xlsBlade Construct BladeDef object with inputs from spreadsheet. Parameters ---------- filename : string path to Excelfile - + Returns ------- blade : Blade blade object populated by Excel file - + Example ------- blade = xlsBlade(FILENAME) """ - + MPa_to_Pa = 1000000.0 # Dictionary containing column indices xls_dict = {} - xls_dict['geom'] = {} - xls_dict['geom']['datarow1'] = 6 - xls_dict['geom']['span'] = 0 - xls_dict['geom']['twist'] = 1 - xls_dict['geom']['chord'] = 2 - xls_dict['geom']['thick'] = 3 - xls_dict['geom']['offset'] = 4 - xls_dict['geom']['aerocenter'] = 5 - xls_dict['geom']['afspan'] = 7 - xls_dict['geom']['afname'] = 8 - xls_dict['geom']['ispan'] = 10 - - xls_dict['cmpt'] = {} - xls_dict['cmpt']['paramcol'] = 2 - xls_dict['cmpt']['paramrow1'] = 1 - xls_dict['cmpt']['datarow1'] = 6 - xls_dict['cmpt']['group'] = 0 - xls_dict['cmpt']['name'] = 1 - xls_dict['cmpt']['matid'] = 2 - xls_dict['cmpt']['angle'] = 3 - xls_dict['cmpt']['hpext'] = 4 - xls_dict['cmpt']['lpext'] = 5 - xls_dict['cmpt']['cpspan'] = 6 - xls_dict['cmpt']['cpnlay'] = 7 - xls_dict['cmpt']['imethod'] = 8 - - xls_dict['mtrl'] = {} - xls_dict['mtrl']['datarow1'] = 3 - xls_dict['mtrl']['id'] = 0 - xls_dict['mtrl']['name'] = 1 - xls_dict['mtrl']['type'] = 2 - xls_dict['mtrl']['thickness'] = 3 - xls_dict['mtrl']['ex'] = 4 - xls_dict['mtrl']['ey'] = 5 - xls_dict['mtrl']['ez'] = 6 - xls_dict['mtrl']['gxy'] = 7 - xls_dict['mtrl']['gyz'] = 8 - xls_dict['mtrl']['gxz'] = 9 - xls_dict['mtrl']['prxy'] = 10 - xls_dict['mtrl']['pryz'] = 11 - xls_dict['mtrl']['prxz'] = 12 - xls_dict['mtrl']['density'] = 13 - xls_dict['mtrl']['drydensity'] = 14 - xls_dict['mtrl']['uts'] = 15 - xls_dict['mtrl']['ucs'] = 16 - xls_dict['mtrl']['reference'] = 17 + xls_dict["geom"] = {} + xls_dict["geom"]["datarow1"] = 6 + xls_dict["geom"]["span"] = 0 + xls_dict["geom"]["twist"] = 1 + xls_dict["geom"]["chord"] = 2 + xls_dict["geom"]["thick"] = 3 + xls_dict["geom"]["offset"] = 4 + xls_dict["geom"]["aerocenter"] = 5 + xls_dict["geom"]["afspan"] = 7 + xls_dict["geom"]["afname"] = 8 + xls_dict["geom"]["ispan"] = 10 + + xls_dict["cmpt"] = {} + xls_dict["cmpt"]["paramcol"] = 2 + xls_dict["cmpt"]["paramrow1"] = 1 + xls_dict["cmpt"]["datarow1"] = 6 + xls_dict["cmpt"]["group"] = 0 + xls_dict["cmpt"]["name"] = 1 + xls_dict["cmpt"]["matid"] = 2 + xls_dict["cmpt"]["angle"] = 3 + xls_dict["cmpt"]["hpext"] = 4 + xls_dict["cmpt"]["lpext"] = 5 + xls_dict["cmpt"]["cpspan"] = 6 + xls_dict["cmpt"]["cpnlay"] = 7 + xls_dict["cmpt"]["imethod"] = 8 + + xls_dict["mtrl"] = {} + xls_dict["mtrl"]["datarow1"] = 3 
+ xls_dict["mtrl"]["id"] = 0 + xls_dict["mtrl"]["name"] = 1 + xls_dict["mtrl"]["type"] = 2 + xls_dict["mtrl"]["thickness"] = 3 + xls_dict["mtrl"]["ex"] = 4 + xls_dict["mtrl"]["ey"] = 5 + xls_dict["mtrl"]["ez"] = 6 + xls_dict["mtrl"]["gxy"] = 7 + xls_dict["mtrl"]["gyz"] = 8 + xls_dict["mtrl"]["gxz"] = 9 + xls_dict["mtrl"]["prxy"] = 10 + xls_dict["mtrl"]["pryz"] = 11 + xls_dict["mtrl"]["prxz"] = 12 + xls_dict["mtrl"]["density"] = 13 + xls_dict["mtrl"]["drydensity"] = 14 + xls_dict["mtrl"]["uts"] = 15 + xls_dict["mtrl"]["ucs"] = 16 + xls_dict["mtrl"]["reference"] = 17 ## GEOMETRY # Read the Geometry tab of the xls file - num = pd.read_excel(filename, sheet_name = 'Geometry', header = None) - txt = pd.read_excel(filename, sheet_name = 'Geometry', dtype=str, header = None) + num = pd.read_excel(filename, sheet_name="Geometry", header=None) + txt = pd.read_excel(filename, sheet_name="Geometry", dtype=str, header=None) - if txt.iloc[1,1] == 'T': + if txt.iloc[1, 1] == "T": blade.naturaloffset = 1 else: blade.naturaloffset = 0 - - if txt.iloc[2,1] == 'CW': + + if txt.iloc[2, 1] == "CW": blade.rotorspin = 1 else: - blade.rotorspin = - 1 - - if txt.iloc[3,1] == 'T': + blade.rotorspin = -1 + + if txt.iloc[3, 1] == "T": blade.swtwisted = 1 else: blade.swtwisted = 0 - - blade.span = np.array(num.iloc[xls_dict['geom']['datarow1']:,xls_dict['geom']['span']],dtype=float) + + blade.span = np.array( + num.iloc[xls_dict["geom"]["datarow1"] :, xls_dict["geom"]["span"]], dtype=float + ) # Next two lines handle the case where the spreadsheet tab has data # elsewhere on the tab below the last span value nans = np.nonzero(np.isnan(blade.span)) @@ -112,58 +115,90 @@ def excel_to_blade(blade, filename): blade.span = blade.span[0:lastrow] if np.any(np.isnan(blade.span)): raise Exception('xlsBlade: span column must not have "holes" in data') - - lastrow = lastrow + xls_dict['geom']['datarow1'] - blade.degreestwist = np.array(num.iloc[xls_dict['geom']['datarow1']:lastrow,xls_dict['geom']['twist']],dtype= float) - blade.chord = np.array(num.iloc[xls_dict['geom']['datarow1']:lastrow,xls_dict['geom']['chord']],dtype=float) - blade.percentthick = np.array(num.iloc[xls_dict['geom']['datarow1']:lastrow,xls_dict['geom']['thick']], dtype=float) - blade.chordoffset = np.array(num.iloc[xls_dict['geom']['datarow1']:lastrow,xls_dict['geom']['offset']], dtype=float) - blade.aerocenter = np.array(num.iloc[xls_dict['geom']['datarow1']:lastrow,xls_dict['geom']['aerocenter']], dtype=float) + + lastrow = lastrow + xls_dict["geom"]["datarow1"] + blade.degreestwist = np.array( + num.iloc[xls_dict["geom"]["datarow1"] : lastrow, xls_dict["geom"]["twist"]], + dtype=float, + ) + blade.chord = np.array( + num.iloc[xls_dict["geom"]["datarow1"] : lastrow, xls_dict["geom"]["chord"]], + dtype=float, + ) + blade.percentthick = np.array( + num.iloc[xls_dict["geom"]["datarow1"] : lastrow, xls_dict["geom"]["thick"]], + dtype=float, + ) + blade.chordoffset = np.array( + num.iloc[xls_dict["geom"]["datarow1"] : lastrow, xls_dict["geom"]["offset"]], + dtype=float, + ) + blade.aerocenter = np.array( + num.iloc[ + xls_dict["geom"]["datarow1"] : lastrow, xls_dict["geom"]["aerocenter"] + ], + dtype=float, + ) blade.sweep = np.zeros(blade.span.shape) blade.prebend = np.zeros(blade.span.shape) - props = ['degreestwist','chord','percentthick','chordoffset','aerocenter'] + props = ["degreestwist", "chord", "percentthick", "chordoffset", "aerocenter"] for prop in props: # For each of the input properties, interpolate where ever values # are missing. 
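# Aside: the property loop here fills any NaN rows by pchip interpolation over
# span (via interpolator_wrap).  The same idea with scipy directly, on made-up
# chord data:
import numpy as np
from scipy.interpolate import PchipInterpolator

span = np.array([0.0, 10.0, 20.0, 30.0, 40.0])
chord = np.array([3.0, np.nan, 2.4, np.nan, 1.1])   # two rows left blank
ind = np.isnan(chord)
chord[ind] = PchipInterpolator(span[~ind], chord[~ind])(span[ind])
print(chord)   # blanks replaced with monotone cubic (pchip) values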
- ind = np.isnan(getattr(blade,prop)) + ind = np.isnan(getattr(blade, prop)) if np.any(ind): - if not prop == 'percentthick' : - getattr(blade,prop)[ind] = interpolator_wrap( - np.delete(blade.span, ind),np.delete(getattr(blade,prop), ind), - blade.span[ind],'pchip' - ) + if not prop == "percentthick": + getattr(blade, prop)[ind] = interpolator_wrap( + np.delete(blade.span, ind), + np.delete(getattr(blade, prop), ind), + blade.span[ind], + "pchip", + ) else: # jcb: note that blade.chord must be interpolated before # blade.percentthick - absthick = np.multiply(blade.percentthick,blade.chord) / 100 + absthick = np.multiply(blade.percentthick, blade.chord) / 100 iabsthick = interpolator_wrap( - np.delete(blade.span, ind),np.delete(absthick,ind),blade.span[ind],'pchip' - ) + np.delete(blade.span, ind), + np.delete(absthick, ind), + blade.span[ind], + "pchip", + ) blade.percentthick[ind] = iabsthick / blade.chord[ind] * 100 # The next two lines report when a property has been # interpolated. # rowstr = sprintf('#d,',find(ind==1)); # fprintf('Interpolating "#s" on rows [#s]\n',props{k},rowstr(1:end-1)) - - afspan = np.array(num.iloc[xls_dict['geom']['datarow1']:,xls_dict['geom']['afspan']],dtype=float) + + afspan = np.array( + num.iloc[xls_dict["geom"]["datarow1"] :, xls_dict["geom"]["afspan"]], + dtype=float, + ) nans = np.nonzero(np.isnan(afspan)) if nans: lastrow = nans[0][0] else: lastrow = blade.span.shape[0] afspan = afspan[0:lastrow] - lastrow = lastrow + xls_dict['geom']['datarow1'] - afname = list(txt.iloc[xls_dict['geom']['datarow1']:lastrow,xls_dict['geom']['afname']]) + lastrow = lastrow + xls_dict["geom"]["datarow1"] + afname = list( + txt.iloc[xls_dict["geom"]["datarow1"] : lastrow, xls_dict["geom"]["afname"]] + ) for k in range(len(afspan)): if afspan[k] < np.amin(blade.span) or afspan[k] > np.amax(blade.span): - raise Exception('xlsBlade: location of airfoil #%d is outside given span distribution',k) - affile = data_dir + '/airfoils/{}.txt'.format(afname[k]) - blade.addStation(affile,afspan[k]) - blade.stations[-1].airfoil.resample(175,'cosine') - + raise Exception( + "xlsBlade: location of airfoil #%d is outside given span distribution", + k, + ) + affile = data_dir + "/airfoils/{}.txt".format(afname[k]) + blade.addStation(affile, afspan[k]) + blade.stations[-1].airfoil.resample(175, "cosine") + # afdb = np.array([blade.stations.airfoil]) # afdb.resample(175,'cosine') - blade.ispan = np.array(num.iloc[xls_dict['geom']['datarow1']:,xls_dict['geom']['ispan']],dtype=float) + blade.ispan = np.array( + num.iloc[xls_dict["geom"]["datarow1"] :, xls_dict["geom"]["ispan"]], dtype=float + ) nans = np.nonzero(np.isnan(blade.ispan)) try: lastrow = nans[0][0] @@ -174,66 +209,138 @@ def excel_to_blade(blade, filename): ## COMPONENTS # Read the Components tab of the xls file - num = pd.read_excel(filename, sheet_name = 'Components', header = None) - txt = pd.read_excel(filename, sheet_name = 'Components', dtype=str, header = None) - raw = pd.read_excel(filename, sheet_name = 'Components', dtype=object, header = None) + num = pd.read_excel(filename, sheet_name="Components", header=None) + txt = pd.read_excel(filename, sheet_name="Components", dtype=str, header=None) + raw = pd.read_excel(filename, sheet_name="Components", dtype=object, header=None) - blade.sparcapwidth = num.iloc[xls_dict['cmpt']['paramrow1'],xls_dict['cmpt']['paramcol']] - blade.leband = num.iloc[xls_dict['cmpt']['paramrow1'] + 1,xls_dict['cmpt']['paramcol']] - blade.teband = num.iloc[xls_dict['cmpt']['paramrow1'] + 
2,xls_dict['cmpt']['paramcol']] + blade.sparcapwidth = num.iloc[ + xls_dict["cmpt"]["paramrow1"], xls_dict["cmpt"]["paramcol"] + ] + blade.leband = num.iloc[ + xls_dict["cmpt"]["paramrow1"] + 1, xls_dict["cmpt"]["paramcol"] + ] + blade.teband = num.iloc[ + xls_dict["cmpt"]["paramrow1"] + 2, xls_dict["cmpt"]["paramcol"] + ] # ble: <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - blade.sparcapoffset = num.iloc[xls_dict['cmpt']['paramrow1'] + 3,xls_dict['cmpt']['paramcol']] + blade.sparcapoffset = num.iloc[ + xls_dict["cmpt"]["paramrow1"] + 3, xls_dict["cmpt"]["paramcol"] + ] if np.isnan(blade.sparcapoffset): blade.sparcapoffset = 0 - + # ble: >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> blade.components = [] - N = num.shape[0] - xls_dict['cmpt']['datarow1'] + N = num.shape[0] - xls_dict["cmpt"]["datarow1"] for k in range(N): comp = Component() - comp.group = num.iloc[xls_dict['cmpt']['datarow1'] + k,xls_dict['cmpt']['group']] - comp.name = txt.iloc[xls_dict['cmpt']['datarow1'] + k,xls_dict['cmpt']['name']] - comp.materialid = num.iloc[xls_dict['cmpt']['datarow1'] + k,xls_dict['cmpt']['matid']] - comp.fabricangle = readnumlist(raw.iloc[xls_dict['cmpt']['datarow1'] + k,xls_dict['cmpt']['angle']]) - comp.hpextents = readstrlist(txt.iloc[xls_dict['cmpt']['datarow1'] + k,xls_dict['cmpt']['hpext']]) - comp.lpextents = readstrlist(txt.iloc[xls_dict['cmpt']['datarow1'] + k,xls_dict['cmpt']['lpext']]) - comp.cp = readnumlist(raw.iloc[xls_dict['cmpt']['datarow1'] + k,xls_dict['cmpt']['cpspan']]) - comp.cp = np.stack((comp.cp,readnumlist(raw.iloc[xls_dict['cmpt']['datarow1'] + k,xls_dict['cmpt']['cpnlay']])),axis=1) - comp.imethod = txt.iloc[xls_dict['cmpt']['datarow1'] + k,xls_dict['cmpt']['imethod']] - if not np.any(len(comp.hpextents) == np.array([0,1,2])) : - raise Exception('xlsBlade: component #%d, length of hpextents must be 0, 1, or 2',k + 1) - if not np.any(len(comp.lpextents) == np.array([0,1,2])) : - raise Exception('xlsBlade: component #%d, length of lpextents must be 0, 1, or 2',k + 1) + comp.group = num.iloc[ + xls_dict["cmpt"]["datarow1"] + k, xls_dict["cmpt"]["group"] + ] + comp.name = txt.iloc[xls_dict["cmpt"]["datarow1"] + k, xls_dict["cmpt"]["name"]] + comp.materialid = num.iloc[ + xls_dict["cmpt"]["datarow1"] + k, xls_dict["cmpt"]["matid"] + ] + comp.fabricangle = readnumlist( + raw.iloc[xls_dict["cmpt"]["datarow1"] + k, xls_dict["cmpt"]["angle"]] + ) + comp.hpextents = readstrlist( + txt.iloc[xls_dict["cmpt"]["datarow1"] + k, xls_dict["cmpt"]["hpext"]] + ) + comp.lpextents = readstrlist( + txt.iloc[xls_dict["cmpt"]["datarow1"] + k, xls_dict["cmpt"]["lpext"]] + ) + comp.cp = readnumlist( + raw.iloc[xls_dict["cmpt"]["datarow1"] + k, xls_dict["cmpt"]["cpspan"]] + ) + comp.cp = np.stack( + ( + comp.cp, + readnumlist( + raw.iloc[ + xls_dict["cmpt"]["datarow1"] + k, xls_dict["cmpt"]["cpnlay"] + ] + ), + ), + axis=1, + ) + comp.imethod = txt.iloc[ + xls_dict["cmpt"]["datarow1"] + k, xls_dict["cmpt"]["imethod"] + ] + if not np.any(len(comp.hpextents) == np.array([0, 1, 2])): + raise Exception( + "xlsBlade: component #%d, length of hpextents must be 0, 1, or 2", k + 1 + ) + if not np.any(len(comp.lpextents) == np.array([0, 1, 2])): + raise Exception( + "xlsBlade: component #%d, length of lpextents must be 0, 1, or 2", k + 1 + ) blade.components.append(comp) - + ## MATERIALS # Read the Materials tab of the xls file - num = pd.read_excel(filename, sheet_name = 'Materials', header = None) - txt = pd.read_excel(filename, sheet_name = 'Materials', dtype=str, 
header = None) + num = pd.read_excel(filename, sheet_name="Materials", header=None) + txt = pd.read_excel(filename, sheet_name="Materials", dtype=str, header=None) blade.materials = [] - N = num.shape[0] - xls_dict['mtrl']['datarow1'] + N = num.shape[0] - xls_dict["mtrl"]["datarow1"] for k in range(N): mat = Material() - mat.name = txt.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['name']] - mat.type = txt.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['type']] - mat.layerthickness = num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['thickness']] - mat.ex = MPa_to_Pa * num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['ex']] - mat.prxy = num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['prxy']] - mat.density = num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['density']] - mat.drydensity = num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['drydensity']] - mat.uts = MPa_to_Pa * num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['uts']] - mat.ucs = MPa_to_Pa * num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['ucs']] - mat.reference = txt.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['reference']] - if mat.type=='orthotropic': - mat.ey = MPa_to_Pa * num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['ey']] - mat.ez = MPa_to_Pa * num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['ez']] - mat.gxy = MPa_to_Pa * num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['gxy']] - mat.gyz = MPa_to_Pa * num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['gyz']] - mat.gxz = MPa_to_Pa * num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['gxz']] - mat.pryz = num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['pryz']] - mat.prxz = num.iloc[xls_dict['mtrl']['datarow1'] + k,xls_dict['mtrl']['prxz']] + mat.name = txt.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["name"]] + mat.type = txt.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["type"]] + mat.layerthickness = num.iloc[ + xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["thickness"] + ] + mat.ex = ( + MPa_to_Pa + * num.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["ex"]] + ) + mat.prxy = num.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["prxy"]] + mat.density = num.iloc[ + xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["density"] + ] + mat.drydensity = num.iloc[ + xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["drydensity"] + ] + mat.uts = ( + MPa_to_Pa + * num.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["uts"]] + ) + mat.ucs = ( + MPa_to_Pa + * num.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["ucs"]] + ) + mat.reference = txt.iloc[ + xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["reference"] + ] + if mat.type == "orthotropic": + mat.ey = ( + MPa_to_Pa + * num.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["ey"]] + ) + mat.ez = ( + MPa_to_Pa + * num.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["ez"]] + ) + mat.gxy = ( + MPa_to_Pa + * num.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["gxy"]] + ) + mat.gyz = ( + MPa_to_Pa + * num.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["gyz"]] + ) + mat.gxz = ( + MPa_to_Pa + * num.iloc[xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["gxz"]] + ) + mat.pryz = num.iloc[ + xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["pryz"] + ] + mat.prxz = num.iloc[ + xls_dict["mtrl"]["datarow1"] + k, xls_dict["mtrl"]["prxz"] + ] else: mat.ey = [] mat.ez = [] @@ -253,11 +360,11 
@@ def excel_to_blade(blade, filename): if not mat.prxz: mat.prxz = mat.prxy blade.materials.append(mat) - + return blade - - -def readnumlist(strings): + + +def readnumlist(strings): # read a list of numeric values # check if numeric @@ -265,25 +372,26 @@ def readnumlist(strings): strings - 1 # handle string list case except TypeError: - #drop brackets on either end '[string]' -> 'string' - if strings[0] == '[': + # drop brackets on either end '[string]' -> 'string' + if strings[0] == "[": strings = strings[1:] - if strings[-1] == ']': + if strings[-1] == "]": strings = strings[:-1] # get list of numbers - strings = strings.split(',') + strings = strings.split(",") strings = [float(num) for num in strings] # could return as array - not sure. return np.array(strings) -def readstrlist(strings = None): + +def readstrlist(strings=None): if len(strings) == 0: strlist = strings else: - strlist = strings.split(',') + strlist = strings.split(",") return strlist - - + + """ readnumlist scratch code @@ -310,4 +418,4 @@ def strreps(strin = None,oldsubstrcell = None,newsubstrcell = None): strout = strout.replace(oldsubstrcell[k],newsubstrcell[k]) return strout -""" \ No newline at end of file +""" diff --git a/src/pynumad/io/mesh_to_yaml.py b/src/pynumad/io/mesh_to_yaml.py index 4bb3f0c..dea455f 100644 --- a/src/pynumad/io/mesh_to_yaml.py +++ b/src/pynumad/io/mesh_to_yaml.py @@ -2,77 +2,78 @@ import numpy as np import os -def mesh_to_yaml(meshData,fileName): + +def mesh_to_yaml(meshData, fileName): """ TODO docstring """ mDataOut = dict() nodes = list() - for nd in meshData['nodes']: + for nd in meshData["nodes"]: ndstr = str(nd) nodes.append(ndstr) elements = list() - for el in meshData['elements']: + for el in meshData["elements"]: elstr = str(el) elements.append(elstr) esList = list() - for es in meshData['sets']['element']: + for es in meshData["sets"]["element"]: newSet = dict() - newSet['name'] = es['name'] + newSet["name"] = es["name"] labels = list() - for el in es['labels']: + for el in es["labels"]: labels.append(int(el)) - newSet['labels'] = labels + newSet["labels"] = labels esList.append(newSet) sections = list() - for sec in meshData['sections']: + for sec in meshData["sections"]: newSec = dict() - newSec['type'] = sec['type'] - newSec['elementSet'] = sec['elementSet'] - if(sec['type'] == 'shell'): + newSec["type"] = sec["type"] + newSec["elementSet"] = sec["elementSet"] + if sec["type"] == "shell": newLayup = list() - for lay in sec['layup']: + for lay in sec["layup"]: laystr = str(lay) newLayup.append(laystr) - newSec['layup'] = newLayup + newSec["layup"] = newLayup else: - newSec['material'] = sec['material'] + newSec["material"] = sec["material"] sections.append(newSec) - mDataOut['nodes'] = nodes - mDataOut['elements'] = elements - mDataOut['sets'] = dict() - mDataOut['sets']['element'] = esList - mDataOut['sections'] = sections + mDataOut["nodes"] = nodes + mDataOut["elements"] = elements + mDataOut["sets"] = dict() + mDataOut["sets"]["element"] = esList + mDataOut["sections"] = sections try: adNds = list() - for nd in meshData['adhesiveNds']: + for nd in meshData["adhesiveNds"]: ndstr = str(nd) adNds.append(ndstr) adEls = list() - for el in meshData['adhesiveEls']: + for el in meshData["adhesiveEls"]: elstr = str(el) adEls.append(elstr) - mDataOut['adhesiveNds'] = adNds - mDataOut['adhesiveEls'] = adEls - mDataOut['adhesiveElSet'] = meshData['adhesiveElSet'] + mDataOut["adhesiveNds"] = adNds + mDataOut["adhesiveEls"] = adEls + mDataOut["adhesiveElSet"] = meshData["adhesiveElSet"] 
except: pass - outStream = open('temp.yaml','w') - yaml.dump(mDataOut,stream=outStream,sort_keys=False) + outStream = open("temp.yaml", "w") + yaml.dump(mDataOut, stream=outStream, sort_keys=False) outStream.close() - - inFile = open('temp.yaml','r') - outFile = open(fileName,'w') + + inFile = open("temp.yaml", "r") + outFile = open(fileName, "w") fLine = inFile.readline() - while(fLine != ''): - newSt = fLine.replace("'","") - newSt = newSt.replace('"','') + while fLine != "": + newSt = fLine.replace("'", "") + newSt = newSt.replace('"', "") outFile.write(newSt) fLine = inFile.readline() inFile.close() outFile.close() - - os.remove('temp.yaml') + + os.remove("temp.yaml") diff --git a/src/pynumad/io/xml_to_airfoil.py b/src/pynumad/io/xml_to_airfoil.py index 6f57cbf..2d43636 100644 --- a/src/pynumad/io/xml_to_airfoil.py +++ b/src/pynumad/io/xml_to_airfoil.py @@ -1,9 +1,10 @@ import re import numpy as np -def xml_to_airfoil(airfoil, filecontents = None): + +def xml_to_airfoil(airfoil, filecontents=None): """TODO docstring - + Parameters ---------- @@ -18,26 +19,26 @@ def xml_to_airfoil(airfoil, filecontents = None): # The following regular expression pattern matches any number of characters # found between the opening and closing "reference" tags - fulltext = ''.join(filecontents) - - pattern = '(.*)' - t = re.search(pattern,fulltext) + fulltext = "".join(filecontents) + + pattern = "(.*)" + t = re.search(pattern, fulltext) reference = t.group(1) for line in filecontents: - #check if there is a tag - if re.search('<',line): + # check if there is a tag + if re.search("<", line): continue # otherwise, assume coordinate data else: - if re.search('\t',line): - line = line.replace('\t', ' ') - x, y = line.split(' ') + if re.search("\t", line): + line = line.replace("\t", " ") + x, y = line.split(" ") x = float(x) y = float(y) try: - coords = np.append(coords, [[x,y]],axis=0) + coords = np.append(coords, [[x, y]], axis=0) except UnboundLocalError: - coords = np.array([[x,y]]) + coords = np.array([[x, y]]) airfoil.reference = reference - airfoil.coordinates = coords \ No newline at end of file + airfoil.coordinates = coords diff --git a/src/pynumad/io/yaml_to_blade.py b/src/pynumad/io/yaml_to_blade.py index e8cd4d6..b4a7789 100644 --- a/src/pynumad/io/yaml_to_blade.py +++ b/src/pynumad/io/yaml_to_blade.py @@ -8,7 +8,12 @@ import numpy as np from scipy.stats import mode -from pynumad.utils.misc_utils import LARCetaT, LARCetaL, _parse_data,fullKeysFromSubStrings +from pynumad.utils.misc_utils import ( + LARCetaT, + LARCetaL, + _parse_data, + fullKeysFromSubStrings, +) from pynumad.utils.interpolation import interpolator_wrap from pynumad.objects.Component import Component from pynumad.objects.Airfoil import Airfoil @@ -24,7 +29,7 @@ def yaml_to_blade(blade, filename: str, write_airfoils: bool = False): Parameters ---------- blade : Blade - filename : string + filename : string path to .yaml file write_airfoils : bool Set true to write airfoil files while reading in data. Defaults to false. 
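# Aside: a minimal usage sketch for yaml_to_blade as documented above.  It
# assumes a Blade object can be imported from pynumad.objects.Blade and built
# without arguments, and uses a placeholder ontology file name; both are
# assumptions for illustration only.
from pynumad.objects.Blade import Blade
from pynumad.io.yaml_to_blade import yaml_to_blade

blade = Blade()
yaml_to_blade(blade, "my_turbine.yaml", write_airfoils=False)
print(blade.ispan)   # spanwise stations populated from the yaml geometry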
@@ -38,30 +43,36 @@ def yaml_to_blade(blade, filename: str, write_airfoils: bool = False): # Read in yaml file as a nested dictionary with open(filename) as blade_yaml: # data = yaml.load(blade_yaml,Loader=yaml.FullLoader) - data = yaml.load(blade_yaml,Loader=yaml.Loader) + data = yaml.load(blade_yaml, Loader=yaml.Loader) # Obtain blade outer shape bem - blade_outer_shape_bem = data['components']['blade']['outer_shape_bem'] + blade_outer_shape_bem = data["components"]["blade"]["outer_shape_bem"] # obtain hub outer shape bem try: - hub_outer_shape_bem = data['components']['hub']['outer_shape_bem'] + hub_outer_shape_bem = data["components"]["hub"]["outer_shape_bem"] except KeyError: # older versions of wind ontology do not have 'outer_shape_bem' subsection for hub data - hub_outer_shape_bem = data['components']['hub'] + hub_outer_shape_bem = data["components"]["hub"] # obtain blade internal structure - blade_internal_structure = data['components']['blade']['internal_structure_2d_fem'] + blade_internal_structure = data["components"]["blade"]["internal_structure_2d_fem"] # obtain airfoil data - af_data = data['airfoils'] + af_data = data["airfoils"] # obtain material data - mat_data = data['materials'] + mat_data = data["materials"] ### STATIONS / AIRFOILS - _add_stations(blade, blade_outer_shape_bem, hub_outer_shape_bem, - af_data, filename, write_airfoils) + _add_stations( + blade, + blade_outer_shape_bem, + hub_outer_shape_bem, + af_data, + filename, + write_airfoils, + ) ### MATERIALS _add_materials(blade, mat_data) @@ -69,14 +80,17 @@ def yaml_to_blade(blade, filename: str, write_airfoils: bool = False): ## Blade Components # Update "grid" and "values" keys to cover the whole span of the blade - blade_internal_structure = update_internal_structure(blade_internal_structure, blade_outer_shape_bem) + blade_internal_structure = update_internal_structure( + blade_internal_structure, blade_outer_shape_bem + ) blade_structure_dict = { - blade_internal_structure['layers'][i]['name'].lower(): - blade_internal_structure['layers'][i] - for i in range(len(blade_internal_structure['layers'])) + blade_internal_structure["layers"][i]["name"].lower(): blade_internal_structure[ + "layers" + ][i] + for i in range(len(blade_internal_structure["layers"])) } - #Spar caps + # Spar caps _add_spar_caps(blade, blade_structure_dict) # TE Bands @@ -85,7 +99,6 @@ def yaml_to_blade(blade, filename: str, write_airfoils: bool = False): # LE Bands _add_le_bands(blade, blade_structure_dict) - ### COMPONENTS _add_components(blade, blade_internal_structure, blade_structure_dict) @@ -95,92 +108,139 @@ def yaml_to_blade(blade, filename: str, write_airfoils: bool = False): return blade -def _add_stations(blade,blade_outer_shape_bem, hub_outer_shape_bem, - af_data, file: str, write_airfoils): - +def _add_stations( + blade, + blade_outer_shape_bem, + hub_outer_shape_bem, + af_data, + file: str, + write_airfoils, +): # Obtaining some parameters not explicitly given in YAML file - L = np.ceil(blade_outer_shape_bem['reference_axis']['z']['values'][-1]) - R = L + hub_outer_shape_bem['diameter'] / 2 - L = R - hub_outer_shape_bem['diameter'] / 2 - blade.ispan = np.multiply(np.transpose(blade_outer_shape_bem['chord']['grid']),L) - - - #Aerodynamic properties + L = np.ceil(blade_outer_shape_bem["reference_axis"]["z"]["values"][-1]) + R = L + hub_outer_shape_bem["diameter"] / 2 + L = R - hub_outer_shape_bem["diameter"] / 2 + blade.ispan = np.multiply(np.transpose(blade_outer_shape_bem["chord"]["grid"]), L) + + # Aerodynamic properties # 
using interp because yaml can have different r/R for twist and chord - temp_x = np.transpose(blade_outer_shape_bem['twist']['grid']) - temp_y = blade_outer_shape_bem['twist']['values'] - blade.degreestwist = interpolator_wrap(np.multiply(temp_x,L),np.transpose(temp_y),blade.ispan) * 180.0 / np.pi + temp_x = np.transpose(blade_outer_shape_bem["twist"]["grid"]) + temp_y = blade_outer_shape_bem["twist"]["values"] + blade.degreestwist = ( + interpolator_wrap(np.multiply(temp_x, L), np.transpose(temp_y), blade.ispan) + * 180.0 + / np.pi + ) blade.chord = interpolator_wrap( - np.multiply(np.transpose(blade_outer_shape_bem['chord']['grid']),L), - np.transpose(blade_outer_shape_bem['chord']['values']),blade.ispan) + np.multiply(np.transpose(blade_outer_shape_bem["chord"]["grid"]), L), + np.transpose(blade_outer_shape_bem["chord"]["values"]), + blade.ispan, + ) af_dir_names = [] for i in range(len(af_data)): - af_dir_names.append(af_data[i]['name']) - numstations = len(blade_outer_shape_bem['airfoil_position']['labels']) - tc = [None]*numstations - aero_cent = [None]*numstations + af_dir_names.append(af_data[i]["name"]) + numstations = len(blade_outer_shape_bem["airfoil_position"]["labels"]) + tc = [None] * numstations + aero_cent = [None] * numstations for i in range(numstations): - _,_,iaf_temp = np.intersect1d(blade_outer_shape_bem['airfoil_position']['labels'][i],af_dir_names,'stable',return_indices=True) - IAF = iaf_temp[0] # Expect only one index of intersection - tc[i] = af_data[IAF]['relative_thickness'] - tc_xL = blade_outer_shape_bem['airfoil_position']['grid'][i] - aero_cent[i] = af_data[IAF]['aerodynamic_center'] - x = np.array(af_data[IAF]['coordinates']['x'], dtype=float) - y = np.array(af_data[IAF]['coordinates']['y'], dtype=float) - xf_coords = np.stack((x,y),1) + _, _, iaf_temp = np.intersect1d( + blade_outer_shape_bem["airfoil_position"]["labels"][i], + af_dir_names, + "stable", + return_indices=True, + ) + IAF = iaf_temp[0] # Expect only one index of intersection + tc[i] = af_data[IAF]["relative_thickness"] + tc_xL = blade_outer_shape_bem["airfoil_position"]["grid"][i] + aero_cent[i] = af_data[IAF]["aerodynamic_center"] + x = np.array(af_data[IAF]["coordinates"]["x"], dtype=float) + y = np.array(af_data[IAF]["coordinates"]["y"], dtype=float) + xf_coords = np.stack((x, y), 1) # find coordinate direction (clockwise or counter-clockwise) Winding # Number. clockwise starting at (1,0) is correct - with np.errstate(divide='ignore', invalid='ignore'): - if np.nanmean(np.gradient(np.arctan(xf_coords[:,1] / xf_coords[:,0]))) > 0: + with np.errstate(divide="ignore", invalid="ignore"): + if ( + np.nanmean(np.gradient(np.arctan(xf_coords[:, 1] / xf_coords[:, 0]))) + > 0 + ): xf_coords = np.flipud(xf_coords) if write_airfoils: import os - out_folder = 'yaml2BladeDef_' + file.replace('.yaml','') + + out_folder = "yaml2BladeDef_" + file.replace(".yaml", "") # blade_name = out_folder + '/' + file.replace('.yaml','') + '_blade.mat' # matdb_name =... # numade_name =... 
# Creating folders - os.makedirs(out_folder+'/af_coords/', exist_ok = True) + os.makedirs(out_folder + "/af_coords/", exist_ok=True) # os.makedirs(out_folder+'/af_polars/', exist_ok = True) - os.makedirs(out_folder+'/airfoil/', exist_ok = True) - writeNuMADAirfoil(xf_coords, - blade_outer_shape_bem['airfoil_position']['labels'][i], - out_folder + '/af_coords/' + - blade_outer_shape_bem['airfoil_position']['labels'][i]+'.txt') - - ref = blade_outer_shape_bem['airfoil_position']['labels'][i] - af = Airfoil(coords = xf_coords, ref = ref) - af.resample(spacing='half-cosine') - blade.addStation(af,tc_xL*L) + os.makedirs(out_folder + "/airfoil/", exist_ok=True) + writeNuMADAirfoil( + xf_coords, + blade_outer_shape_bem["airfoil_position"]["labels"][i], + out_folder + + "/af_coords/" + + blade_outer_shape_bem["airfoil_position"]["labels"][i] + + ".txt", + ) + + ref = blade_outer_shape_bem["airfoil_position"]["labels"][i] + af = Airfoil(coords=xf_coords, ref=ref) + af.resample(spacing="half-cosine") + blade.addStation(af, tc_xL * L) # Obtain some key blade attributes blade.span = blade.ispan - blade.percentthick = np.multiply(interpolator_wrap(np.multiply(blade_outer_shape_bem['airfoil_position']['grid'],L),tc,blade.ispan),100) - blade.aerocenter = interpolator_wrap(np.multiply(blade_outer_shape_bem['airfoil_position']['grid'],L),aero_cent,blade.span) - blade.chordoffset = interpolator_wrap(np.multiply(np.transpose(blade_outer_shape_bem['pitch_axis']['grid']),L), - np.transpose(blade_outer_shape_bem['pitch_axis']['values']),blade.span) + blade.percentthick = np.multiply( + interpolator_wrap( + np.multiply(blade_outer_shape_bem["airfoil_position"]["grid"], L), + tc, + blade.ispan, + ), + 100, + ) + blade.aerocenter = interpolator_wrap( + np.multiply(blade_outer_shape_bem["airfoil_position"]["grid"], L), + aero_cent, + blade.span, + ) + blade.chordoffset = interpolator_wrap( + np.multiply(np.transpose(blade_outer_shape_bem["pitch_axis"]["grid"]), L), + np.transpose(blade_outer_shape_bem["pitch_axis"]["values"]), + blade.span, + ) blade.naturaloffset = 0 - blade.prebend = interpolator_wrap(np.multiply(np.transpose(blade_outer_shape_bem['reference_axis']['x']['grid']),L), - np.transpose(blade_outer_shape_bem['reference_axis']['x']['values']),blade.span) - blade.sweep = interpolator_wrap(np.multiply(np.transpose(blade_outer_shape_bem['reference_axis']['y']['grid']),L), - np.transpose(blade_outer_shape_bem['reference_axis']['y']['values']),blade.span) + blade.prebend = interpolator_wrap( + np.multiply( + np.transpose(blade_outer_shape_bem["reference_axis"]["x"]["grid"]), L + ), + np.transpose(blade_outer_shape_bem["reference_axis"]["x"]["values"]), + blade.span, + ) + blade.sweep = interpolator_wrap( + np.multiply( + np.transpose(blade_outer_shape_bem["reference_axis"]["y"]["grid"]), L + ), + np.transpose(blade_outer_shape_bem["reference_axis"]["y"]["values"]), + blade.span, + ) # for i in range(len(tc)): - # afc = AirfoilDef(out_folder + - # '/af_coords/' + + # afc = AirfoilDef(out_folder + + # '/af_coords/' + # blade_outer_shape_bem['airfoil_position']['labels'][i] + # '.txt') # blade.addStation(afc,np.multiply(tc_xL[i],L)) - #NOTE nothing happens to afc? Tentatively ignoring... + # NOTE nothing happens to afc? Tentatively ignoring... 
# If i return to this make sure to listify the afcs ### AIRFOILS # for i in range(len(tc)): - # afc = AirfoilDef(out_folder + '/af_coords/' + - # blade_outer_shape_bem['airfoil_position']['labels'][i] + + # afc = AirfoilDef(out_folder + '/af_coords/' + + # blade_outer_shape_bem['airfoil_position']['labels'][i] + # '.txt') # blade.addStation(afc,np.multiply(tc_xL[i],L)) # afc.resample #NOTE afc isn't used after this... why resample? @@ -188,79 +248,90 @@ def _add_stations(blade,blade_outer_shape_bem, hub_outer_shape_bem, def _add_materials(blade, material_data): - materials_dict =dict() + materials_dict = dict() for i in range(len(material_data)): cur_mat = Material() - cur_mat.name = material_data[i]['name'] - if material_data[i]['orth'] == 1: - cur_mat.type = 'orthotropic' + cur_mat.name = material_data[i]["name"] + if material_data[i]["orth"] == 1: + cur_mat.type = "orthotropic" else: - cur_mat.type = 'isotropic' + cur_mat.type = "isotropic" # Add ply thickness option if ply thickness exists in yaml try: - cur_mat.layerthickness = material_data[i]['ply_t'] * 1000 + cur_mat.layerthickness = material_data[i]["ply_t"] * 1000 except KeyError: - print('Warning! material ply thickness ' + - material_data[i]['name'] + - ' not defined, assuming 1 mm thickness') - cur_mat.layerthickness = 1 - + print( + "Warning! material ply thickness " + + material_data[i]["name"] + + " not defined, assuming 1 mm thickness" + ) + cur_mat.layerthickness = 1 + finally: pass # first - cur_mat.uts = _parse_data(material_data[i]['Xt']) - cur_mat.ucs = -_parse_data(material_data[i]['Xc']) - cur_mat.uss = _parse_data(material_data[i]['S']) + cur_mat.uts = _parse_data(material_data[i]["Xt"]) + cur_mat.ucs = -_parse_data(material_data[i]["Xc"]) + cur_mat.uss = _parse_data(material_data[i]["S"]) cur_mat.xzit = 0.3 cur_mat.xzic = 0.25 cur_mat.yzit = 0.3 cur_mat.yzic = 0.25 - try: - cur_mat.g1g2 = material_data[i].get('GIc',0) / material_data[i].get('GIIc',0) + try: + cur_mat.g1g2 = material_data[i].get("GIc", 0) / material_data[i].get( + "GIIc", 0 + ) except ZeroDivisionError: cur_mat.g1g2 = np.nan - if 'alp0' in material_data[i]: - cur_mat.alp0 = _parse_data(material_data[i]['alp0']) + if "alp0" in material_data[i]: + cur_mat.alp0 = _parse_data(material_data[i]["alp0"]) cur_mat.etat = LARCetaT(cur_mat.alp0) - else: + else: cur_mat.alp0 = None cur_mat.etat = None try: - #test if property is a list - material_data[i]['E']+[] + # test if property is a list + material_data[i]["E"] + [] except TypeError: - cur_mat.ex = _parse_data(material_data[i]['E']) - cur_mat.ey = _parse_data(material_data[i]['E']) - cur_mat.ez = _parse_data(material_data[i]['E']) - cur_mat.gxy = _parse_data(material_data[i]['G']) - cur_mat.gxz = _parse_data(material_data[i]['G']) - cur_mat.gyz = _parse_data(material_data[i]['G']) - cur_mat.prxy = _parse_data(material_data[i]['nu']) - cur_mat.prxz = _parse_data(material_data[i]['nu']) - cur_mat.pryz = _parse_data(material_data[i]['nu']) - cur_mat.etal = LARCetaL(cur_mat.uss,cur_mat.ucs,cur_mat.alp0) + cur_mat.ex = _parse_data(material_data[i]["E"]) + cur_mat.ey = _parse_data(material_data[i]["E"]) + cur_mat.ez = _parse_data(material_data[i]["E"]) + cur_mat.gxy = _parse_data(material_data[i]["G"]) + cur_mat.gxz = _parse_data(material_data[i]["G"]) + cur_mat.gyz = _parse_data(material_data[i]["G"]) + cur_mat.prxy = _parse_data(material_data[i]["nu"]) + cur_mat.prxz = _parse_data(material_data[i]["nu"]) + cur_mat.pryz = _parse_data(material_data[i]["nu"]) + cur_mat.etal = LARCetaL(cur_mat.uss, cur_mat.ucs, 
cur_mat.alp0) else: - cur_mat.ex = _parse_data(material_data[i]['E'][0]) - cur_mat.ey = _parse_data(material_data[i]['E'][1]) - cur_mat.ez = _parse_data(material_data[i]['E'][2]) - cur_mat.gxy = _parse_data(material_data[i]['G'][0]) - cur_mat.gxz = _parse_data(material_data[i]['G'][1]) - cur_mat.gyz = _parse_data(material_data[i]['G'][2]) - cur_mat.prxy = _parse_data(material_data[i]['nu'][0]) - cur_mat.prxz = _parse_data(material_data[i]['nu'][1]) - cur_mat.pryz = _parse_data(material_data[i]['nu'][2]) - cur_mat.etal = LARCetaL(cur_mat.uss[0],cur_mat.ucs[1],cur_mat.alp0) + cur_mat.ex = _parse_data(material_data[i]["E"][0]) + cur_mat.ey = _parse_data(material_data[i]["E"][1]) + cur_mat.ez = _parse_data(material_data[i]["E"][2]) + cur_mat.gxy = _parse_data(material_data[i]["G"][0]) + cur_mat.gxz = _parse_data(material_data[i]["G"][1]) + cur_mat.gyz = _parse_data(material_data[i]["G"][2]) + cur_mat.prxy = _parse_data(material_data[i]["nu"][0]) + cur_mat.prxz = _parse_data(material_data[i]["nu"][1]) + cur_mat.pryz = _parse_data(material_data[i]["nu"][2]) + cur_mat.etal = LARCetaL(cur_mat.uss[0], cur_mat.ucs[1], cur_mat.alp0) try: - cur_mat.m = material_data[i]['m'] + cur_mat.m = material_data[i]["m"] except KeyError: print(f"No fatigue exponent found for material: {material_data[i]['name']}") - cur_mat.density = material_data[i]['rho'] + cur_mat.density = material_data[i]["rho"] # cur_mat.dens = mat_data[i]['rho'] - cur_mat.drydensity = material_data[i]['rho'] - if 'description' in material_data[i].keys() and 'source' in material_data[i].keys(): - desc_sourc = [material_data[i]['description'],', ',material_data[i]['source']] - cur_mat.reference = ''.join(desc_sourc) + cur_mat.drydensity = material_data[i]["rho"] + if ( + "description" in material_data[i].keys() + and "source" in material_data[i].keys() + ): + desc_sourc = [ + material_data[i]["description"], + ", ", + material_data[i]["source"], + ] + cur_mat.reference = "".join(desc_sourc) else: cur_mat.reference = [] @@ -270,31 +341,39 @@ def _add_materials(blade, material_data): def _add_components(blade, blade_internal_structure, blade_structure_dict): - N_layer_comp = len(blade_internal_structure['layers']) + N_layer_comp = len(blade_internal_structure["layers"]) component_list = list() for i in range(N_layer_comp): - i_component_data = blade_internal_structure['layers'][i] + i_component_data = blade_internal_structure["layers"][i] cur_comp = Component() cur_comp.group = 0 - cur_comp.name = i_component_data['name'] + cur_comp.name = i_component_data["name"] # comp['material'] = blade_internal_structure['layers']{i}['material']; # mat_names = [mat.name for mat in blade.materials] # C,IA,IB = np.intersect1d(mat_names,i_component_data['material'],return_indices=True) - cur_comp.materialid = i_component_data['material'] + cur_comp.materialid = i_component_data["material"] try: - cur_comp.fabricangle = np.mean(i_component_data['fiber_orientation']['values']) + cur_comp.fabricangle = np.mean( + i_component_data["fiber_orientation"]["values"] + ) finally: pass - if 'spar' in i_component_data['name'].lower(): - cur_comp.imethod = 'pchip' + if "spar" in i_component_data["name"].lower(): + cur_comp.imethod = "pchip" else: - cur_comp.imethod = 'linear' + cur_comp.imethod = "linear" # cur_comp.cp[:,0] = np.transpose(i_component_data['thickness']['grid']) - cptemp1 = np.transpose(i_component_data['thickness']['grid']) - temp_n_layer = np.multiply(np.transpose(i_component_data['thickness']['values']),1000.0) / 
blade.materials[cur_comp.materialid].layerthickness
+        cptemp1 = np.transpose(i_component_data["thickness"]["grid"])
+        temp_n_layer = (
+            np.multiply(np.transpose(i_component_data["thickness"]["values"]), 1000.0)
+            / blade.materials[cur_comp.materialid].layerthickness
+        )
         I_round_up = np.flatnonzero((temp_n_layer > 0.05) & (temp_n_layer < 0.5))
-        cptemp2 = np.round(np.multiply(np.transpose(i_component_data['thickness']['values']),1000.0) / blade.materials[cur_comp.materialid].layerthickness)
-        cur_comp.cp = np.stack((cptemp1,cptemp2),axis=1)
+        cptemp2 = np.round(
+            np.multiply(np.transpose(i_component_data["thickness"]["values"]), 1000.0)
+            / blade.materials[cur_comp.materialid].layerthickness
+        )
+        cur_comp.cp = np.stack((cptemp1, cptemp2), axis=1)
         # if I_round_up.size > 0:
         #     cur_comp.cp[I_round_up,1] = 1    # increase n_layers from 0 to 1 for 0.050:
+    if len(keyList) > 0:
         for key in keyList:
-            component_dict[key].hpextents = ['b']
-            component_dict[key].lpextents = ['b']
+            component_dict[key].hpextents = ["b"]
+            component_dict[key].lpextents = ["b"]
             component_dict[key].group = 1
-    elif len(keyList)==0:
-        raise ValueError('No fore web layers found found')
-
-
-    keyList=fullKeysFromSubStrings(component_dict.keys(),['web','aft']) #Try 1
-    if len(keyList)==0:
-        keyList=fullKeysFromSubStrings(component_dict.keys(),['web','0']) #Try 2
-    if len(keyList)==0:
-        keyList=fullKeysFromSubStrings(component_dict.keys(),['web','rear']) #Try 3
-
-    if len(keyList)>0:
+    elif len(keyList) == 0:
+        raise ValueError("No fore web layers found")
+
+    keyList = fullKeysFromSubStrings(component_dict.keys(), ["web", "aft"])  # Try 1
+    if len(keyList) == 0:
+        keyList = fullKeysFromSubStrings(component_dict.keys(), ["web", "0"])  # Try 2
+    if len(keyList) == 0:
+        keyList = fullKeysFromSubStrings(
+            component_dict.keys(), ["web", "rear"]
+        )  # Try 3
+
+    if len(keyList) > 0:
         for key in keyList:
-            component_dict[key].hpextents = ['c']
-            component_dict[key].lpextents = ['c']
+            component_dict[key].hpextents = ["c"]
+            component_dict[key].lpextents = ["c"]
             component_dict[key].group = 2
-    elif len(keyList)==0:
-        raise ValueError('No rear web layers found found')
-
-
+    elif len(keyList) == 0:
+        raise ValueError("No rear web layers found")
 
     ### add components to blade
     blade.components = component_dict
@@ -444,7 +519,7 @@ def _add_components(blade, blade_internal_structure, blade_structure_dict):
 def writeNuMADAirfoil(coords, reftext, fname):
     """WriteNuMADAirfoil  Write NuMAD airfoil files
-    
+
     Parameters
     ----------
     coords : array
@@ -456,93 +531,149 @@ def writeNuMADAirfoil(coords, reftext, fname):
         string representing reference text
     fname : string
         full filename, incl extension, of NuMAD airfoil file to write
-    
+
     Returns
    -------
     None
     """
-    with open(fname,'wt') as fid:
-        fid.write('\n%s\n' % (reftext))
-        fid.write('\n' % ())
+    with open(fname, "wt") as fid:
+        fid.write("\n%s\n" % (reftext))
+        fid.write("\n" % ())
         for i in range(coords.shape[0]):
-            fid.write('%8.12f\t%8.12f\n' % tuple(coords[i,:]))
-        fid.write('' % ())
-
-
+            fid.write("%8.12f\t%8.12f\n" % tuple(coords[i, :]))
+        fid.write("" % ())
+
+
 def update_internal_structure(blade_internal_structure, blade_outer_shape_bem):
-    bladeParts=['layers','webs']
+    bladeParts = ["layers", "webs"]
     # Make sure each blade.ispan has layer thicknesses and widths
-    fullSpanGrid=np.array(blade_outer_shape_bem['chord']['grid'])
-    nStations=len(fullSpanGrid)
-    keysToModify={'offset_y_pa','thickness','fiber_orientation','width', 'start_nd_arc', 'end_nd_arc'}
+    fullSpanGrid = 
np.array(blade_outer_shape_bem["chord"]["grid"])
+    nStations = len(fullSpanGrid)
+    keysToModify = {
+        "offset_y_pa",
+        "thickness",
+        "fiber_orientation",
+        "width",
+        "start_nd_arc",
+        "end_nd_arc",
+    }
     for partName in bladeParts:
         N_layer_comp = len(blade_internal_structure[partName])
         for currentLayer in range(N_layer_comp):
-            layerKeys=set(blade_internal_structure[partName][currentLayer].keys())
+            layerKeys = set(blade_internal_structure[partName][currentLayer].keys())
             for currentKey in keysToModify.intersection(layerKeys):
-                grid=blade_internal_structure[partName][currentLayer][currentKey]['grid']
-                values=blade_internal_structure[partName][currentLayer][currentKey]['values']
-                startStationLoc=grid[0]
-                endStationLoc=grid[-1]
-
-                subSpanGridIndex=np.where((fullSpanGrid>=startStationLoc) & (fullSpanGrid <=endStationLoc))[0]
-
-                #iterpolate fullSpanGrid locations onto layer grid defined in the yamle file for the layer
-                subSpanValues=interpolator_wrap(grid,values,fullSpanGrid[subSpanGridIndex],'pchip')
-                fullSpanValues=np.zeros(nStations)
+                grid = blade_internal_structure[partName][currentLayer][currentKey][
+                    "grid"
+                ]
+                values = blade_internal_structure[partName][currentLayer][currentKey][
+                    "values"
+                ]
+                startStationLoc = grid[0]
+                endStationLoc = grid[-1]
+
+                subSpanGridIndex = np.where(
+                    (fullSpanGrid >= startStationLoc) & (fullSpanGrid <= endStationLoc)
+                )[0]
+
+                # interpolate fullSpanGrid locations onto the layer grid defined in the yaml file for the layer
+                subSpanValues = interpolator_wrap(
+                    grid, values, fullSpanGrid[subSpanGridIndex], "pchip"
+                )
+                fullSpanValues = np.zeros(nStations)
+
+                fullSpanValues[subSpanGridIndex] = subSpanValues
+
+                # Reset
+                blade_internal_structure[partName][currentLayer][currentKey][
+                    "grid"
+                ] = fullSpanGrid
+                blade_internal_structure[partName][currentLayer][currentKey][
+                    "values"
+                ] = fullSpanValues
+    return blade_internal_structure
 
-                fullSpanValues[subSpanGridIndex]=subSpanValues
 
-                #Reset
-                blade_internal_structure[partName][currentLayer][currentKey]['grid']=fullSpanGrid
-                blade_internal_structure[partName][currentLayer][currentKey]['values']=fullSpanValues
-    return blade_internal_structure
-
 def _add_spar_caps(blade, blade_structure_dict):
-    sparCapKeys=fullKeysFromSubStrings(blade_structure_dict.keys(),['spar'])
+    sparCapKeys = fullKeysFromSubStrings(blade_structure_dict.keys(), ["spar"])
 
     if len(sparCapKeys) != 2:
-        raise ValueError('Incorrect number of spar cap components')
+        raise ValueError("Incorrect number of spar cap components")
 
     for iSparCap in range(2):
-        if 'suc' in blade_structure_dict[sparCapKeys[iSparCap]]['side'].lower():
-            lpSideIndex=iSparCap
-        if 'pres' in blade_structure_dict[sparCapKeys[iSparCap]]['side'].lower():
-            hpSideIndex=iSparCap
-
-    blade.sparcapwidth_lp = blade_structure_dict[sparCapKeys[lpSideIndex]]['width']['values']*1000
+        if "suc" in blade_structure_dict[sparCapKeys[iSparCap]]["side"].lower():
+            lpSideIndex = iSparCap
+        if "pres" in blade_structure_dict[sparCapKeys[iSparCap]]["side"].lower():
+            hpSideIndex = iSparCap
+
+    blade.sparcapwidth_lp = (
+        blade_structure_dict[sparCapKeys[lpSideIndex]]["width"]["values"] * 1000
+    )
     try:
-        blade.sparcapoffset_lp = blade_structure_dict[sparCapKeys[lpSideIndex]]['offset_y_pa']['values']*1000
+        blade.sparcapoffset_lp = (
+            blade_structure_dict[sparCapKeys[lpSideIndex]]["offset_y_pa"]["values"]
+            * 1000
+        )
     except KeyError:
-        blade.sparcap_start_nd_arc = blade_structure_dict[sparCapKeys[lpSideIndex]]['start_nd_arc']['values']
-        blade.sparcap_end_nd_arc = 
blade_structure_dict[sparCapKeys[lpSideIndex]]['end_nd_arc']['values'] - - blade.sparcapwidth_hp = blade_structure_dict[sparCapKeys[hpSideIndex]]['width']['values']*1000 + blade.sparcap_start_nd_arc = blade_structure_dict[sparCapKeys[lpSideIndex]][ + "start_nd_arc" + ]["values"] + blade.sparcap_end_nd_arc = blade_structure_dict[sparCapKeys[lpSideIndex]][ + "end_nd_arc" + ]["values"] + + blade.sparcapwidth_hp = ( + blade_structure_dict[sparCapKeys[hpSideIndex]]["width"]["values"] * 1000 + ) try: - blade.sparcapoffset_hp = blade_structure_dict[sparCapKeys[hpSideIndex]]['offset_y_pa']['values']*1000 + blade.sparcapoffset_hp = ( + blade_structure_dict[sparCapKeys[hpSideIndex]]["offset_y_pa"]["values"] + * 1000 + ) except KeyError: - blade.sparcap_start_nd_arc = blade_structure_dict[sparCapKeys[hpSideIndex]]['start_nd_arc']['values'] - blade.sparcap_end_nd_arc = blade_structure_dict[sparCapKeys[hpSideIndex]]['end_nd_arc']['values'] + blade.sparcap_start_nd_arc = blade_structure_dict[sparCapKeys[hpSideIndex]][ + "start_nd_arc" + ]["values"] + blade.sparcap_end_nd_arc = blade_structure_dict[sparCapKeys[hpSideIndex]][ + "end_nd_arc" + ]["values"] return blade + def _add_te_bands(blade, blade_structure_dict): - teReinfKeys=fullKeysFromSubStrings(blade_structure_dict.keys(),['te','reinf']) - if len(teReinfKeys)==1: - blade.teband = blade_structure_dict[teReinfKeys[0]]['width']['values']*1000 / 2 - elif len(teReinfKeys)==2: - blade.teband = (blade_structure_dict[teReinfKeys[0]]['width']['values'] + - blade_structure_dict[teReinfKeys[1]]['width']['values'])*1000 / 2 + teReinfKeys = fullKeysFromSubStrings(blade_structure_dict.keys(), ["te", "reinf"]) + if len(teReinfKeys) == 1: + blade.teband = ( + blade_structure_dict[teReinfKeys[0]]["width"]["values"] * 1000 / 2 + ) + elif len(teReinfKeys) == 2: + blade.teband = ( + ( + blade_structure_dict[teReinfKeys[0]]["width"]["values"] + + blade_structure_dict[teReinfKeys[1]]["width"]["values"] + ) + * 1000 + / 2 + ) else: - raise ValueError('Unknown number of TE reinforcements') + raise ValueError("Unknown number of TE reinforcements") return blade + def _add_le_bands(blade, blade_structure_dict): - leReinfKeys=fullKeysFromSubStrings(blade_structure_dict.keys(),['le','reinf']) - if len(leReinfKeys)==1: - blade.leband = blade_structure_dict[leReinfKeys[0]]['width']['values']*1000 / 2 - elif len(leReinfKeys)==2: - blade.leband = (blade_structure_dict[leReinfKeys[0]]['width']['values'] + - blade_structure_dict[leReinfKeys[1]]['width']['values'])*1000 / 2 + leReinfKeys = fullKeysFromSubStrings(blade_structure_dict.keys(), ["le", "reinf"]) + if len(leReinfKeys) == 1: + blade.leband = ( + blade_structure_dict[leReinfKeys[0]]["width"]["values"] * 1000 / 2 + ) + elif len(leReinfKeys) == 2: + blade.leband = ( + ( + blade_structure_dict[leReinfKeys[0]]["width"]["values"] + + blade_structure_dict[leReinfKeys[1]]["width"]["values"] + ) + * 1000 + / 2 + ) else: - raise ValueError('Invalid number of LE reinforcements') - return blade \ No newline at end of file + raise ValueError("Invalid number of LE reinforcements") + return blade diff --git a/src/pynumad/objects/Airfoil.py b/src/pynumad/objects/Airfoil.py index d4a8708..0e3fc99 100644 --- a/src/pynumad/objects/Airfoil.py +++ b/src/pynumad/objects/Airfoil.py @@ -10,8 +10,9 @@ from pynumad.utils.interpolation import interpolator_wrap from numpy import ndarray - -class Airfoil(): + + +class Airfoil: """Airfoil object Parameters @@ -33,12 +34,12 @@ class Airfoil(): c : array Computed by NuMAD camber : array - Camber line as 
a function of x. - Distance in percent chord between LP and HP curves. + Camber line as a function of x. + Distance in percent chord between LP and HP curves. Computed by pyNuMAD. thickness : float - Relative thickness as a function of the x coordinate. - Values between 0 and 1, where 1 corresponds to maximum thickness. + Relative thickness as a function of the x coordinate. + Values between 0 and 1, where 1 corresponds to maximum thickness. Computed by pyNuMAD. percentthick : float Maximum airfoil thickness as a percentage of chord length [#] @@ -47,25 +48,30 @@ class Airfoil(): TEtype : str Options, 'round', 'sharp', or 'flat' """ - def __init__(self, filename = None, coords = None, ref = None): - self.name : str = None - self.reference : str = None - self.coordinates : ndarray = None - self.c : ndarray = None - self.camber : ndarray = None - self.thickness : float = None - self.percentthick : float = None - self.maxthick : float = None - self.TEtype : str = None + + def __init__(self, filename=None, coords=None, ref=None): + self.name: str = None + self.reference: str = None + self.coordinates: ndarray = None + self.c: ndarray = None + self.camber: ndarray = None + self.thickness: float = None + self.percentthick: float = None + self.maxthick: float = None + self.TEtype: str = None if filename: # currently assuming XML format # Use the base filename as the airfoil name - #TODO fix finding fn - __,fn,__ = os.path.split(filename)[0],os.path.splitext(os.path.split(filename)[1])[0],os.path.splitext(os.path.split(filename)[1])[1] + # TODO fix finding fn + __, fn, __ = ( + os.path.split(filename)[0], + os.path.splitext(os.path.split(filename)[1])[0], + os.path.splitext(os.path.split(filename)[1])[1], + ) self.name = fn # Open the file and read the entire contents - with open(filename,'r') as f: + with open(filename, "r") as f: file_contents = f.read().splitlines() self.read_xml(file_contents) @@ -76,30 +82,29 @@ def __init__(self, filename = None, coords = None, ref = None): else: try: # check if ref is a string and coords an array - ref*5 - coords*5 + ref * 5 + coords * 5 # if so, assign as attributes self.name = ref self.reference = ref self.coordinates = coords except TypeError: # otherwise, provide default AF profile - self.name = 'circular' - self.reference = '' - theta = np.linspace(0,np.pi,50) - theta = np.concatenate((theta,[np.pi],theta[1:-1])) + self.name = "circular" + self.reference = "" + theta = np.linspace(0, np.pi, 50) + theta = np.concatenate((theta, [np.pi], theta[1:-1])) xcoord = 0.5 * np.cos(-1 * theta) + 0.5 ycoord = 0.5 * np.sin(-1 * theta) - self.coordinates = np.stack((xcoord,ycoord),axis=1) + self.coordinates = np.stack((xcoord, ycoord), axis=1) self.manageTE() - # Properties @property def x(self): - """Horizontal axis of Airfoil shape coordinates Working - clockwise starting from the TE to the LE and back to the TE. - LE must be at (1,0) and TE at (0,0). + """Horizontal axis of Airfoil shape coordinates Working + clockwise starting from the TE to the LE and back to the TE. + LE must be at (1,0) and TE at (0,0). Needed only by ``AirfoilDef.plot`` TODO docstring @@ -108,13 +113,12 @@ def x(self): xcoord = np.concatenate([[cc[-1]], np.flipud(cc), cc[1:], [cc[-1]]]) return xcoord - - #TODO check this func + # TODO check this func @property def y(self): - """Vertical axis of Airfoil shape coordinates - Working clockwise starting from the TE to the LE and back to the TE. - LE must be at (1,0) and TE at (0,0). 
+ """Vertical axis of Airfoil shape coordinates + Working clockwise starting from the TE to the LE and back to the TE. + LE must be at (1,0) and TE at (0,0). Needed only by ``AirfoilDef.plot`` TODO docstring @@ -124,20 +128,18 @@ def y(self): ycoord = np.concatenate(([0], np.flipud(hp), lp[1:], [0])) return ycoord - ### IO def read_xml(self, filename): """ TODO docstring """ - xml_to_airfoil(self,filename) + xml_to_airfoil(self, filename) return self - def manageTE(self): """TODO docstring - + Parameters ---------- @@ -145,30 +147,31 @@ def manageTE(self): ------- """ - unitNormals=getAirfoilNormals(self.coordinates) - angleChange=getAirfoilNormalsAngleChange(unitNormals) - discontinuities = np.flatnonzero(angleChange>45) - + unitNormals = getAirfoilNormals(self.coordinates) + angleChange = getAirfoilNormalsAngleChange(unitNormals) + discontinuities = np.flatnonzero(angleChange > 45) + if discontinuities.shape[0] == 2: - #Flatback piece in airfoil. delete for resampling - if (discontinuities[0] == 0) & (discontinuities[1] == angleChange.shape[0] - 1): + # Flatback piece in airfoil. delete for resampling + if (discontinuities[0] == 0) & ( + discontinuities[1] == angleChange.shape[0] - 1 + ): indexToDelete = 0 else: indexToDelete = angleChange.shape[0] - 1 - newcoords = np.delete(self.coordinates,indexToDelete,0) + newcoords = np.delete(self.coordinates, indexToDelete, 0) self.coordinates = newcoords - + # ddof set to 1 to match default matlab behavior - if np.std(angleChange,ddof=1) < 1: - self.TEtype='round' + if np.std(angleChange, ddof=1) < 1: + self.TEtype = "round" return self - ### Geometry - - def resample(self,n_samples: int = 150,spacing: str = 'cosine'): + + def resample(self, n_samples: int = 150, spacing: str = "cosine"): """Resample airfoil coordinates - + Parameters ---------- n_samples : int @@ -176,43 +179,42 @@ def resample(self,n_samples: int = 150,spacing: str = 'cosine'): spacing : str spacing method for new samples spacing = 'auto' | 'half-cosine' | 'cosine' - + Returns ------- None - + Example ------- AirfoilDef.resample af.resample(n_samples,spacing) af.resample(200,'half-cosine'); """ - af_out = resampleAirfoil(self.coordinates,n_samples,spacing) - xcoord = af_out[:,0] - ycoord = af_out[:,1] + af_out = resampleAirfoil(self.coordinates, n_samples, spacing) + xcoord = af_out[:, 0] + ycoord = af_out[:, 1] # self(k).percentthick = (max(ycoord) - min(ycoord))*100; - self.c, self.camber, self.thickness = computeCamberAndThickness(xcoord,ycoord) + self.c, self.camber, self.thickness = computeCamberAndThickness(xcoord, ycoord) m = np.max(self.thickness) i = np.argmax(self.thickness) self.percentthick = m * 100 self.maxthick = self.c[i] - if not self.TEtype or ('round' not in self.TEtype): + if not self.TEtype or ("round" not in self.TEtype): if np.abs(self.thickness[-1]) < 1e-4: - self.TEtype = 'sharp' + self.TEtype = "sharp" else: - self.TEtype = 'flatback' + self.TEtype = "flatback" return self - - #currently unused + # currently unused def adjustTE(self, tet, tes, onset): """TODO docstring - + Parameters ---------- tet : the amount of TE thickness to add - tes : + tes : the slope of the added thickness profile at TE, defaults to 5/3 * TE_thick onset : @@ -231,35 +233,33 @@ def adjustTE(self, tet, tes, onset): """ if not tes: - tes = 5 / 3 * tet # slope of TE adjustment; 5/3*tet is "natural" - + tes = 5 / 3 * tet # slope of TE adjustment; 5/3*tet is "natural" + if not onset: USEMAXTHICK = True else: - USEMAXTHICK = False # use the given 'onset' instead + USEMAXTHICK = 
False # use the given 'onset' instead # continuous first & second derivatives at 'onset' # maintain second & third derivative at mc==1 (TE) # adjust slope at mc==1 (TE) by tes - A = np.array([[1,1,1,1],[3,4,5,6],[6,12,20,30],[6,24,60,120]]) - d = np.array([[tet],[tes],[0],[0]]) - p = np.linalg.solve(A,d) + A = np.array([[1, 1, 1, 1], [3, 4, 5, 6], [6, 12, 20, 30], [6, 24, 60, 120]]) + d = np.array([[tet], [tes], [0], [0]]) + p = np.linalg.solve(A, d) if USEMAXTHICK: onset = self.maxthick - mc = np.amax((self.c - onset) / (1 - onset),0) - temod = np.array([mc ** 3,mc ** 4,mc ** 5,mc ** 6]) * p + mc = np.amax((self.c - onset) / (1 - onset), 0) + temod = np.array([mc**3, mc**4, mc**5, mc**6]) * p self.thickness = self.thickness + temod return self + ### Plotting - ### Plotting - - def plotAirfoil(self): - """ Plot airfoil - """ + def plotAirfoil(self): + """Plot airfoil""" fig, ax = plt.subplots() # ax[0].plot(self.x,self.y,'.-') - ax.plot(self.coordinates[:,0],self.coordinates[:,1],'.-') - ax.plot(self.c,self.camber) + ax.plot(self.coordinates[:, 0], self.coordinates[:, 1], ".-") + ax.plot(self.c, self.camber) # mtx = self.maxthick * np.array([1,1]) # kn = find(self.c >= self.maxthick,1) # mty = self.camber(kn) + self.thickness(kn) * np.array([0.5,- 0.5]) @@ -271,7 +271,8 @@ def plotAirfoil(self): ### Helper functions -def resampleAirfoil(af_in, n_samples, spacing): + +def resampleAirfoil(af_in, n_samples, spacing): """Resample airfoil coordinates Parameters @@ -281,24 +282,24 @@ def resampleAirfoil(af_in, n_samples, spacing): n_samples : int number of points to be created around surface spacing : string - spacing routine to be used: + spacing routine to be used: 'cosine', 'half-cosine', 'constant', 'auto' - + Returns ------- - af_out : array + af_out : array array containing n_samples+1 airfoil points - + Cosine spacing: puts higher density of points at both LE and TE; constant arc length point spacing around a perfect circle. Half-cosine spacing: puts higher density of points at LE and lesser density of points at TE constant spacing: constant spacing of points along chord line auto: choose between Cosine and Half-cosine based on steepness of TE - + Assumes coordinates begin at trailing edge (x,y)=(1,0) and trace out the HP surface, then the LP surface. - + Flatback airfoil inputs are designated by ensuring the following: Point #1 = (1,0) (center of trailing edge) Point #2 = (1,-y) where y~=0 (HP corner of flatback trailing edge) @@ -327,102 +328,118 @@ def resampleAirfoil(af_in, n_samples, spacing): if af_in.shape[1] != 2: tmpN = af_in.shape[0] tmpM = af_in.shape[1] - warnings.warn('af_in array was defined in '+ str(tmpN)+'x'+str(tmpM)+' array. Automatically changing it to an '+str(tmpM)+'x'+str(tmpN)+' array.') + warnings.warn( + "af_in array was defined in " + + str(tmpN) + + "x" + + str(tmpM) + + " array. Automatically changing it to an " + + str(tmpM) + + "x" + + str(tmpN) + + " array." 
+ ) af_in = np.transpose(af_in) - + # End error checking routines xy = af_in - - #Calculate arc length of xy points clockwise from trailing edge + + # Calculate arc length of xy points clockwise from trailing edge n_points = xy.shape[0] t = np.zeros(n_points) - for i in range(1,n_points): - #formula: t(i) = hypot( x(i)-x(i-1) , y(i)-y(i-1) ) + t(i-1); - t[i] = np.hypot(xy[i,0] - xy[i-1,0],xy[i,1] - xy[i-1,1]) + t[i-1] - - #Get total arc length + for i in range(1, n_points): + # formula: t(i) = hypot( x(i)-x(i-1) , y(i)-y(i-1) ) + t(i-1); + t[i] = np.hypot(xy[i, 0] - xy[i - 1, 0], xy[i, 1] - xy[i - 1, 1]) + t[i - 1] + + # Get total arc length arc_length = t[-1] - #Spline airfoil with many points + # Spline airfoil with many points oversample = 10000 delta = arc_length / (oversample - 1) # The manypoints range from 0 to total arc_length, adding a bit on each # side so that flatbacks extend past x=1 after rotation corrections. - manypoints = np.linspace(- delta,arc_length + delta,num = oversample+2) - spline_type = 'pchip' - if (np.array(['linear','pchip','spline']) == spline_type).any(): - xxyy = interpolator_wrap(t,xy,manypoints,spline_type) + manypoints = np.linspace(-delta, arc_length + delta, num=oversample + 2) + spline_type = "pchip" + if (np.array(["linear", "pchip", "spline"]) == spline_type).any(): + xxyy = interpolator_wrap(t, xy, manypoints, spline_type) else: - print('Airfoil oversampling algorithm specified is not an available option. Defaulting to "spline".' % ()) - xxyy = interpolator_wrap(t,xy,manypoints,'spline') - + print( + 'Airfoil oversampling algorithm specified is not an available option. Defaulting to "spline".' + % () + ) + xxyy = interpolator_wrap(t, xy, manypoints, "spline") + # Normalize the airfoil: # correct rotation so that LE is at (0,0) and TE is at (1,0). # jcb: Technially, the LE is at the point of max curvature, but that # definition can produce situations that break the interpolation step. # Instead, we define the LE as the point that is the furthest distance from # the TE. 
- xyTE = np.array((np.mean([xxyy[0,0],xxyy[-1,0]]), np.mean([xxyy[0,1],xxyy[-1,1]]))) - xxyy = xxyy - np.tile(xyTE,(xxyy.shape[0],1)) - rays = np.hypot(xxyy[:,0],xxyy[:,1]) # distance of each point from the TE + xyTE = np.array( + (np.mean([xxyy[0, 0], xxyy[-1, 0]]), np.mean([xxyy[0, 1], xxyy[-1, 1]])) + ) + xxyy = xxyy - np.tile(xyTE, (xxyy.shape[0], 1)) + rays = np.hypot(xxyy[:, 0], xxyy[:, 1]) # distance of each point from the TE max_ray = np.max(rays) max_point = np.argmax(rays) - ray_angle = np.arctan2(xxyy[max_point,1],- xxyy[max_point,0]) - xxyy = rotate2d(xxyy,ray_angle) - xxyy = xxyy / max_ray + np.tile(np.array([1,0]),(xxyy.shape[0],1)) - - #Separate into high and low pressure surfaces - HP = xxyy[0:max_point+1,:] # HP points progress from TE (x=1) to LE (x=0) - LP = xxyy[max_point:,:] # LP points progress from LE (x=0) to TE (x=1) - + ray_angle = np.arctan2(xxyy[max_point, 1], -xxyy[max_point, 0]) + xxyy = rotate2d(xxyy, ray_angle) + xxyy = xxyy / max_ray + np.tile(np.array([1, 0]), (xxyy.shape[0], 1)) + + # Separate into high and low pressure surfaces + HP = xxyy[0 : max_point + 1, :] # HP points progress from TE (x=1) to LE (x=0) + LP = xxyy[max_point:, :] # LP points progress from LE (x=0) to TE (x=1) + # if 'auto', determine which spacing algorithm to use - if spacing == 'auto': - dx = xxyy[1,0] - xxyy[2,0] + if spacing == "auto": + dx = xxyy[1, 0] - xxyy[2, 0] # If x-spacing of the oversampled data at the TE is below a threshold, # assume that cosine spacing would be best choice for the profile. if dx < 1 / (10 * oversample): - spacing = 'cosine' + spacing = "cosine" else: - spacing = 'half-cosine' - - #Calculate x points based on spacing algorithm specified + spacing = "half-cosine" + + # Calculate x points based on spacing algorithm specified n_panels = int(np.trunc(n_samples / 2) - 1) - #NOTE might need to workshop switch cases here - if 'cosine' == spacing: - beta = np.linspace(0,np.pi,n_panels + 1) + # NOTE might need to workshop switch cases here + if "cosine" == spacing: + beta = np.linspace(0, np.pi, n_panels + 1) x_fwd = 0.5 * (1 - np.cos(beta)) - elif 'half-cosine' == spacing: - beta = np.linspace(0,np.pi / 2,n_panels + 1) - x_fwd = (1 - np.cos(beta)) - elif 'constant' == spacing: - x_fwd = np.linspace(0,1,n_panels + 1) + elif "half-cosine" == spacing: + beta = np.linspace(0, np.pi / 2, n_panels + 1) + x_fwd = 1 - np.cos(beta) + elif "constant" == spacing: + x_fwd = np.linspace(0, 1, n_panels + 1) else: - raise Exception('Resampling algorithm specified is not an available option') - - x_fwd = x_fwd # make x_fwd a column vector - x_rev = np.flipud(x_fwd) # x_rev values are nominally 1 to 0 - - #Calculate interpolated airfoil points. For sharp trailing edge airfoils, - #the trailing edge point is not repeated - #NOTE need to address 'extrap' option used in matlab code below - LP_new = np.stack((x_fwd,interpolator_wrap(LP[:,0],LP[:,1],x_fwd)),axis=1) - HP_new = np.stack((x_rev,interpolator_wrap(HP[:,0],HP[:,1],x_rev)),axis=1) - + raise Exception("Resampling algorithm specified is not an available option") + + x_fwd = x_fwd # make x_fwd a column vector + x_rev = np.flipud(x_fwd) # x_rev values are nominally 1 to 0 + + # Calculate interpolated airfoil points. 
For sharp trailing edge airfoils,
+    # the trailing edge point is not repeated
+    # NOTE need to address 'extrap' option used in matlab code below
+    LP_new = np.stack((x_fwd, interpolator_wrap(LP[:, 0], LP[:, 1], x_fwd)), axis=1)
+    HP_new = np.stack((x_rev, interpolator_wrap(HP[:, 0], HP[:, 1], x_rev)), axis=1)
+
     # Make sure that LE point is at (0,0)
-    HP_new[-1,:] = np.array([0,0])
-    xyTE = np.array([1,0])
+    HP_new[-1, :] = np.array([0, 0])
+    xyTE = np.array([1, 0])
 
-    #Assemble the two curves into a continuous line
-    af_out = np.concatenate((xyTE.reshape(1,-1),HP_new,
-        LP_new[1:,:],xyTE.reshape(1,-1)),axis=0)
+    # Assemble the two curves into a continuous line
+    af_out = np.concatenate(
+        (xyTE.reshape(1, -1), HP_new, LP_new[1:, :], xyTE.reshape(1, -1)), axis=0
+    )
     return af_out
-    
+
 
 def getAirfoilNormals(coordinates):
-    """Method finds which airfoil is flatback. 
-    
+    """Method finds which airfoil is flatback.
+
     If points are placed in flatback region, they are
     removed for good resampling results. Currently this removal only works
@@ -438,22 +455,24 @@ def getAirfoilNormals(coordinates):
     -------
     """
     nPoints = coordinates.shape[0]
-    unitNormals = np.zeros((nPoints - 1,2))
-    for iPoint in range(0,nPoints - 1):
-        currentPoint = coordinates[iPoint,:]
-        nextPoint = coordinates[iPoint + 1,:]
-        r = nextPoint - currentPoint # Postion vector from currentPoint to nextPoint
-        if (np.abs(r[0]) + np.abs(r[1])) != 0: # Skip if points are coincedint
-            unitNorm = np.transpose(sp.linalg.null_space(r.reshape(1,-1)))
-            crossProduct = np.cross(np.concatenate((r,[0])),np.concatenate((unitNorm.reshape(-1),[0])))
+    unitNormals = np.zeros((nPoints - 1, 2))
+    for iPoint in range(0, nPoints - 1):
+        currentPoint = coordinates[iPoint, :]
+        nextPoint = coordinates[iPoint + 1, :]
+        r = nextPoint - currentPoint  # Position vector from currentPoint to nextPoint
+        if (np.abs(r[0]) + np.abs(r[1])) != 0:  # Skip if points are coincident
+            unitNorm = np.transpose(sp.linalg.null_space(r.reshape(1, -1)))
+            crossProduct = np.cross(
+                np.concatenate((r, [0])), np.concatenate((unitNorm.reshape(-1), [0]))
+            )
             if crossProduct[2] < 0:
-                unitNorm = - unitNorm
-            unitNormals[iPoint,:] = unitNorm
+                unitNorm = -unitNorm
+            unitNormals[iPoint, :] = unitNorm
         else:
-            unitNormals[iPoint,:] = np.array([np.nan,np.nan])
-
+            unitNormals[iPoint, :] = np.array([np.nan, np.nan])
+
     return unitNormals
-
+
     # for iPoint=1:nPoints
    #     text(coordinates(iPoint,1),coordinates(iPoint,2),num2str(iPoint),'Color','b')
     # end
@@ -464,34 +483,34 @@ def getAirfoilNormalsAngleChange(unitNormals):
     TODO: Docstring
     TODO: Test
     """
-    #Find the angle changes between adjacent unit vectors
+    # Find the angle changes between adjacent unit vectors
     nPoints = unitNormals.shape[0]
     angleChange = np.zeros(nPoints)
-    for iVector in range(0,nPoints-1):
-        currentVector = unitNormals[iVector,:]
-        nextVector = unitNormals[iVector + 1,:]
-        idotted = np.dot(currentVector,nextVector)
+    for iVector in range(0, nPoints - 1):
+        currentVector = unitNormals[iVector, :]
+        nextVector = unitNormals[iVector + 1, :]
+        idotted = np.dot(currentVector, nextVector)
         angleChange[iVector] = np.rad2deg(np.arccos(idotted))
-
-    #angle change between last point and first point
-    currentVector = unitNormals[-1,:]
-    nextVector = unitNormals[0,:]
+
+    # angle change between last point and first point
+    currentVector = unitNormals[-1, :]
+    nextVector = unitNormals[0, :]
     dotted = np.dot(currentVector, nextVector)
     angleChange[-1] = np.rad2deg(np.arccos(dotted))
     return angleChange
 
 
-def rotate2d(xyin, angle):
+def 
rotate2d(xyin, angle): """ NOTE: might be able to use affinetrans module here TODO: Docstring TODO: Test """ - xyout1 = np.cos(angle) * xyin[:,0] - np.sin(angle) * xyin[:,1] - xyout2 = np.sin(angle) * xyin[:,0] + np.cos(angle) * xyin[:,1] - xyout = np.stack((xyout1,xyout2),axis=1) + xyout1 = np.cos(angle) * xyin[:, 0] - np.sin(angle) * xyin[:, 1] + xyout2 = np.sin(angle) * xyin[:, 0] + np.cos(angle) * xyin[:, 1] + xyout = np.stack((xyout1, xyout2), axis=1) return xyout - + def computeCamberAndThickness(x, y): """ @@ -502,34 +521,33 @@ def computeCamberAndThickness(x, y): LE = int(np.trunc(n_samples / 2)) xhp = x[LE:0:-1] xlp = x[LE:-1] - assert len(xhp) == len(xlp),'Error computing camber and thickness.' - #NOTE unsure of how to translate `eps` from matlab -kb - #assert(sum(xhp - xlp) < np.finfo(1.0),'Upper and lower surface x-coordinates must align.') + assert len(xhp) == len(xlp), "Error computing camber and thickness." + # NOTE unsure of how to translate `eps` from matlab -kb + # assert(sum(xhp - xlp) < np.finfo(1.0),'Upper and lower surface x-coordinates must align.') yhp = y[LE:0:-1] ylp = y[LE:-1] c = xlp camber = (yhp + ylp) / 2 thickness = np.abs(ylp - yhp) - return c,camber,thickness - + return c, camber, thickness + def readAirfoilColumns(filecontents): - """ - """ + """ """ # All of these file formats assume that the # LE is at (0,0) and the TE is at (1,0) - raw = re.findall('[^\n\r]*',filecontents) # get lines - + raw = re.findall("[^\n\r]*", filecontents) # get lines + Nraw = np.asarray(raw).size kh = 1 # index counter for header lines kt = 1 # index counter for tables kr = 1 # index counter for rows header = [] table = [] - for k in range(0,Nraw+1): + for k in range(0, Nraw + 1): # try to read pairs of coordinates - pair = cell2mat(textscan(raw[k],'%f %f')) - if len(pair)==0: + pair = cell2mat(textscan(raw[k], "%f %f")) + if len(pair) == 0: if kt > 1 or kr > 1: # then move to a new table kt = kt + 1 @@ -540,52 +558,52 @@ def readAirfoilColumns(filecontents): kh = kh + 1 else: # place coordinate pair in table - table[kt][kr,:] = pair + table[kt][kr, :] = pair kr = kr + 1 - + if np.asarray(table).size == 1: # assume points wrap around either LE or TE coords = table[0] - if coords[0,0] < 0 or coords[0,0] > 1: - raise Exception('First x-coordinate not in range 0..1') - dc = np.diff(coords[:,1]) + if coords[0, 0] < 0 or coords[0, 0] > 1: + raise Exception("First x-coordinate not in range 0..1") + dc = np.diff(coords[:, 1]) dc = dc * np.sign(dc[1]) - k = np.find(dc < 0,1) - sideA = coords[0:k+1:2] + k = np.find(dc < 0, 1) + sideA = coords[0 : k + 1 : 2] sideB = coords[k:-1:2] if np.mean(sideA) > np.mean(sideB): # LP (upper) surface given first, so flipud # disp('LP first'); coords = np.flipud(coords) - k = coords.shape[1-1] - k + 1 - if (1 - coords[0,0]) > 0.5: + k = coords.shape[1 - 1] - k + 1 + if (1 - coords[0, 0]) > 0.5: # coordinates begin at LE and wrap around TE # disp('TE wrap'); - if coords[0,:]==coords[-1,:]: - coords[-1,:] = [] - coords = np.concatenate([ - [coords[k,0,-1,:]], - [coords[-1,k,-1,:]]]) + if coords[0, :] == coords[-1, :]: + coords[-1, :] = [] + coords = np.concatenate([[coords[k, 0, -1, :]], [coords[-1, k, -1, :]]]) else: if np.asarray(table).size == 3: # assume "Lednicer's" format - #(see http://www.ae.illinois.edu/m-selig/ads.html) + # (see http://www.ae.illinois.edu/m-selig/ads.html) npoints = table[0] lp = table[2] hp = table[3] - if npoints.shape[1-1] != 1: - raise Exception('Format similar to "Lednicers", but more than one row found for table sizes') 
- if hp[0,:]==lp[0,:]: - lp[1,:] = [] - coords = np.concatenate([[np.flipud(hp)],[lp]]) + if npoints.shape[1 - 1] != 1: + raise Exception( + 'Format similar to "Lednicers", but more than one row found for table sizes' + ) + if hp[0, :] == lp[0, :]: + lp[1, :] = [] + coords = np.concatenate([[np.flipud(hp)], [lp]]) else: - raise Exception('File format not recognized') - + raise Exception("File format not recognized") + if np.asarray(header).size >= 1: reference = header[0] - for k in np.arange(2,np.asarray(header).size+1).reshape(-1): - reference = '%s\n%s' % (reference,header[k]) + for k in np.arange(2, np.asarray(header).size + 1).reshape(-1): + reference = "%s\n%s" % (reference, header[k]) else: - reference = '' - - return coords,reference + reference = "" + + return coords, reference diff --git a/src/pynumad/objects/Blade.py b/src/pynumad/objects/Blade.py index 0a812d1..e79644a 100644 --- a/src/pynumad/objects/Blade.py +++ b/src/pynumad/objects/Blade.py @@ -14,7 +14,8 @@ # for type hints from numpy import ndarray -class Blade(): + +class Blade: """BladeDef A class definition for wind & water turbine blades. Parameters @@ -23,79 +24,79 @@ class Blade(): Attributes ---------- - aerocenter : array + aerocenter : array Aerodynamic center of airfoil (used only by NuMAD->FAST) - chord : array - Chord distribution [m] - chordoffset : array + chord : array + Chord distribution [m] + chordoffset : array Chordwise offset (in addition to natural offset) - components : list + components : list Blade components such as spar, panels, etc., refer to ``ComponentDef`` - degreestwist : array + degreestwist : array Twist distribution [degrees] - ispan : array + ispan : array Spanwise locations of interpolated output - leband : float + leband : float Location of keypoint a - materials : list + materials : list Material properties, refer to ``MaterialDef`` - mesh : float + mesh : float Approximate element edge size for FE model [m] - percentthick : array + percentthick : array Percent thickness of airfoil [%] - prebend : array + prebend : array Blade prebend, reference axis location along x2 [m] - span : array + span : array Spanwise location of distributed properties [m] - sparcapoffset : array + sparcapoffset : array (Does Nothing) - sparcapwidth : array - Locations of keypoints b & c, defines distance - between keypoints b & c [mm]. First entry is the HP spar cap. + sparcapwidth : array + Locations of keypoints b & c, defines distance + between keypoints b & c [mm]. First entry is the HP spar cap. 
Second entry is the LP spar cap - stations : list - Blade Stations, define the camber and thickness along the blade, + stations : list + Blade Stations, define the camber and thickness along the blade, refer to ``StationDef`` - sweep : array - Blade Sweep, Reference axis location along x1 [m] + sweep : array + Blade Sweep, Reference axis location along x1 [m] self.teband : float - idegreestwist : array + idegreestwist : array interpolated twist - ichord : array + ichord : array interpolated chord - ipercentthick : array + ipercentthick : array interpolated thickness self.ic : array self.icamber : array self.ithickness : array - ichordoffset : array + ichordoffset : array interpolated offset - iaerocenter : array + iaerocenter : array interpolated aerocenter - isweep : array + isweep : array interpolated sweep - iprebend : array + iprebend : array interpolated prebend - xoffset : array + xoffset : array natural offset - profiles : array + profiles : array normalized airfoil profiles - geometry : array + geometry : array actual x,y,z geometry - arclength : array + arclength : array surface distance from L.E. - cpos : array + cpos : array chordwise position - LEindex : int + LEindex : int HParcx0 : array LParcx0 : array keylabels : list - keypoints : array - keyarcs : array - keycpos : array - keyareas : array - LEbond : array - TEbond : array + keypoints : array + keyarcs : array + keycpos : array + keyareas : array + LEbond : array + TEbond : array webindices : list webpoints : list webarcs : list @@ -105,13 +106,13 @@ class Blade(): webbonds : list bom : dict bomIndices : dict - stacks : array + stacks : array array of StackDef - swstacks : list + swstacks : list contains StackDefs - matdb : dict + matdb : dict Composite definition for each region at each station - TEtype : list + TEtype : list trailing edge type; assigned in updateKeypoints shearweb : list bomPlot : dict @@ -122,106 +123,126 @@ class Blade(): ansys : dict generate ANSYS settings write_airfoils : bool - + Example ------- blade = BladeDef() """ - def __init__(self, filename: str = None): - - self.aerocenter : ndarray = None - self.chord : ndarray = None - self.chordoffset : ndarray = None - self.components : list = None - self.degreestwist : ndarray = None - self.ispan : ndarray = None - self.leband : float = None - self.materials : list = None - self.mesh : float = 0.45 - self.percentthick : ndarray = None - self.prebend : ndarray = None - self.span : ndarray = None - self.sparcapoffset : ndarray = None - self.sparcapwidth : ndarray = None - self.stations : list = None - self.sweep : ndarray = None - self.teband : float = None - self.idegreestwist : ndarray = None - self.ichord : ndarray = None - self.ipercentthick : ndarray = None - self.ic : ndarray = None - self.icamber : ndarray = None - self.ithickness : ndarray = None - self.ichordoffset : ndarray = None - self.iaerocenter : ndarray = None - self.isweep : ndarray = None - self.iprebend : ndarray = None - self.xoffset : ndarray = None - self.profiles : ndarray = None - self.geometry : ndarray = None - self.arclength : ndarray = None - self.cpos : ndarray = None - self.LEindex : int = None - self.HParcx0 : ndarray = None - self.LParcx0 : ndarray = None - self.keylabels : list = None - self.keypoints : ndarray = None - self.keyarcs : ndarray = None - self.keycpos : ndarray = None - self.keyareas : ndarray = None - self.LEbond : ndarray = None - self.TEbond : ndarray = None - self.webindices : list = None - self.webpoints : list = None - self.webarcs : list = None - 
self.webcpos : list = None - self.webareas : list = None - self.webwidth : list = None - self.webbonds : list = None - self.bom : dict = None - self.bomIndices : dict = None - self.stacks : ndarray = None - self.swstacks : list = None - self.hgGeometry : list = None - self.hgKeypoints : list = None - self.matdb : dict = None - self.TEtype : list = None - self.shearweb : list = None - self.bomPlot : dict = { - 'kLayer':1, 'hgLinesHP':[], 'hgLinesLP':[], - 'hgPatchHP':[], 'hgPatchLP':[], - 'uisliderHP':[], 'uisliderLP':[], - 'hTitleHP':[], 'hTitleLP':[] - } - self.job_name : str = 'numad.nmd' - self.paths : dict = { - 'job':'', 'numad':'', 'precomp':'', 'bmodes':'', - 'ansys':'','batch_run':0 - } - self.ansys : dict = { - 'BoundaryCondition':'','ElementSystem':'', - 'MultipleLayerBehavior':'', 'meshing':'','smartsize':[], - 'elementsize':[], 'shell7gen':[], - 'dbgen':[], 'FailureCriteria':[] - } - - #properties - self._naturaloffset = 1 # 1 = offset by max thickness location, 0= do not offset to max thickness - self._rotorspin = 1 # Rotor Spin, 1= CW rotation looking downwind, -1= CCW rotation - self._swtwisted = 0 # Shear Web, 0 = planar shear webs, 1= shear webs twisted by blade twist + def __init__(self, filename: str = None): + self.aerocenter: ndarray = None + self.chord: ndarray = None + self.chordoffset: ndarray = None + self.components: list = None + self.degreestwist: ndarray = None + self.ispan: ndarray = None + self.leband: float = None + self.materials: list = None + self.mesh: float = 0.45 + self.percentthick: ndarray = None + self.prebend: ndarray = None + self.span: ndarray = None + self.sparcapoffset: ndarray = None + self.sparcapwidth: ndarray = None + self.stations: list = None + self.sweep: ndarray = None + self.teband: float = None + self.idegreestwist: ndarray = None + self.ichord: ndarray = None + self.ipercentthick: ndarray = None + self.ic: ndarray = None + self.icamber: ndarray = None + self.ithickness: ndarray = None + self.ichordoffset: ndarray = None + self.iaerocenter: ndarray = None + self.isweep: ndarray = None + self.iprebend: ndarray = None + self.xoffset: ndarray = None + self.profiles: ndarray = None + self.geometry: ndarray = None + self.arclength: ndarray = None + self.cpos: ndarray = None + self.LEindex: int = None + self.HParcx0: ndarray = None + self.LParcx0: ndarray = None + self.keylabels: list = None + self.keypoints: ndarray = None + self.keyarcs: ndarray = None + self.keycpos: ndarray = None + self.keyareas: ndarray = None + self.LEbond: ndarray = None + self.TEbond: ndarray = None + self.webindices: list = None + self.webpoints: list = None + self.webarcs: list = None + self.webcpos: list = None + self.webareas: list = None + self.webwidth: list = None + self.webbonds: list = None + self.bom: dict = None + self.bomIndices: dict = None + self.stacks: ndarray = None + self.swstacks: list = None + self.hgGeometry: list = None + self.hgKeypoints: list = None + self.matdb: dict = None + self.TEtype: list = None + self.shearweb: list = None + self.bomPlot: dict = { + "kLayer": 1, + "hgLinesHP": [], + "hgLinesLP": [], + "hgPatchHP": [], + "hgPatchLP": [], + "uisliderHP": [], + "uisliderLP": [], + "hTitleHP": [], + "hTitleLP": [], + } + self.job_name: str = "numad.nmd" + self.paths: dict = { + "job": "", + "numad": "", + "precomp": "", + "bmodes": "", + "ansys": "", + "batch_run": 0, + } + self.ansys: dict = { + "BoundaryCondition": "", + "ElementSystem": "", + "MultipleLayerBehavior": "", + "meshing": "", + "smartsize": [], + "elementsize": [], + 
"shell7gen": [], + "dbgen": [], + "FailureCriteria": [], + } + + # properties + self._naturaloffset = ( + 1 # 1 = offset by max thickness location, 0= do not offset to max thickness + ) + self._rotorspin = ( + 1 # Rotor Spin, 1= CW rotation looking downwind, -1= CCW rotation + ) + self._swtwisted = ( + 0 # Shear Web, 0 = planar shear webs, 1= shear webs twisted by blade twist + ) try: - if 'yaml' in filename or 'yml' in filename: + if "yaml" in filename or "yml" in filename: self.read_yaml(filename) - elif 'xls' in filename or 'xlsx' in filename: + elif "xls" in filename or "xlsx" in filename: self.read_excel(filename) else: - raise Exception('Unknown filetype. Currently supported inputs are excel and yaml files.') + raise Exception( + "Unknown filetype. Currently supported inputs are excel and yaml files." + ) # To handle when filename == None except TypeError: pass - @property def naturaloffset(self): """ @@ -229,18 +250,16 @@ def naturaloffset(self): """ return self._naturaloffset - @naturaloffset.setter def naturaloffset(self, new_naturaloffset): """ TODO docstring """ - if not (new_naturaloffset==0 or new_naturaloffset==1): - raise Exception('naturaloffset must be 0 or 1') + if not (new_naturaloffset == 0 or new_naturaloffset == 1): + raise Exception("naturaloffset must be 0 or 1") else: self._naturaloffset = new_naturaloffset - @property def rotorspin(self): """ @@ -248,18 +267,16 @@ def rotorspin(self): """ return self._rotorspin - @rotorspin.setter - def rotorspin(self,new_rotorspin): + def rotorspin(self, new_rotorspin): """ TODO docstring - """ - if not (new_rotorspin==1 or new_rotorspin==-1): - raise Exception('rotorspin must be 1 (cw) or -1 (ccw)') + """ + if not (new_rotorspin == 1 or new_rotorspin == -1): + raise Exception("rotorspin must be 1 (cw) or -1 (ccw)") else: self._rotorspin = new_rotorspin - @property def swtwisted(self): """ @@ -268,31 +285,29 @@ def swtwisted(self): return self._swtwisted @swtwisted.setter - def swtwisted(self,new_swtwisted): + def swtwisted(self, new_swtwisted): """ TODO docstring - """ - if not (new_swtwisted==0 or new_swtwisted==1): - raise Exception('swtwisted must be 0 or 1') + """ + if not (new_swtwisted == 0 or new_swtwisted == 1): + raise Exception("swtwisted must be 0 or 1") else: self._swtwisted = new_swtwisted - ### Magic methods def __str__(self): - attributes = '' + attributes = "" for attr_name, attr_value in vars(self).items(): if isinstance(attr_value, list): - attributes += f'{attr_name}={len(attr_value)}, ' + attributes += f"{attr_name}={len(attr_value)}, " elif isinstance(attr_value, np.ndarray): - attributes += f'{attr_name}={attr_value.shape}, ' + attributes += f"{attr_name}={attr_value.shape}, " else: - attributes += f'{attr_name}={attr_value}, ' - return f'Blade with {attributes[:-2]}' + attributes += f"{attr_name}={attr_value}, " + return f"Blade with {attributes[:-2]}" - - ### IO + ### IO def read_yaml(self, filename): """Populate blade attributes with yaml file data @@ -312,7 +327,6 @@ def read_yaml(self, filename): yaml_to_blade(self, filename) return self - def read_excel(self, filename: str): """Populate blade attributes with excel file data @@ -331,7 +345,6 @@ def read_excel(self, filename: str): excel_to_blade(self, filename) return self - ### Update methods def updateBlade(self): @@ -343,127 +356,152 @@ def updateBlade(self): self.updateBOM() return self - def updateGeometry(self): - """This method updates the interpolated blade parameters - """ - + """This method updates the interpolated blade parameters""" + # 
update the interpolated station profiles nStations = len(self.stations) if nStations > 0: nPoints = len(self.stations[0].airfoil.c) else: raise Exception( - 'BladeDef must have at least one station before updating geometry.') - + "BladeDef must have at least one station before updating geometry." + ) + # add some error checking -- first station must be at blade # root to prevent extrapolation - assert self.stations[0].spanlocation==0, 'first station must be at the blade root' - + assert ( + self.stations[0].spanlocation == 0 + ), "first station must be at the blade root" + # Collect parameter tables from the stations. - spanlocation = np.array([self.stations[i].spanlocation for i in range(len(self.stations))]) - c = np.zeros((nPoints,nStations)) - camber = np.zeros((nPoints,nStations)) - thickness = np.zeros((nPoints,nStations)) - tetype = [None]*nStations - for k in range(0,nStations): + spanlocation = np.array( + [self.stations[i].spanlocation for i in range(len(self.stations))] + ) + c = np.zeros((nPoints, nStations)) + camber = np.zeros((nPoints, nStations)) + thickness = np.zeros((nPoints, nStations)) + tetype = [None] * nStations + for k in range(0, nStations): ck = self.stations[k].airfoil.c if len(ck) != nPoints: - raise Exception( - 'Station airfoils must have same number of samples.') - c[:,k] = ck - camber[:,k] = self.stations[k].airfoil.camber - thickness[:,k] = self.stations[k].airfoil.thickness + raise Exception("Station airfoils must have same number of samples.") + c[:, k] = ck + camber[:, k] = self.stations[k].airfoil.camber + thickness[:, k] = self.stations[k].airfoil.thickness tetype[k] = self.stations[k].airfoil.TEtype - + # fix numerical issue due to precision on camber calculation # camber should start and end at y-values of zero - camber[0,:] = np.zeros((1,nStations)) - camber[-1,:] = np.zeros((1,nStations)) - + camber[0, :] = np.zeros((1, nStations)) + camber[-1, :] = np.zeros((1, nStations)) + # Interpolate the station parameter tables. # Each column corresponds to an interpolated station. - ic = interpolator_wrap(spanlocation,c,self.ispan,'pchip', axis=1) - icamber = interpolator_wrap(spanlocation,camber,self.ispan,'pchip', axis=1) - ithickness = interpolator_wrap(spanlocation,thickness,self.ispan,'pchip', axis=1) - self.ic = ic #Export for TE opening + ic = interpolator_wrap(spanlocation, c, self.ispan, "pchip", axis=1) + icamber = interpolator_wrap(spanlocation, camber, self.ispan, "pchip", axis=1) + ithickness = interpolator_wrap( + spanlocation, thickness, self.ispan, "pchip", axis=1 + ) + self.ic = ic # Export for TE opening self.icamber = icamber self.ithickness = ithickness - self.cpos = np.concatenate((-ic[-1,:].reshape(1,-1),-np.flipud(ic), - ic[1:,:],ic[-1,:].reshape(1,-1)),axis=0) - + self.cpos = np.concatenate( + ( + -ic[-1, :].reshape(1, -1), + -np.flipud(ic), + ic[1:, :], + ic[-1, :].reshape(1, -1), + ), + axis=0, + ) + # Adjust the thickness profiles based on TEtype of stations. # This is mainly for transitions to flatbacks were the # interpolated airfoil needs to look like a round. 
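# A minimal sketch of the column-wise "pchip" interpolation performed above,
# assuming interpolator_wrap behaves like SciPy's PchipInterpolator along the
# station axis (the wrapper itself lives elsewhere in pynumad and is not shown
# in this hunk); array names and values here are hypothetical.
import numpy as np
from scipy.interpolate import PchipInterpolator

spanlocation = np.array([0.0, 0.3, 0.6, 1.0])  # station span fractions (made up)
table = np.outer(np.linspace(0.0, 1.0, 40), np.array([1.0, 1.1, 1.3, 1.6]))  # (nPoints, nStations)
ispan = np.linspace(0.0, 1.0, 11)  # interpolated span grid (made up)

itable = PchipInterpolator(spanlocation, table, axis=1)(ispan)  # shape (40, 11), like ic above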
for k in range(len(self.ispan)): try: ind = np.argwhere(self.ispan[k] < spanlocation)[0][0] - #maybe better: ind = np.flatnonzero(self.ispan[k] < spanlocation)[0] + # maybe better: ind = np.flatnonzero(self.ispan[k] < spanlocation)[0] except: continue else: if ind == 1: continue - if tetype[ind]=='flat' and tetype[ind - 1]=='round': - ithickness[-1,k] = 0 - + if tetype[ind] == "flat" and tetype[ind - 1] == "round": + ithickness[-1, k] = 0 + # Interpolate the blade parameter curves. # Results are row vectors. - self.idegreestwist = interpolator_wrap(self.span,self.degreestwist,self.ispan,'pchip') - self.ichord = interpolator_wrap(self.span,self.chord,self.ispan,'pchip') - absolutethick = np.multiply(self.percentthick,self.chord) / 100 - iabsolutethick = interpolator_wrap(self.span,absolutethick,self.ispan,'pchip') + self.idegreestwist = interpolator_wrap( + self.span, self.degreestwist, self.ispan, "pchip" + ) + self.ichord = interpolator_wrap(self.span, self.chord, self.ispan, "pchip") + absolutethick = np.multiply(self.percentthick, self.chord) / 100 + iabsolutethick = interpolator_wrap( + self.span, absolutethick, self.ispan, "pchip" + ) self.ipercentthick = iabsolutethick / self.ichord * 100 # ensure that the interpolation doesn't reduce the percent # thickness beneath the thinnest airfoil - self.ipercentthick[self.ipercentthick < np.amin(self.percentthick)] = np.amin(self.percentthick) - self.ichordoffset = interpolator_wrap(self.span,self.chordoffset,self.ispan,'pchip') - self.iaerocenter = interpolator_wrap(self.span,self.aerocenter,self.ispan,'pchip') - if len(self.sweep)==0: - self.sweep = np.zeros((self.span.shape,self.span.shape)) - if len(self.prebend)==0: - self.prebend = np.zeros((self.span.shape,self.span.shape)) - self.isweep = interpolator_wrap(self.span,self.sweep,self.ispan,'pchip') - self.iprebend = interpolator_wrap(self.span,self.prebend,self.ispan,'pchip') - + self.ipercentthick[self.ipercentthick < np.amin(self.percentthick)] = np.amin( + self.percentthick + ) + self.ichordoffset = interpolator_wrap( + self.span, self.chordoffset, self.ispan, "pchip" + ) + self.iaerocenter = interpolator_wrap( + self.span, self.aerocenter, self.ispan, "pchip" + ) + if len(self.sweep) == 0: + self.sweep = np.zeros((self.span.shape, self.span.shape)) + if len(self.prebend) == 0: + self.prebend = np.zeros((self.span.shape, self.span.shape)) + self.isweep = interpolator_wrap(self.span, self.sweep, self.ispan, "pchip") + self.iprebend = interpolator_wrap(self.span, self.prebend, self.ispan, "pchip") + # Generate the blade surface geometry. 
N = np.asarray(self.ispan).size M = nPoints * 2 + 1 - self.profiles = np.zeros((M,2,N)) - self.geometry = np.zeros((M,3,N)) - self.xoffset = np.zeros((1,N)) + self.profiles = np.zeros((M, 2, N)) + self.geometry = np.zeros((M, 3, N)) + self.xoffset = np.zeros((1, N)) self.LEindex = nPoints - - for k in range(0,N): + + for k in range(0, N): self.updateAirfoilProfile(k) - mtindex = np.argmax(ithickness[:,k]) - self.xoffset[0,k] = ic[mtindex,k] + mtindex = np.argmax(ithickness[:, k]) + self.xoffset[0, k] = ic[mtindex, k] self.updateOMLgeometry(k) - + # Calculate the arc length of each curve - self.arclength = np.zeros((M,N)) - self.HParcx0 = np.zeros((1,N)) - self.LParcx0 = np.zeros((1,N)) + self.arclength = np.zeros((M, N)) + self.HParcx0 = np.zeros((1, N)) + self.LParcx0 = np.zeros((1, N)) LE = self.LEindex for k in range(N): - xx = self.geometry[:,0,k] - yy = self.geometry[:,1,k] - zz = self.geometry[:,2,k] - arclen = np.sqrt(np.diff(xx)**2 + np.diff(yy)**2 + np.diff(zz)**2) - arclen = np.concatenate((np.array([0]),np.cumsum(arclen)),axis=0) + xx = self.geometry[:, 0, k] + yy = self.geometry[:, 1, k] + zz = self.geometry[:, 2, k] + arclen = np.sqrt(np.diff(xx) ** 2 + np.diff(yy) ** 2 + np.diff(zz) ** 2) + arclen = np.concatenate((np.array([0]), np.cumsum(arclen)), axis=0) # self.arclength(:,k) = interpolator_wrap(ptindover,arclenover,ptind); - self.arclength[:,k] = arclen - LEarcsum = self.arclength[self.LEindex,k] - self.arclength[:,k] = self.arclength[:,k] - LEarcsum + self.arclength[:, k] = arclen + LEarcsum = self.arclength[self.LEindex, k] + self.arclength[:, k] = self.arclength[:, k] - LEarcsum # find where x=0 intersects the surface - self.HParcx0[0,k] = interpolator_wrap(xx[1:LE+1],arclen[1:LE+1],0) - LEarcsum - self.LParcx0[0,k] = interpolator_wrap(xx[-2:LE-1:-1],arclen[-2:LE-1:-1],0) - LEarcsum - - return self + self.HParcx0[0, k] = ( + interpolator_wrap(xx[1 : LE + 1], arclen[1 : LE + 1], 0) - LEarcsum + ) + self.LParcx0[0, k] = ( + interpolator_wrap(xx[-2 : LE - 1 : -1], arclen[-2 : LE - 1 : -1], 0) + - LEarcsum + ) + return self def updateKeypoints(self): """This method updates the keypoints (a,b,c,...) 
which define the blade @@ -479,16 +517,33 @@ def updateKeypoints(self): find the curves which bound each blade region """ - - N = self.ispan.size # number of interpolated span stations - M = 12 # number of areas around airfoil profile; must be even (see calc of web areas) - - self.keypoints = np.zeros((M - 2,3,N)) # keypoints in xyz geometry - self.keyarcs = np.zeros((M + 1,N)) # surface arclength distance of keypoints from LE - self.keycpos = np.zeros((M + 1,N)) # chordwise position of keypoints - self.keyareas = np.zeros((M,N - 1)) # surface area of regions created by keypoints - self.keylabels = ['te','e','d','c','b','a','le', - 'a','b','c','d','e','te'] + + N = self.ispan.size # number of interpolated span stations + M = 12 # number of areas around airfoil profile; must be even (see calc of web areas) + + self.keypoints = np.zeros((M - 2, 3, N)) # keypoints in xyz geometry + self.keyarcs = np.zeros( + (M + 1, N) + ) # surface arclength distance of keypoints from LE + self.keycpos = np.zeros((M + 1, N)) # chordwise position of keypoints + self.keyareas = np.zeros( + (M, N - 1) + ) # surface area of regions created by keypoints + self.keylabels = [ + "te", + "e", + "d", + "c", + "b", + "a", + "le", + "a", + "b", + "c", + "d", + "e", + "te", + ] self.LEbond = np.zeros((N - 1)) self.TEbond = np.zeros((N - 1)) mm_to_m = 0.001 @@ -497,20 +552,19 @@ def updateKeypoints(self): ns = 1 nf = self.geometry.shape[0] - 2 - #keypoints, keyarcs, keycpos - self.TEtype = [] # reset TEtype - for k in range(0,N): + # keypoints, keyarcs, keycpos + self.TEtype = [] # reset TEtype + for k in range(0, N): # allow for separate definitions of HP and LP spar cap # width and offset [HP LP] - n1 = mm_to_m * self.leband[k] # no foam width - n2 = mm_to_m * self.teband[k] # no foam width + n1 = mm_to_m * self.leband[k] # no foam width + n2 = mm_to_m * self.teband[k] # no foam width + scwidth_hp = mm_to_m * self.sparcapwidth_hp[k] # type: float + scwidth_lp = mm_to_m * self.sparcapwidth_lp[k] # type: float - scwidth_hp = mm_to_m * self.sparcapwidth_hp[k] #type: float - scwidth_lp = mm_to_m * self.sparcapwidth_lp[k] #type: float - - scoffset_hp = mm_to_m * self.sparcapoffset_hp[k] #type: float - scoffset_lp = mm_to_m * self.sparcapoffset_lp[k] #type: float + scoffset_hp = mm_to_m * self.sparcapoffset_hp[k] # type: float + scoffset_lp = mm_to_m * self.sparcapoffset_lp[k] # type: float tempTE = self.getprofileTEtype(k) if self.TEtype: @@ -522,108 +576,112 @@ def updateKeypoints(self): # get angle of each xy pair w.r.t. 
pitch axis (0,0) xyangle = np.zeros(self.geometry.shape[0]) for j in range(len(xyangle)): - xy = self.geometry[j,0:2,k] - xyangle[j] = np.arctan2(self.rotorspin * xy[1],xy[0]) + xy = self.geometry[j, 0:2, k] + xyangle[j] = np.arctan2(self.rotorspin * xy[1], xy[0]) # unwrap and center around 0 xyangle = np.unwrap(xyangle) xyangle = xyangle - np.pi * np.round(xyangle[self.LEindex] / np.pi) - k_arclen = self.arclength[ns:nf+1,k] - k_geom = self.geometry[ns:nf+1,:,k] - k_cpos = self.cpos[ns:nf+1,k] + k_arclen = self.arclength[ns : nf + 1, k] + k_geom = self.geometry[ns : nf + 1, :, k] + k_cpos = self.cpos[ns : nf + 1, k] # ==================== HP surface ==================== if self.swtwisted: # find arclength where xyangle equals normal to chord - twistnorm = np.pi / 180 * (- self.idegreestwist[k] - 90) # angle normal to chord line - z = interpolator_wrap(xyangle[ns:nf+1],k_arclen,twistnorm) + twistnorm = ( + np.pi / 180 * (-self.idegreestwist[k] - 90) + ) # angle normal to chord line + z = interpolator_wrap(xyangle[ns : nf + 1], k_arclen, twistnorm) else: - z = self.HParcx0[0,k] + z = self.HParcx0[0, k] z0 = z z = z - scoffset_hp - a = np.amax(((0 - n1),0.1 * self.arclength[ns,k])) #type: float - a = np.amin((a,0.01 * self.arclength[ns,k])) - b = np.amin(((z + 0.5 * scwidth_hp),0.15 * self.arclength[ns,k])) - c = np.amax(((z - 0.5 * scwidth_hp),0.8 * self.arclength[ns,k])) - d = np.amin(((self.arclength[0,k] + n2),0.85 * self.arclength[ns,k])) - d = np.amax((d,0.98 * self.arclength[ns,k])) - if str(self.TEtype[k]) == 'flat': - e = self.arclength[ns,k] - self.keypoints[0,:,k] = self.geometry[ns,:,k] - self.keycpos[1,k] = -1 + a = np.amax(((0 - n1), 0.1 * self.arclength[ns, k])) # type: float + a = np.amin((a, 0.01 * self.arclength[ns, k])) + b = np.amin(((z + 0.5 * scwidth_hp), 0.15 * self.arclength[ns, k])) + c = np.amax(((z - 0.5 * scwidth_hp), 0.8 * self.arclength[ns, k])) + d = np.amin(((self.arclength[0, k] + n2), 0.85 * self.arclength[ns, k])) + d = np.amax((d, 0.98 * self.arclength[ns, k])) + if str(self.TEtype[k]) == "flat": + e = self.arclength[ns, k] + self.keypoints[0, :, k] = self.geometry[ns, :, k] + self.keycpos[1, k] = -1 else: # e = 0.5 * (d + self.arclength(ns,k)); - e = 0.99 * self.arclength[ns,k] - self.keypoints[0,:,k] = interpolator_wrap(k_arclen,k_geom,e) - self.keycpos[1,k] = interpolator_wrap(k_arclen,k_cpos,e) + e = 0.99 * self.arclength[ns, k] + self.keypoints[0, :, k] = interpolator_wrap(k_arclen, k_geom, e) + self.keycpos[1, k] = interpolator_wrap(k_arclen, k_cpos, e) # 1 -> e - self.keypoints[1,:,k] = interpolator_wrap(k_arclen,k_geom,d) - self.keypoints[2,:,k] = interpolator_wrap(k_arclen,k_geom,c) + self.keypoints[1, :, k] = interpolator_wrap(k_arclen, k_geom, d) + self.keypoints[2, :, k] = interpolator_wrap(k_arclen, k_geom, c) # self.keypoints( ,:,k) = interpolator_wrap(self.arclength(ns:nf,k),self.geometry(ns:nf,:,k),z); - self.keypoints[3,:,k] = interpolator_wrap(k_arclen,k_geom,b) - self.keypoints[4,:,k] = interpolator_wrap(k_arclen,k_geom,a) - self.keyarcs[0,k] = self.arclength[ns,k] - self.keyarcs[1,k] = e - self.keyarcs[2,k] = d - self.keyarcs[3,k] = c + self.keypoints[3, :, k] = interpolator_wrap(k_arclen, k_geom, b) + self.keypoints[4, :, k] = interpolator_wrap(k_arclen, k_geom, a) + self.keyarcs[0, k] = self.arclength[ns, k] + self.keyarcs[1, k] = e + self.keyarcs[2, k] = d + self.keyarcs[3, k] = c # self.keyarcs( ,k) = z; - self.keyarcs[4,k] = b - self.keyarcs[5,k] = a - self.keyarcs[6,k] = 0 # le - self.keycpos[0,k] = self.cpos[ns,k] #te, hp surface 
+ self.keyarcs[4, k] = b + self.keyarcs[5, k] = a + self.keyarcs[6, k] = 0 # le + self.keycpos[0, k] = self.cpos[ns, k] # te, hp surface # 2 -> e - self.keycpos[2,k] = interpolator_wrap(k_arclen,k_cpos,d) - self.keycpos[3,k] = interpolator_wrap(k_arclen,k_cpos,c) + self.keycpos[2, k] = interpolator_wrap(k_arclen, k_cpos, d) + self.keycpos[3, k] = interpolator_wrap(k_arclen, k_cpos, c) # self.keycpos( ,k) = interpolator_wrap(self.arclength(ns:nf,k),self.cpos(ns:nf,k),z); - self.keycpos[4,k] = interpolator_wrap(k_arclen,k_cpos,b) - self.keycpos[5,k] = interpolator_wrap(k_arclen,k_cpos,a) - self.keycpos[6,k] = interpolator_wrap(k_arclen,k_cpos,0) + self.keycpos[4, k] = interpolator_wrap(k_arclen, k_cpos, b) + self.keycpos[5, k] = interpolator_wrap(k_arclen, k_cpos, a) + self.keycpos[6, k] = interpolator_wrap(k_arclen, k_cpos, 0) # ==================== LP surface ==================== if self.swtwisted: - twistnorm = np.pi / 180 * (-self.idegreestwist[k] + 90) # angle normal to chord line - z = interpolator_wrap(xyangle[ns:nf+1],k_arclen,twistnorm) + twistnorm = ( + np.pi / 180 * (-self.idegreestwist[k] + 90) + ) # angle normal to chord line + z = interpolator_wrap(xyangle[ns : nf + 1], k_arclen, twistnorm) else: - z = self.LParcx0[0,k] - z0 = z # ble: location where airfoil surface crosses Xglobal=0 - z = z + scoffset_lp # positive scoffset moves z toward t.e. - a = np.amin(((0 + n1),0.1 * self.arclength[nf,k])) - a = np.amax((a,0.01 * self.arclength[nf,k])) - b = np.amax(((z - 0.5 * scwidth_lp),0.15 * self.arclength[nf,k])) - c = np.amin((z + 0.5 * scwidth_lp,0.8 * self.arclength[nf,k])) - d = np.amax((self.arclength[-1,k] - n2,0.85 * self.arclength[nf,k])) - d = np.amin((d,0.96 * self.arclength[nf,k])) - if str(self.TEtype[k]) == str('flat'): - e = self.arclength[nf,k] - self.keypoints[9,:,k] = self.geometry[nf,:,k] - self.keycpos[11,k] = 1 + z = self.LParcx0[0, k] + z0 = z # ble: location where airfoil surface crosses Xglobal=0 + z = z + scoffset_lp # positive scoffset moves z toward t.e. 
+ a = np.amin(((0 + n1), 0.1 * self.arclength[nf, k])) + a = np.amax((a, 0.01 * self.arclength[nf, k])) + b = np.amax(((z - 0.5 * scwidth_lp), 0.15 * self.arclength[nf, k])) + c = np.amin((z + 0.5 * scwidth_lp, 0.8 * self.arclength[nf, k])) + d = np.amax((self.arclength[-1, k] - n2, 0.85 * self.arclength[nf, k])) + d = np.amin((d, 0.96 * self.arclength[nf, k])) + if str(self.TEtype[k]) == str("flat"): + e = self.arclength[nf, k] + self.keypoints[9, :, k] = self.geometry[nf, :, k] + self.keycpos[11, k] = 1 else: # e = 0.5 * (d + self.arclength(nf,k)); - e = 0.98 * self.arclength[nf,k] - self.keypoints[9,:,k] = interpolator_wrap(k_arclen,k_geom,e) - self.keycpos[11,k] = interpolator_wrap(k_arclen,k_cpos,e) - self.keypoints[5,:,k] = interpolator_wrap(k_arclen,k_geom,a) - self.keypoints[6,:,k] = interpolator_wrap(k_arclen,k_geom,b) + e = 0.98 * self.arclength[nf, k] + self.keypoints[9, :, k] = interpolator_wrap(k_arclen, k_geom, e) + self.keycpos[11, k] = interpolator_wrap(k_arclen, k_cpos, e) + self.keypoints[5, :, k] = interpolator_wrap(k_arclen, k_geom, a) + self.keypoints[6, :, k] = interpolator_wrap(k_arclen, k_geom, b) # self.keypoints( ,:,k) = interpolator_wrap(self.arclength(ns:nf,k),self.geometry(ns:nf,:,k),z); - self.keypoints[7,:,k] = interpolator_wrap(k_arclen,k_geom,c) - self.keypoints[8,:,k] = interpolator_wrap(k_arclen,k_geom,d) + self.keypoints[7, :, k] = interpolator_wrap(k_arclen, k_geom, c) + self.keypoints[8, :, k] = interpolator_wrap(k_arclen, k_geom, d) # 10 -> e - self.keyarcs[7,k] = a - self.keyarcs[8,k] = b + self.keyarcs[7, k] = a + self.keyarcs[8, k] = b # self.keyarcs( ,k) = z; - self.keyarcs[9,k] = c - self.keyarcs[10,k] = d - self.keyarcs[11,k] = e - self.keyarcs[12,k] = self.arclength[nf,k] - self.keycpos[7,k] = interpolator_wrap(k_arclen,k_cpos,a) - self.keycpos[8,k] = interpolator_wrap(k_arclen,k_cpos,b) + self.keyarcs[9, k] = c + self.keyarcs[10, k] = d + self.keyarcs[11, k] = e + self.keyarcs[12, k] = self.arclength[nf, k] + self.keycpos[7, k] = interpolator_wrap(k_arclen, k_cpos, a) + self.keycpos[8, k] = interpolator_wrap(k_arclen, k_cpos, b) # self.keycpos( ,k) = interpolator_wrap(self.arclength(ns:nf,k),self.cpos(ns:nf,k),z); - self.keycpos[9,k] = interpolator_wrap(k_arclen,k_cpos,c) - self.keycpos[10,k] = interpolator_wrap(k_arclen,k_cpos,d) + self.keycpos[9, k] = interpolator_wrap(k_arclen, k_cpos, c) + self.keycpos[10, k] = interpolator_wrap(k_arclen, k_cpos, d) # 12 -> e - self.keycpos[12,k] = self.cpos[nf,k] # te, lp surface - + self.keycpos[12, k] = self.cpos[nf, k] # te, lp surface + # find the points used by each shear web component_groups = [self.components[name].group for name in self.components] self.webindices = [] @@ -633,187 +691,238 @@ def updateKeypoints(self): self.webareas = [] self.webwidth = [] self.webbonds = [] - for ksw in range(max(component_groups)): # for each shear web + for ksw in range(max(component_groups)): # for each shear web # pre-allocating arrays self.webindices.append([]) - self.webarcs.append(np.ndarray((2,N))) - self.webcpos.append(np.ndarray((2,N))) - self.webpoints.append(np.ndarray((2,3,N))) - self.webareas.append(np.ndarray((N-1))) + self.webarcs.append(np.ndarray((2, N))) + self.webcpos.append(np.ndarray((2, N))) + self.webpoints.append(np.ndarray((2, 3, N))) + self.webareas.append(np.ndarray((N - 1))) self.webwidth.append(np.ndarray((N))) - self.webbonds.append(np.ndarray((2,N-1))) - ksw_cmpts = [self.components[comp] for comp in self.components if self.components[comp].group == ksw+1] # find the components 
that are part of the shear web - hpextents = np.unique([comp.hpextents for comp in ksw_cmpts]).tolist() # get the hp extents - lpextents = np.unique([comp.lpextents for comp in ksw_cmpts]).tolist() # get the lp extents - assert len(hpextents) == 1,f'HP Extents for components in group {ksw} must be identical and contain no spaces or commas' - assert len(lpextents) == 1,f'LP Extents for components in group {ksw} must be identical and contain no spaces or commas' + self.webbonds.append(np.ndarray((2, N - 1))) + ksw_cmpts = [ + self.components[comp] + for comp in self.components + if self.components[comp].group == ksw + 1 + ] # find the components that are part of the shear web + hpextents = np.unique( + [comp.hpextents for comp in ksw_cmpts] + ).tolist() # get the hp extents + lpextents = np.unique( + [comp.lpextents for comp in ksw_cmpts] + ).tolist() # get the lp extents + assert ( + len(hpextents) == 1 + ), f"HP Extents for components in group {ksw} must be identical and contain no spaces or commas" + assert ( + len(lpextents) == 1 + ), f"LP Extents for components in group {ksw} must be identical and contain no spaces or commas" # match extents that have form of either '0.5b-c' or # 'b+/-100' or 'b' or 'z+/-100' # pat = '(?\d*[\.]?\d*)(?[a-zA-Z]+)-(?[a-zA-Z]+)|(?[a-zA-Z]+)(?[+-]\d+)|(?[a-zA-Z])' - pat = '(?P\d*[\.]?\d*)(?P[a-zA-Z]+)-(?P[a-zA-Z]+)|(?P[a-zA-Z]+)(?P[+-]\d+)|(?P[a-zA-Z])' - - hp = re.search(pat,hpextents[0]).groupdict() - lp = re.search(pat,lpextents[0]).groupdict() + pat = "(?P\d*[\.]?\d*)(?P[a-zA-Z]+)-(?P[a-zA-Z]+)|(?P[a-zA-Z]+)(?P[+-]\d+)|(?P[a-zA-Z])" + + hp = re.search(pat, hpextents[0]).groupdict() + lp = re.search(pat, lpextents[0]).groupdict() try: - le = self.keylabels.index('le') + le = self.keylabels.index("le") except: print(f"HP extent label \"{hp['pt']}\" not defined.") # get shear web placement on HP side - if hp['pt']: + if hp["pt"]: try: - n = self.keylabels[0:le+1].index(hp['pt'])## EMA + n = self.keylabels[0 : le + 1].index(hp["pt"]) ## EMA except: print(f"HP extent label \"{hp['pt']}\" not defined.") self.webindices[ksw].append(n) - self.webarcs[ksw][0,:] = self.keyarcs[n,:] - self.webcpos[ksw][0,:] = self.keycpos[n,:] + self.webarcs[ksw][0, :] = self.keyarcs[n, :] + self.webcpos[ksw][0, :] = self.keycpos[n, :] n = n - 1 - self.webpoints[ksw][0,:,:] = self.keypoints[n,:,:] - elif hp['pt1']: - f = float(hp['fraction']) + self.webpoints[ksw][0, :, :] = self.keypoints[n, :, :] + elif hp["pt1"]: + f = float(hp["fraction"]) if f <= 0 or f >= 1: - raise Exception(f'Component group {ksw}: HP extent fraction={f}, which is outside range (0..1)') + raise Exception( + f"Component group {ksw}: HP extent fraction={f}, which is outside range (0..1)" + ) try: - n1 = self.keylabels[0:le+1].index(hp['pt1']) + n1 = self.keylabels[0 : le + 1].index(hp["pt1"]) except: print(f"HP extent label \"{hp['pt1']}\" not defined.") try: - n2 = self.keylabels[0:le+1].index(hp['pt2']) + n2 = self.keylabels[0 : le + 1].index(hp["pt2"]) except: print(f"HP extent label \"{hp['pt2']}\" not defined.") self.webindices[ksw].append(np.nan) - p1 = self.keyarcs[n1,:] - p2 = self.keyarcs[n2,:] - p = (1-f)*p1 + f*p2 - self.webarcs[ksw][0,:] = p + p1 = self.keyarcs[n1, :] + p2 = self.keyarcs[n2, :] + p = (1 - f) * p1 + f * p2 + self.webarcs[ksw][0, :] = p for k in range(N): - self.webcpos[ksw][0,k] = interpolator_wrap(k_arclen,k_cpos,p[k]) - self.webpoints[ksw][0,:,k] = interpolator_wrap(k_arclen,k_geom,p[k]) - elif hp['pt3']: + self.webcpos[ksw][0, k] = interpolator_wrap(k_arclen, k_cpos, p[k]) + 
self.webpoints[ksw][0, :, k] = interpolator_wrap( + k_arclen, k_geom, p[k] + ) + elif hp["pt3"]: try: - n3 = self.keylabels[0:le+1].index(hp['pt3']) + n3 = self.keylabels[0 : le + 1].index(hp["pt3"]) except: print(f"HP extent label \"{hp['pt3']}\" not defined.") self.webindices[ksw].append(np.nan) - p3 = self.keycpos[n3,:] - p = p3 - float(hp['mm_offset']) / 1000 - iMax = self.keylabels[0,le+1].index('d') - #NOTE potential for error here - array shapes TBD -kb - pMax = np.multiply(self.keycpos[iMax,:],np.transpose(self.ichord)) + p3 = self.keycpos[n3, :] + p = p3 - float(hp["mm_offset"]) / 1000 + iMax = self.keylabels[0, le + 1].index("d") + # NOTE potential for error here - array shapes TBD -kb + pMax = np.multiply(self.keycpos[iMax, :], np.transpose(self.ichord)) p[np.abs(p) > np.abs(pMax)] = pMax[np.abs(p) > np.abs(pMax)] - iMin = self.keylabels[0:le+1].index('a') - #NOTE same issue here -kb - pMin = np.multiply(self.keycpos[iMin,:],np.transpose(self.ichord)) + iMin = self.keylabels[0 : le + 1].index("a") + # NOTE same issue here -kb + pMin = np.multiply(self.keycpos[iMin, :], np.transpose(self.ichord)) p[np.abs(p) < np.abs(pMin)] = pMin[np.abs(p) < np.abs(pMin)] - self.webcpos[ksw][0,:] = p + self.webcpos[ksw][0, :] = p for k in range(N): - self.webarcs[ksw][0,k] = interpolator_wrap(self.cpos[ns:nf+1,:,k],self.arclength[ns:nf+1,:,k],p[k]) - self.webpoints[ksw][0,:,k] = interpolator_wrap(k_cpos,k_geom,p[k]) + self.webarcs[ksw][0, k] = interpolator_wrap( + self.cpos[ns : nf + 1, :, k], + self.arclength[ns : nf + 1, :, k], + p[k], + ) + self.webpoints[ksw][0, :, k] = interpolator_wrap( + k_cpos, k_geom, p[k] + ) else: - raise Exception('Shear web geometry HP extents not defined correctly (e.g., 0.5b-c, b, b+200)') + raise Exception( + "Shear web geometry HP extents not defined correctly (e.g., 0.5b-c, b, b+200)" + ) # get shear web placement on LP side - if lp['pt']: + if lp["pt"]: try: - n = self.keylabels[le:].index(lp['pt']) + le + n = self.keylabels[le:].index(lp["pt"]) + le self.webindices[ksw].append(n) - self.webarcs[ksw][1,:] = self.keyarcs[n,:] - self.webcpos[ksw][1,:] = self.keycpos[n,:] - self.webpoints[ksw][1,:,:] = self.keypoints[n,:,:] + self.webarcs[ksw][1, :] = self.keyarcs[n, :] + self.webcpos[ksw][1, :] = self.keycpos[n, :] + self.webpoints[ksw][1, :, :] = self.keypoints[n, :, :] except: print(f"LP extent label \"{lp['pt']}\" not defined.") - elif lp['pt1']: - f = float(lp['fraction']) + elif lp["pt1"]: + f = float(lp["fraction"]) if f < 0 or f > 1: - raise Exception(f'Component group {ksw}: LP extent fraction={f}, which is outside range [0..1]') + raise Exception( + f"Component group {ksw}: LP extent fraction={f}, which is outside range [0..1]" + ) try: - n1 = self.keylabels[le:].index(lp['pt1']) + le + n1 = self.keylabels[le:].index(lp["pt1"]) + le except: print(f"LP extent label \"{lp['pt1']}\" not defined.") try: - n2 = self.keylabels[le:].index(lp['pt2']) + le + n2 = self.keylabels[le:].index(lp["pt2"]) + le except: print(f"LP extent label \"{lp['pt2']}\" not defined.") self.webindices[ksw].append(np.nan) - p1 = self.keyarcs[n1,:] - p2 = self.keyarcs[n2,:] + p1 = self.keyarcs[n1, :] + p2 = self.keyarcs[n2, :] p = (1 - f) * p1 + f * p2 - self.webarcs[ksw][1,:] = p + self.webarcs[ksw][1, :] = p for k in range(N): - self.webcpos[ksw][1,k] = interpolator_wrap(k_arclen,k_cpos,p[k]) - self.webpoints[ksw][1,:,k] = interpolator_wrap(k_arclen,k_geom,p[k]) - elif lp['pt3']: + self.webcpos[ksw][1, k] = interpolator_wrap(k_arclen, k_cpos, p[k]) + self.webpoints[ksw][1, :, k] = 
interpolator_wrap( + k_arclen, k_geom, p[k] + ) + elif lp["pt3"]: try: - n3 = self.keylabels[le:].index(lp['pt3']) + le + n3 = self.keylabels[le:].index(lp["pt3"]) + le except: - print(f"LP extent label \"{lp['pt3']}\" not defined.") + print(f"LP extent label \"{lp['pt3']}\" not defined.") self.webindices[ksw].append(np.nan) - p3 = self.keycpos[n3,:] - p = p3 + float(lp['mm_offset']) / 1000 - iMax = self.keylabels[le:].index('d') + le - pMax = np.multiply(self.keycpos[iMax,:],np.transpose(self.ichord)) + p3 = self.keycpos[n3, :] + p = p3 + float(lp["mm_offset"]) / 1000 + iMax = self.keylabels[le:].index("d") + le + pMax = np.multiply(self.keycpos[iMax, :], np.transpose(self.ichord)) p[np.abs(p) > np.abs(pMax)] = pMax[np.abs(p) > np.abs(pMax)] - iMin = self.keylabels[le:].index('a') + le - pMin = np.multiply(self.keycpos[iMin,:],np.transpose(self.ichord)) + iMin = self.keylabels[le:].index("a") + le + pMin = np.multiply(self.keycpos[iMin, :], np.transpose(self.ichord)) p[np.abs(p) < np.abs(pMin)] = pMin[np.abs(p) < np.abs(pMin)] - self.webcpos[ksw][1,:] = p + self.webcpos[ksw][1, :] = p for k in range(N): - self.webarcs[ksw][1,k] = interpolator_wrap(k_cpos,k_arclen,p[k]) - self.webpoints[ksw][1,:,k] = interpolator_wrap(k_cpos,k_geom,p[k]) + self.webarcs[ksw][1, k] = interpolator_wrap(k_cpos, k_arclen, p[k]) + self.webpoints[ksw][1, :, k] = interpolator_wrap( + k_cpos, k_geom, p[k] + ) else: - raise Exception('Shear web geometry LP extents not defined correctly (e.g., 0.5b-c, b, b+200)') - + raise Exception( + "Shear web geometry LP extents not defined correctly (e.g., 0.5b-c, b, b+200)" + ) + # calculate shell areas - for kc in range(N-1): + for kc in range(N - 1): for kr in range(M): # choose number of points to use in area calculation # jcb: I decided to base this on the number of points # in the interpolated station profile found within the region # of interest. 
- npts = sum(np.logical_and(self.arclength[:,kc]>= self.keyarcs[kr,kc], - self.arclength[:,kc]<= self.keyarcs[kr+1,kc])) - npts = np.amax((npts,2)) # need at least two points - ibarc = np.linspace(self.keyarcs[kr,kc],self.keyarcs[kr+1,kc],npts) # inboard curve arclengths - obarc = np.linspace(self.keyarcs[kr,kc+1],self.keyarcs[kr+1,kc+1],npts) # outboard curve arclengths - ib = interpolator_wrap(self.arclength[ns:nf+1,kc],self.geometry[ns:nf+1,:,kc],ibarc) # inboard xyz - ob = interpolator_wrap(self.arclength[ns:nf+1,kc+1],self.geometry[ns:nf+1,:,kc+1],obarc) # outboard xyz - dspan = np.sqrt(np.sum((ob-ib)**2,1)) # "ds" in the span direction + npts = sum( + np.logical_and( + self.arclength[:, kc] >= self.keyarcs[kr, kc], + self.arclength[:, kc] <= self.keyarcs[kr + 1, kc], + ) + ) + npts = np.amax((npts, 2)) # need at least two points + ibarc = np.linspace( + self.keyarcs[kr, kc], self.keyarcs[kr + 1, kc], npts + ) # inboard curve arclengths + obarc = np.linspace( + self.keyarcs[kr, kc + 1], self.keyarcs[kr + 1, kc + 1], npts + ) # outboard curve arclengths + ib = interpolator_wrap( + self.arclength[ns : nf + 1, kc], + self.geometry[ns : nf + 1, :, kc], + ibarc, + ) # inboard xyz + ob = interpolator_wrap( + self.arclength[ns : nf + 1, kc + 1], + self.geometry[ns : nf + 1, :, kc + 1], + obarc, + ) # outboard xyz + dspan = np.sqrt(np.sum((ob - ib) ** 2, 1)) # "ds" in the span direction # treat each "rectangular" area as two triangles - t1 = 0.5 * np.dot(np.sqrt(np.sum(np.diff(ib,1,axis=0) ** 2, 1)), dspan[0:-1]) - t2 = 0.5 * np.dot(np.sqrt(np.sum(np.diff(ob,1,axis=0) ** 2, 1)), dspan[1:]) - self.keyareas[kr,kc] = t1 + t2 - if kr==0: + t1 = 0.5 * np.dot( + np.sqrt(np.sum(np.diff(ib, 1, axis=0) ** 2, 1)), dspan[0:-1] + ) + t2 = 0.5 * np.dot( + np.sqrt(np.sum(np.diff(ob, 1, axis=0) ** 2, 1)), dspan[1:] + ) + self.keyareas[kr, kc] = t1 + t2 + if kr == 0: self.TEbond[kc] = dspan[0] - if (M / 2 + 1)==(kr + 1): + if (M / 2 + 1) == (kr + 1): self.LEbond[kc] = dspan[0] - + # calculate areas used by shear webs # jcb: note that these areas come purely from the geometry and # do not take into account the thickness of the shell or # sparcap layup. 
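# A minimal numerical check of the two-triangle panel-area rule used both for
# keyareas above and for the shear-web areas below: each quad bounded by an
# inboard curve (ib) and an outboard curve (ob) is split into two triangles and
# their areas are summed. The two-point curves here are made-up values for a
# 1 m x 1 m panel, so the printed area should be 1.0.
import numpy as np

ib = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])  # inboard edge samples (x, y, z)
ob = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 1.0]])  # outboard edge samples (x, y, z)

dspan = np.sqrt(np.sum((ob - ib) ** 2, 1))  # spanwise "ds" at each sample
t1 = 0.5 * np.dot(np.sqrt(np.sum(np.diff(ib, 1, axis=0) ** 2, 1)), dspan[0:-1])
t2 = 0.5 * np.dot(np.sqrt(np.sum(np.diff(ob, 1, axis=0) ** 2, 1)), dspan[1:])
print(t1 + t2)  # 1.0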
for ksw in range(len(self.webpoints)): - for kc in range(N-1): - ib = self.webpoints[ksw][:,:,kc] - ob = self.webpoints[ksw][:,:,kc+1] + for kc in range(N - 1): + ib = self.webpoints[ksw][:, :, kc] + ob = self.webpoints[ksw][:, :, kc + 1] # treat each "rectangular" area as two triangles - b1 = np.diff(ib,axis=0) - b2 = np.diff(ob,axis=0) - base1 = np.sqrt(np.sum(b1 ** 2, 1))[0] - base2 = np.sqrt(np.sum(b2 ** 2, 1))[0] + b1 = np.diff(ib, axis=0) + b2 = np.diff(ob, axis=0) + base1 = np.sqrt(np.sum(b1**2, 1))[0] + base2 = np.sqrt(np.sum(b2**2, 1))[0] b1 = b1 / base1 b2 = b2 / base2 - h1 = np.abs(np.dot((ob[0,:] - ib[0,:]), (1 - np.transpose(b1)))) - h2 = np.abs(np.dot((ib[1,:] - ob[1,:]), (1 - np.transpose(b2)))) - self.webareas[ksw][kc] = 0.5 * (base1*h1 + base2*h2) + h1 = np.abs(np.dot((ob[0, :] - ib[0, :]), (1 - np.transpose(b1)))) + h2 = np.abs(np.dot((ib[1, :] - ob[1, :]), (1 - np.transpose(b2)))) + self.webareas[ksw][kc] = 0.5 * (base1 * h1 + base2 * h2) self.webwidth[ksw][kc] = base1 # calculate edge (bond-line) lengths - self.webbonds[ksw][0:2,kc] = np.sqrt(np.sum((ob - ib) ** 2, 1)) - self.webwidth[ksw][N-1] = base2 - - return self + self.webbonds[ksw][0:2, kc] = np.sqrt(np.sum((ob - ib) ** 2, 1)) + self.webwidth[ksw][N - 1] = base2 + return self - def updateBOM(self): + def updateBOM(self): """This method updates the Bill-of-Materials See datatypes.BOM @@ -824,40 +933,53 @@ def updateBOM(self): """ # raise DeprecationWarning("updateBOM currently deprecated. Please do not use.") - # set conversion constants + # set conversion constants G_TO_KG = 0.001 M_TO_MM = 1000.0 MM_TO_M = 0.001 # initialize structures - self.bom = {'hp':[],'lp':[],'sw':[],'lebond':[],'tebond':[], - 'swbonds':[],'dryweight':[]} - self.bomIndices = {'hp':[],'lp':[],'sw':[]} - + self.bom = { + "hp": [], + "lp": [], + "sw": [], + "lebond": [], + "tebond": [], + "swbonds": [], + "dryweight": [], + } + self.bomIndices = {"hp": [], "lp": [], "sw": []} + # calculate non-dimensional span ndspan = (self.ispan - self.ispan[0]) / (self.ispan[-1] - self.ispan[0]) - - + hprow = 0 lprow = 0 - outer_shape_comps = [name for name in self.components if self.components[name].group == 0] + outer_shape_comps = [ + name for name in self.components if self.components[name].group == 0 + ] for comp_name in outer_shape_comps: comp = self.components[comp_name] mat = self.materials[comp.materialid] - hpRegion,lpRegion = self.findRegionExtents(comp) + hpRegion, lpRegion = self.findRegionExtents(comp) num_layers = comp.getNumLayers(ndspan) num_layers = np.round(num_layers) - for k_layer in range(1,int(np.max(num_layers))+1): - beginSta,endSta = self.findLayerExtents(num_layers,k_layer) - ksMax = np.amin((len(beginSta),len(endSta))) + for k_layer in range(1, int(np.max(num_layers)) + 1): + beginSta, endSta = self.findLayerExtents(num_layers, k_layer) + ksMax = np.amin((len(beginSta), len(endSta))) # situation that beginSta/endSta is longer than 1 for ks in range(ksMax): ## END if hpRegion: - areas = self.keyareas[hpRegion[0]:hpRegion[1],beginSta[ks]:endSta[ks]] + areas = self.keyareas[ + hpRegion[0] : hpRegion[1], beginSta[ks] : endSta[ks] + ] regionarea = sum(areas.flatten()) - arcs = self.keyarcs[hpRegion[1],beginSta[ks]:endSta[ks]+1] - self.keyarcs[hpRegion[0],beginSta[ks]:endSta[ks]+1] + arcs = ( + self.keyarcs[hpRegion[1], beginSta[ks] : endSta[ks] + 1] + - self.keyarcs[hpRegion[0], beginSta[ks] : endSta[ks] + 1] + ) cur_bom = BOM() cur_bom.layernum = hprow cur_bom.materialid = comp.materialid @@ -869,14 +991,21 @@ def updateBOM(self): 
cur_bom.area = regionarea cur_bom.thickness = mat.layerthickness cur_bom.weight = mat.drydensity * regionarea - self.bomIndices['hp'].append([beginSta[ks],endSta[ks],*hpRegion]) - self.bom['hp'].append(cur_bom) + self.bomIndices["hp"].append( + [beginSta[ks], endSta[ks], *hpRegion] + ) + self.bom["hp"].append(cur_bom) hprow = hprow + 1 - + if lpRegion: - areas = self.keyareas[lpRegion[0]:lpRegion[1],beginSta[ks]:endSta[ks]] + areas = self.keyareas[ + lpRegion[0] : lpRegion[1], beginSta[ks] : endSta[ks] + ] regionarea = sum(areas.flatten()) - arcs = self.keyarcs[lpRegion[1],beginSta[ks]:endSta[ks]+1] - self.keyarcs[lpRegion[0],beginSta[ks]:endSta[ks]+1] + arcs = ( + self.keyarcs[lpRegion[1], beginSta[ks] : endSta[ks] + 1] + - self.keyarcs[lpRegion[0], beginSta[ks] : endSta[ks] + 1] + ) cur_bom = BOM() cur_bom.layernum = lprow cur_bom.materialid = comp.materialid @@ -888,27 +1017,31 @@ def updateBOM(self): cur_bom.area = regionarea cur_bom.thickness = mat.layerthickness cur_bom.weight = mat.drydensity * regionarea - self.bomIndices['lp'].append([beginSta[ks],endSta[ks],*lpRegion]) - self.bom['lp'].append(cur_bom) + self.bomIndices["lp"].append( + [beginSta[ks], endSta[ks], *lpRegion] + ) + self.bom["lp"].append(cur_bom) lprow = lprow + 1 - + # shearwebs swnum = None swrow = 0 swBeginSta = [] swEndSta = [] sw_comps = [comp for comp in self.components.values() if comp.group > 0] + def sorter(e): return e.group - sw_comps.sort(key = sorter) + + sw_comps.sort(key=sorter) for comp in sw_comps: mat = self.materials[comp.materialid] num_layers = comp.getNumLayers(ndspan) num_layers = np.round(num_layers) - - for k_layer in range(1,int(np.max(num_layers))+1): - beginSta,endSta = self.findLayerExtents(num_layers,k_layer) - ksMax = np.amin((len(beginSta),len(endSta))) + + for k_layer in range(1, int(np.max(num_layers)) + 1): + beginSta, endSta = self.findLayerExtents(num_layers, k_layer) + ksMax = np.amin((len(beginSta), len(endSta))) # situation that beginSta/endSta is longer than 1 for ks in range(ksMax): if swnum != comp.group - 1: @@ -916,11 +1049,11 @@ def sorter(e): swrow = 0 swBeginSta.append(beginSta[0]) swEndSta.append(endSta[0]) - self.bom['sw'].append([]) - self.bomIndices['sw'].append([]) - swBeginSta[swnum] = np.amin([*beginSta,swBeginSta[swnum]]) - swEndSta[swnum] = np.amax([*endSta,swEndSta[swnum]]) - areas = self.webareas[swnum][beginSta[ks]:endSta[ks]] + self.bom["sw"].append([]) + self.bomIndices["sw"].append([]) + swBeginSta[swnum] = np.amin([*beginSta, swBeginSta[swnum]]) + swEndSta[swnum] = np.amax([*endSta, swEndSta[swnum]]) + areas = self.webareas[swnum][beginSta[ks] : endSta[ks]] regionarea = sum(areas.flatten()) cur_bom = BOM() cur_bom.layernum = swrow @@ -933,102 +1066,127 @@ def sorter(e): cur_bom.area = regionarea cur_bom.thickness = mat.layerthickness cur_bom.weight = mat.drydensity * regionarea - self.bom['sw'][swnum].append(cur_bom) - self.bomIndices['sw'][swnum].append([beginSta[ks],endSta[ks]]) + self.bom["sw"][swnum].append(cur_bom) + self.bomIndices["sw"][swnum].append([beginSta[ks], endSta[ks]]) swrow = swrow + 1 - + # compute lebond, tebond, and dryweight - self.bom['lebond'] = sum(self.LEbond) * M_TO_MM - self.bom['tebond'] = sum(self.TEbond) * M_TO_MM - hp_dw = sum([L.weight for L in self.bom['hp']]) - lp_dw = sum([L.weight for L in self.bom['lp']]) - self.bom['dryweight'] = G_TO_KG * (hp_dw + lp_dw) - - nsw = len(self.bom['sw']) - self.bom['swbonds'] = [None]*nsw + self.bom["lebond"] = sum(self.LEbond) * M_TO_MM + self.bom["tebond"] = sum(self.TEbond) * M_TO_MM 
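# A small worked example of the unit handling implied by the constants above.
# Treating drydensity as an areal density in g/m^2 and the region areas as m^2
# is an inference from G_TO_KG and M_TO_MM, not something stated in this file.
drydensity = 1200.0  # hypothetical fabric areal density, g/m^2
regionarea = 2.5  # hypothetical panel area, m^2
layer_weight = drydensity * regionarea  # 3000 g, like cur_bom.weight above
dry_weight_kg = 0.001 * layer_weight  # 3.0 kg, mirroring G_TO_KG
lebond_mm = 1000.0 * 0.75  # a 0.75 m bond line reported as 750 mm (M_TO_MM)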
+ hp_dw = sum([L.weight for L in self.bom["hp"]]) + lp_dw = sum([L.weight for L in self.bom["lp"]]) + self.bom["dryweight"] = G_TO_KG * (hp_dw + lp_dw) + + nsw = len(self.bom["sw"]) + self.bom["swbonds"] = [None] * nsw for k in range(nsw): - sw_dw = sum([L.weight for L in self.bom['sw'][k]]) - self.bom['dryweight'] = self.bom['dryweight'] + sw_dw - C = self.webbonds[k][:,swBeginSta[k]:swEndSta[k]] - self.bom['swbonds'][k] = M_TO_MM * np.sum(C, 1) - + sw_dw = sum([L.weight for L in self.bom["sw"][k]]) + self.bom["dryweight"] = self.bom["dryweight"] + sw_dw + C = self.webbonds[k][:, swBeginSta[k] : swEndSta[k]] + self.bom["swbonds"][k] = M_TO_MM * np.sum(C, 1) + # build the material stack for each area nSegments = self.keyareas.shape[0] nStations = self.keyareas.shape[1] - nWebs = len(self.bomIndices['sw']) - segmentLabels = ['HP_TE_FLAT','HP_TE_REINF','HP_TE_PANEL', - 'HP_SPAR','HP_LE_PANEL','HP_LE','LP_LE', - 'LP_LE_PANEL','LP_SPAR','LP_TE_PANEL', - 'LP_TE_REINF','LP_TE_FLAT'] + nWebs = len(self.bomIndices["sw"]) + segmentLabels = [ + "HP_TE_FLAT", + "HP_TE_REINF", + "HP_TE_PANEL", + "HP_SPAR", + "HP_LE_PANEL", + "HP_LE", + "LP_LE", + "LP_LE_PANEL", + "LP_SPAR", + "LP_TE_PANEL", + "LP_TE_REINF", + "LP_TE_FLAT", + ] # self.stacks = [[None]*nStations]*nSegments - self.stacks = np.empty(shape=(nSegments,nStations), dtype = object) + self.stacks = np.empty(shape=(nSegments, nStations), dtype=object) for swsk1 in range(nSegments): for swsk2 in range(nStations): - self.stacks[swsk1,swsk2] = Stack() + self.stacks[swsk1, swsk2] = Stack() for kr in range(nSegments): for kc in range(nStations): # name the stacks __ - self.stacks[kr][kc].name = '{:02d}_{:02d}_{}'.format(kr, kc, segmentLabels[kr]) - self.stacks[kr][kc].indices = [kc,kc + 1,kr,kr + 1] - + self.stacks[kr][kc].name = "{:02d}_{:02d}_{}".format( + kr, kc, segmentLabels[kr] + ) + self.stacks[kr][kc].indices = [kc, kc + 1, kr, kr + 1] - for k in range(len(self.bom['hp'])): + for k in range(len(self.bom["hp"])): # for each row in the BOM, get the ply definition ... cur_ply = Ply() - cur_ply.component = self.bom['hp'][k].name #parent component of ply - cur_ply.materialid = self.bom['hp'][k].materialid # materialid of ply - cur_ply.thickness = self.bom['hp'][k].thickness # thickness [mm] of single ply - cur_ply.angle = 0 #TODO, set to 0 for now, self.bom['lp'](k, ); - cur_ply.nPlies = 1 # default to 1, modified in addply() if necessary + cur_ply.component = self.bom["hp"][k].name # parent component of ply + cur_ply.materialid = self.bom["hp"][k].materialid # materialid of ply + cur_ply.thickness = self.bom["hp"][ + k + ].thickness # thickness [mm] of single ply + cur_ply.angle = 0 # TODO, set to 0 for now, self.bom['lp'](k, ); + cur_ply.nPlies = 1 # default to 1, modified in addply() if necessary # ... and add the ply to every area that is part of the region - ind = self.bomIndices['hp'][k] - for kr in range(ind[2],ind[3]): - for kc in range(ind[0],ind[1]): - self.stacks[kr][kc].addply(copy(cur_ply)) # copy is important to keep make ply object unique in each stack - - for k in range(len(self.bom['lp'])): + ind = self.bomIndices["hp"][k] + for kr in range(ind[2], ind[3]): + for kc in range(ind[0], ind[1]): + self.stacks[kr][kc].addply( + copy(cur_ply) + ) # copy is important to keep make ply object unique in each stack + + for k in range(len(self.bom["lp"])): # for each row in the BOM, get the ply definition ... 
cur_ply = Ply() - cur_ply.component = self.bom['lp'][k].name #parent component of ply - cur_ply.materialid = self.bom['lp'][k].materialid # materialid of ply - cur_ply.thickness = self.bom['lp'][k].thickness # thickness [mm] of single ply - cur_ply.angle = 0 #TODO, set to 0 for now, self.bom['lp'](k, ); - cur_ply.nPlies = 1 # default to 1, modified in addply() if necessary - + cur_ply.component = self.bom["lp"][k].name # parent component of ply + cur_ply.materialid = self.bom["lp"][k].materialid # materialid of ply + cur_ply.thickness = self.bom["lp"][ + k + ].thickness # thickness [mm] of single ply + cur_ply.angle = 0 # TODO, set to 0 for now, self.bom['lp'](k, ); + cur_ply.nPlies = 1 # default to 1, modified in addply() if necessary + # ... and add the ply to every area that is part of the region - ind = self.bomIndices['lp'][k] - for kr in range(ind[2],ind[3]): - for kc in range(ind[0],ind[1]): + ind = self.bomIndices["lp"][k] + for kr in range(ind[2], ind[3]): + for kc in range(ind[0], ind[1]): self.stacks[kr][kc].addply(copy(cur_ply)) - self.swstacks = [None]*nWebs + self.swstacks = [None] * nWebs for kw in range(nWebs): self.swstacks[kw] = [] for swsk in range(nStations): self.swstacks[kw].append(Stack()) for kc in range(nStations): # name the stacks __SW - self.swstacks[kw][kc].name = '{:02d}_{:02d}_SW'.format(kw,kc) - ind = self.webindices[kw] # currently, the shearweb indices do not change down the span - self.swstacks[kw][kc].indices = [kc,kc + 1,ind[0],ind[1]] - for k in range(len(self.bom['sw'][kw])): + self.swstacks[kw][kc].name = "{:02d}_{:02d}_SW".format(kw, kc) + ind = self.webindices[ + kw + ] # currently, the shearweb indices do not change down the span + self.swstacks[kw][kc].indices = [kc, kc + 1, ind[0], ind[1]] + for k in range(len(self.bom["sw"][kw])): # for each row in the BOM, get the ply definition ... cur_ply = Ply() - cur_ply.component = self.bom['sw'][kw][k].name #parent component of ply - cur_ply.materialid = self.bom['sw'][kw][k].materialid # materialid of ply - cur_ply.thickness = self.bom['sw'][kw][k].thickness # thickness [mm] of single ply - cur_ply.angle = 0 #TODO, set to 0 for now, self.bom['lp'](k, ); - cur_ply.nPlies = 1 # default to 1, modified in addply() if necessary + cur_ply.component = self.bom["sw"][kw][ + k + ].name # parent component of ply + cur_ply.materialid = self.bom["sw"][kw][ + k + ].materialid # materialid of ply + cur_ply.thickness = self.bom["sw"][kw][ + k + ].thickness # thickness [mm] of single ply + cur_ply.angle = 0 # TODO, set to 0 for now, self.bom['lp'](k, ); + cur_ply.nPlies = 1 # default to 1, modified in addply() if necessary # ... 
and add the ply to every area that is part of the region - ind = self.bomIndices['sw'][kw][k] + ind = self.bomIndices["sw"][kw][k] - for kc in range(ind[0],ind[1]): + for kc in range(ind[0], ind[1]): self.swstacks[kw][kc].addply(copy(cur_ply)) - # need to add the 'MatDB' information which stores composite stack - # information in each region at each station - # see datatypes.MatDBentry - # prepare material database ========================================== + # need to add the 'MatDB' information which stores composite stack + # information in each region at each station + # see datatypes.MatDBentry + # prepare material database ========================================== self.matdb = dict() for mat_name in self.materials: cur_entry = MatDBentry() @@ -1041,7 +1199,7 @@ def sorter(e): cur_entry.gxy = cur_material.gxy cur_entry.gyz = cur_material.gyz cur_entry.gxz = cur_material.gxz - if cur_entry.type == 'isotropic': + if cur_entry.type == "isotropic": cur_entry.nuxy = cur_material.prxy else: cur_entry.prxy = cur_material.prxy @@ -1050,17 +1208,17 @@ def sorter(e): cur_entry.dens = cur_material.density cur_entry.reference = cur_material.reference self.matdb[mat_name] = cur_entry - - flat_stacks = self.stacks.flatten('F') + + flat_stacks = self.stacks.flatten("F") for k in range(self.stacks.size): cur_entry = MatDBentry() cur_entry.name = flat_stacks[k].name - cur_entry.type = 'composite' - cur_entry.reference = 'Reference text' - cur_entry.thicknessType = 'Constant' + cur_entry.type = "composite" + cur_entry.reference = "Reference text" + cur_entry.thicknessType = "Constant" cur_entry.uniqueLayers = len(flat_stacks[k].plygroups) - cur_entry.symmetryType = 'none' - cur_entry.layer = [None]*cur_entry.uniqueLayers + cur_entry.symmetryType = "none" + cur_entry.layer = [None] * cur_entry.uniqueLayers for j in range(cur_entry.uniqueLayers): cur_layer = Layer() matid = flat_stacks[k].plygroups[j].materialid @@ -1076,20 +1234,22 @@ def sorter(e): for k in range(len(self.swstacks[kw])): cur_entry = MatDBentry() cur_entry.name = self.swstacks[kw][k].name - cur_entry.type = 'composite' - cur_entry.reference = 'Reference text' - cur_entry.thicknessType = 'Constant' + cur_entry.type = "composite" + cur_entry.reference = "Reference text" + cur_entry.thicknessType = "Constant" try: cur_entry.uniqueLayers = len(self.swstacks[kw][k].plygroups) except TypeError: cur_entry.uniqueLayers = 0 - cur_entry.symmetryType = 'none' - cur_entry.layer = [None]*cur_entry.uniqueLayers + cur_entry.symmetryType = "none" + cur_entry.layer = [None] * cur_entry.uniqueLayers for j in range(cur_entry.uniqueLayers): cur_layer = Layer() matid = self.swstacks[kw][k].plygroups[j].materialid cur_layer.layerName = self.matdb[matid].name - cur_layer.thicknessA = MM_TO_M * self.swstacks[kw][k].plygroups[j].thickness + cur_layer.thicknessA = ( + MM_TO_M * self.swstacks[kw][k].plygroups[j].thickness + ) cur_layer.thicknessB = cur_layer.thicknessA cur_layer.quantity = self.swstacks[kw][k].plygroups[j].nPlies cur_layer.theta = self.swstacks[kw][k].plygroups[j].angle @@ -1106,17 +1266,21 @@ def sorter(e): if self.swstacks[kw][k].plygroups: cur_sw = Shearweb() cur_sw.Material = self.swstacks[kw][k].name - cur_sw.BeginStation = self.swstacks[kw][k].indices[0] # =k - cur_sw.EndStation = self.swstacks[kw][k].indices[1] # =k+1 - cur_sw.Corner = [ind[1]-1,ind[0]-1,ind[0]-1,ind[1]-1] # dp number is offset by 1 in NuMAD v1 + cur_sw.BeginStation = self.swstacks[kw][k].indices[0] # =k + cur_sw.EndStation = self.swstacks[kw][k].indices[1] # =k+1 + 
cur_sw.Corner = [ + ind[1] - 1, + ind[0] - 1, + ind[0] - 1, + ind[1] - 1, + ] # dp number is offset by 1 in NuMAD v1 self.shearweb.append(cur_sw) ctr += 1 return self - - def updateAirfoilProfile(self,k): + def updateAirfoilProfile(self, k): """ - + Parameters ---------- @@ -1124,44 +1288,43 @@ def updateAirfoilProfile(self,k): ------- None """ - thickness = self.ithickness[:,k] + thickness = self.ithickness[:, k] percentthick = self.ipercentthick[k] - camber = self.icamber[:,k] - c = self.ic[:,k] - #jcb: note that I'm using max thickness about camber - #instead of overall thickness of airfoil. We may need to - #change this definition. + camber = self.icamber[:, k] + c = self.ic[:, k] + # jcb: note that I'm using max thickness about camber + # instead of overall thickness of airfoil. We may need to + # change this definition. maxthick = np.amax(thickness) tratio = percentthick / (maxthick * 100) thick = thickness * tratio hp = camber - 0.5 * thick lp = camber + 0.5 * thick - profile1 = np.concatenate(([c[-1]],np.flipud(c),c[1:],[c[-1]])) - profile2 = np.concatenate(([0],np.flipud(hp),lp[1:],[0])) - profile = np.stack((profile1,profile2),axis=1) - self.profiles[:,:,k] = profile + profile1 = np.concatenate(([c[-1]], np.flipud(c), c[1:], [c[-1]])) + profile2 = np.concatenate(([0], np.flipud(hp), lp[1:], [0])) + profile = np.stack((profile1, profile2), axis=1) + self.profiles[:, :, k] = profile return self - def updateOMLgeometry(self, k): """ TODO docstring """ - x = self.profiles[:,0,k] - y = self.profiles[:,1,k] + x = self.profiles[:, 0, k] + y = self.profiles[:, 1, k] # self.xoffset[0,k] = c[mtindex] if self.naturaloffset: - x = x - self.xoffset[0,k] - x = x - self.ichordoffset[k] # apply chordwise offset - x = x * self.ichord[k] * - 1 * self.rotorspin # scale by chord - y = y * self.ichord[k] # scale by chord - twist = - 1 * self.rotorspin * self.idegreestwist[k] + x = x - self.xoffset[0, k] + x = x - self.ichordoffset[k] # apply chordwise offset + x = x * self.ichord[k] * -1 * self.rotorspin # scale by chord + y = y * self.ichord[k] # scale by chord + twist = -1 * self.rotorspin * self.idegreestwist[k] # prepare for hgtransform rotate & translate - coords = np.zeros((len(x),4)) - coords[:,0] = np.cos(np.deg2rad(twist)) * x - np.sin(np.deg2rad(twist)) * y - coords[:,1] = np.sin(np.deg2rad(twist)) * x + np.cos(np.deg2rad(twist)) * y - coords[:,2] = np.zeros(len(x)) - coords[:,3] = np.ones(len(x)) + coords = np.zeros((len(x), 4)) + coords[:, 0] = np.cos(np.deg2rad(twist)) * x - np.sin(np.deg2rad(twist)) * y + coords[:, 1] = np.sin(np.deg2rad(twist)) * x + np.cos(np.deg2rad(twist)) * y + coords[:, 2] = np.zeros(len(x)) + coords[:, 3] = np.ones(len(x)) # use the generating line to translate and rotate the coordinates # NOTE currently, rotation is not assigned from blade properties # and defaults to 0 @@ -1181,50 +1344,50 @@ def updateOMLgeometry(self, k): prebend_rot = atan(-prebend_slope); endc """ - transX = - 1 * self.rotorspin * self.isweep[k] + transX = -1 * self.rotorspin * self.isweep[k] transY = self.iprebend[k] transZ = self.ispan[k] - Ry = rotation('y', sweep_rot) - Rx = rotation('x', prebend_rot) - R = Ry@Rx - T = translation(transX,transY,transZ) + Ry = rotation("y", sweep_rot) + Rx = rotation("x", prebend_rot) + R = Ry @ Rx + T = translation(transX, transY, transZ) coords = coords @ np.transpose(R) @ np.transpose(T) # save the transformed coordinates - self.geometry[:,:,k] = coords[:,0:3] + self.geometry[:, :, k] = coords[:, 0:3] # self.geometry[:,0,k] = coords[:,0] # 
self.geometry[:,1,k] = coords[:,1] # self.geometry[:,2,k] = coords[:,2] return self - def addInterpolatedStation(self,newSpanLocation): - x0=self.ispan + def addInterpolatedStation(self, newSpanLocation): + x0 = self.ispan - if newSpanLocation < self.ispan[-1] and newSpanLocation>0: + if newSpanLocation < self.ispan[-1] and newSpanLocation > 0: for iSpan, spanLocation in enumerate(self.ispan[1:]): - if newSpanLocation < spanLocation: - insertIndex=iSpan+1 + insertIndex = iSpan + 1 break else: - raise ValueError(f'A new span location with value {newSpanLocation} is not possible.' ) + raise ValueError( + f"A new span location with value {newSpanLocation} is not possible." + ) - - self.ispan=np.insert(self.ispan, insertIndex,np.array([newSpanLocation])) + self.ispan = np.insert(self.ispan, insertIndex, np.array([newSpanLocation])) - self.leband=interpolator_wrap(x0,self.leband,self.ispan) - self.teband=interpolator_wrap(x0,self.teband,self.ispan) - self.sparcapwidth_hp=interpolator_wrap(x0,self.sparcapwidth_hp,self.ispan) - self.sparcapwidth_lp=interpolator_wrap(x0,self.sparcapwidth_lp,self.ispan) - self.sparcapoffset_hp=interpolator_wrap(x0,self.sparcapoffset_hp,self.ispan) - self.sparcapoffset_lp=interpolator_wrap(x0,self.sparcapoffset_lp,self.ispan) + self.leband = interpolator_wrap(x0, self.leband, self.ispan) + self.teband = interpolator_wrap(x0, self.teband, self.ispan) + self.sparcapwidth_hp = interpolator_wrap(x0, self.sparcapwidth_hp, self.ispan) + self.sparcapwidth_lp = interpolator_wrap(x0, self.sparcapwidth_lp, self.ispan) + self.sparcapoffset_hp = interpolator_wrap(x0, self.sparcapoffset_hp, self.ispan) + self.sparcapoffset_lp = interpolator_wrap(x0, self.sparcapoffset_lp, self.ispan) self.updateBlade() return insertIndex - def addStation(self, af = None, spanlocation: float = None): + def addStation(self, af=None, spanlocation: float = None): """This method adds a station - Specifically, the station object is created + Specifically, the station object is created and appended to self.stations. 
Parameters @@ -1238,7 +1401,7 @@ def addStation(self, af = None, spanlocation: float = None): Example ------- - ``blade.addStation(af,spanlocation)`` where ``af`` = airfoil filename + ``blade.addStation(af,spanlocation)`` where ``af`` = airfoil filename or ``AirfoilDef`` object """ newStation = Station(af) @@ -1249,24 +1412,24 @@ def addStation(self, af = None, spanlocation: float = None): else: self.stations = [] self.stations.append(newStation) - + # N = np.asarray(self.stations).size # k = N + 1 # if k > 1: # self.stations[k] = StationDef(af) # else: # self.stations = StationDef(af) - + # self.stations[k].spanlocation = spanlocation # self.stations[k].parent = self return self # Supporting function for updateBOM - def findLayerExtents(self,layerDist = None,layerN = None): + def findLayerExtents(self, layerDist=None, layerN=None): """ TODO docstring """ - assert np.isscalar(layerN),'second argument "layerN" must be a scalar' + assert np.isscalar(layerN), 'second argument "layerN" must be a scalar' staLogical = layerDist >= layerN prev = 0 beginSta = [] @@ -1276,67 +1439,66 @@ def findLayerExtents(self,layerDist = None,layerN = None): beginSta.append(k) if staLogical[k] == 0 and prev == 1: endSta.append(k) - elif k == len(staLogical)-1 and prev == 1: - endSta.append(k) + elif k == len(staLogical) - 1 and prev == 1: + endSta.append(k) prev = staLogical[k] - - return beginSta,endSta - + + return beginSta, endSta + # Supporting function for updateBOM - def findRegionExtents(self,comp = None): + def findRegionExtents(self, comp=None): """ TODO docstring """ - le = self.keylabels.index('le') + le = self.keylabels.index("le") # "keylabels" is expected to wrap from te on hp side around to te on lp side try: if len(comp.hpextents) == 2: try: - hp1 = self.keylabels[0:le+1].index(comp.hpextents[0]) + hp1 = self.keylabels[0 : le + 1].index(comp.hpextents[0]) except KeyError: print(f'HP extent label "{comp.hpextents[0]}" not defined.') try: - hp2 = self.keylabels[0:le+1].index(comp.hpextents[1]) + hp2 = self.keylabels[0 : le + 1].index(comp.hpextents[1]) except KeyError: print(f'HP extent label "{comp.hpextents[1]}" not defined.') - hpRegion = [hp1,hp2] + hpRegion = [hp1, hp2] hpRegion.sort() else: hpRegion = [] except TypeError: hpRegion = [] - + try: if len(comp.lpextents) == 2: try: - lp1 = self.keylabels[le:].index(comp.lpextents[0]) + le + lp1 = self.keylabels[le:].index(comp.lpextents[0]) + le except KeyError: print(f'HP extent label "{comp.hpextents[0]}" not defined.') try: lp2 = self.keylabels[le:].index(comp.lpextents[1]) + le except KeyError: print(f'HP extent label "{comp.hpextents[1]}" not defined.') - lpRegion = [lp1,lp2] + lpRegion = [lp1, lp2] lpRegion.sort() else: lpRegion = [] except: lpRegion = [] - + # if length(comp['hpextents'])==1 && length(comp['lpextents'])==1 - # sw1 = find(1==strcmpi(comp['hpextents']{1},keylabels(1:le))); - # assert(~isempty(sw1),'HP extent label "#s" not defined.',comp['hpextents']{1}); - # w2 = find(1==strcmpi(comp['lpextents']{1},keylabels(le:end))) + le-1; - # assert(~isempty(sw2),'LP extent label "#s" not defined.',comp['lpextents']{1}); - # swRegion = [sw1 sw2]; + # sw1 = find(1==strcmpi(comp['hpextents']{1},keylabels(1:le))); + # assert(~isempty(sw1),'HP extent label "#s" not defined.',comp['hpextents']{1}); + # w2 = find(1==strcmpi(comp['lpextents']{1},keylabels(le:end))) + le-1; + # assert(~isempty(sw2),'LP extent label "#s" not defined.',comp['lpextents']{1}); + # swRegion = [sw1 sw2]; # else swRegion = [] - return hpRegion,lpRegion #,swRegion - + 
return hpRegion, lpRegion # ,swRegion def getprofileTEtype(self, k: int): """ - + Parameters ---------- k @@ -1345,79 +1507,76 @@ def getprofileTEtype(self, k: int): ------ tetype : str """ - xy = self.profiles[:,:,k] + xy = self.profiles[:, :, k] unitNormals = getAirfoilNormals(xy) angleChange = getAirfoilNormalsAngleChange(unitNormals) - disconts = np.flatnonzero(angleChange>30) + disconts = np.flatnonzero(angleChange > 30) if np.std(angleChange) < 2: - tetype = 'round' + tetype = "round" elif len(disconts) > 1: - tetype = 'flat' + tetype = "flat" else: - tetype = 'sharp' - return tetype - - + tetype = "sharp" + return tetype - def expandBladeGeometryTEs(self,minimumTEedgelengths): + def expandBladeGeometryTEs(self, minimumTEedgelengths): """ TODO: docstring """ nStations = self.geometry.shape[2] - - for iStation in range(0,nStations): - firstPoint = self.ichord[iStation] * self.profiles[-2,:,iStation] - secondPont = self.ichord[iStation] * self.profiles[1,:,iStation] + for iStation in range(0, nStations): + firstPoint = self.ichord[iStation] * self.profiles[-2, :, iStation] + secondPont = self.ichord[iStation] * self.profiles[1, :, iStation] edgeLength = np.linalg.norm(secondPont - firstPoint) - #fprintf('station #i, edgeLength: #f\n',iStation,edgeLength*1000) + # fprintf('station #i, edgeLength: #f\n',iStation,edgeLength*1000) - maxthick = np.amax(self.ithickness[:,iStation]) - mtindex = np.argmax(self.ithickness[:,iStation]) + maxthick = np.amax(self.ithickness[:, iStation]) + mtindex = np.argmax(self.ithickness[:, iStation]) tratio = self.ipercentthick[iStation] / (maxthick * 100) - airFoilThickness = self.ithickness[:,iStation] * tratio - onset = self.ic[mtindex,iStation] + airFoilThickness = self.ithickness[:, iStation] * tratio + onset = self.ic[mtindex, iStation] if edgeLength < minimumTEedgelengths[iStation]: - print(f'Updating station: {iStation} TE separation from {edgeLength} to {minimumTEedgelengths[iStation]}') - tet = (minimumTEedgelengths[iStation] - edgeLength) / self.ichord[iStation] - tes = 5 / 3 * tet # slope of TE adjustment; 5/3*tet is "natural" + print( + f"Updating station: {iStation} TE separation from {edgeLength} to {minimumTEedgelengths[iStation]}" + ) + tet = (minimumTEedgelengths[iStation] - edgeLength) / self.ichord[ + iStation + ] + tes = 5 / 3 * tet # slope of TE adjustment; 5/3*tet is "natural" # continuous first & second derivatives at 'onset' # maintain second & third derivative at mc==1 (TE) # adjust slope at mc==1 (TE) by tes - A = np.array([ - [1,1,1,1], - [3,4,5,6], - [6,12,20,30], - [6,24,60,120] - ]) - d = np.array([[tet],[tes],[0],[0]]) - p = np.linalg.solve(A,d) - #onset = self(k).maxthick; # start of TE modification, measured from LE - vec = (self.ic[:,iStation] - onset) / (1 - onset) + A = np.array( + [[1, 1, 1, 1], [3, 4, 5, 6], [6, 12, 20, 30], [6, 24, 60, 120]] + ) + d = np.array([[tet], [tes], [0], [0]]) + p = np.linalg.solve(A, d) + # onset = self(k).maxthick; # start of TE modification, measured from LE + vec = (self.ic[:, iStation] - onset) / (1 - onset) mc = np.maximum(vec, np.zeros(vec.shape)) - temod = np.vstack([mc ** 3,mc ** 4,mc ** 5,mc ** 6]).T @ p + temod = np.vstack([mc**3, mc**4, mc**5, mc**6]).T @ p temod = temod.reshape(-1) airFoilThickness = airFoilThickness + temod - self.ithickness[:,iStation] = airFoilThickness / tratio + self.ithickness[:, iStation] = airFoilThickness / tratio self.updateAirfoilProfile(iStation) - mtindex = np.argmax(self.ithickness[:,iStation]) - self.xoffset[0,iStation] = self.ic[mtindex,iStation] + 
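# A minimal numerical sketch of the trailing-edge thickening blend used above in
# expandBladeGeometryTEs: the mc**3..mc**6 terms all vanish (with zero first and
# second derivatives) at the onset point, and the 4x4 solve forces the blend to
# reach the required extra thickness tet with slope tes at the TE while leaving
# the second and third derivatives there unchanged. tet below is an arbitrary
# made-up value; tes follows the 5/3*tet choice in the code.
import numpy as np

tet = 0.01
tes = 5.0 / 3.0 * tet
A = np.array([[1, 1, 1, 1], [3, 4, 5, 6], [6, 12, 20, 30], [6, 24, 60, 120]])
d = np.array([[tet], [tes], [0], [0]])
p = np.linalg.solve(A, d)

mc = np.linspace(0.0, 1.0, 5)  # normalized distance past the onset
temod = np.vstack([mc**3, mc**4, mc**5, mc**6]).T @ p  # added thickness distribution
print(float(temod[-1, 0]), tet)  # blend reaches tet at the TE (mc == 1)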
mtindex = np.argmax(self.ithickness[:, iStation]) + self.xoffset[0, iStation] = self.ic[mtindex, iStation] self.updateOMLgeometry(iStation) # firstPoint=self.ichord(iStation)*self.profiles(end-1,:,iStation); # secondPont=self.ichord(iStation)*self.profiles(2,:,iStation); # edgeLength2=norm(secondPont-firstPoint); # fprintf('station #i, edgeLength: #f, New edgeLength=#f, percent diff: #f\n',iStation,edgeLength*1000,edgeLength2*1000,(edgeLength2-edgeLength)/edgeLength2*100) - + self.updateKeypoints() return - - + ### Shell - def copyPly(self,ply): + def copyPly(self, ply): newPly = Ply() newPly.component = ply.component newPly.materialid = ply.materialid @@ -1425,84 +1584,82 @@ def copyPly(self,ply): newPly.angle = ply.angle newPly.nPlies = ply.nPlies return newPly - def editStacksForSolidMesh(self): - numSec,numStat = self.stacks.shape + numSec, numStat = self.stacks.shape for i in range(numSec): for j in range(numStat): - pg = self.stacks[i,j].plygroups - if (len(pg) == 4): + pg = self.stacks[i, j].plygroups + if len(pg) == 4: ply1 = self.copyPly(pg[1]) ply2 = self.copyPly(pg[2]) ply3 = self.copyPly(pg[3]) - newPg = np.array([ply1,ply2,ply3]) + newPg = np.array([ply1, ply2, ply3]) else: - if (len(pg) == 3): - #newPg = np.array([pg[1],pg[1],pg[2]]) + if len(pg) == 3: + # newPg = np.array([pg[1],pg[1],pg[2]]) ply1 = self.copyPly(pg[1]) ply2 = self.copyPly(pg[1]) ply3 = self.copyPly(pg[2]) t2 = ply1.thickness t3 = ply3.thickness - ply2.thickness = 0.3333333*(t2 + t3) - ply1.thickness = 0.6666666*t2 - ply3.thickness = 0.6666666*t3 - newPg = np.array([ply1,ply2,ply3]) + ply2.thickness = 0.3333333 * (t2 + t3) + ply1.thickness = 0.6666666 * t2 + ply3.thickness = 0.6666666 * t3 + newPg = np.array([ply1, ply2, ply3]) else: - if (len(pg) == 2): + if len(pg) == 2: ply1 = self.copyPly(pg[0]) ply2 = self.copyPly(pg[0]) ply3 = self.copyPly(pg[1]) - #newPg = np.array([pg[0],pg[0],pg[1]]) + # newPg = np.array([pg[0],pg[0],pg[1]]) t1 = ply1.thickness t2 = ply3.thickness - ply2.thickness = 0.3333333*(t1 + t2) - ply1.thickness = 0.6666666*t1 - ply3.thickness = 0.6666666*t2 - newPg = np.array([ply1,ply2,ply3]) + ply2.thickness = 0.3333333 * (t1 + t2) + ply1.thickness = 0.6666666 * t1 + ply3.thickness = 0.6666666 * t2 + newPg = np.array([ply1, ply2, ply3]) else: ply1 = self.copyPly(pg[0]) ply2 = self.copyPly(pg[0]) ply3 = self.copyPly(pg[0]) - #newPg = np.array([pg[0],pg[0],pg[0]]) + # newPg = np.array([pg[0],pg[0],pg[0]]) t1 = ply1.thickness - ply2.thickness = 0.3333333*t1 - ply1.thickness = 0.3333333*t1 - ply3.thickness = 0.3333333*t1 - newPg = np.array([ply1,ply2,ply3]) - self.stacks[i,j].plygroups = newPg - + ply2.thickness = 0.3333333 * t1 + ply1.thickness = 0.3333333 * t1 + ply3.thickness = 0.3333333 * t1 + newPg = np.array([ply1, ply2, ply3]) + self.stacks[i, j].plygroups = newPg + for i in range(2): stackLst = self.swstacks[i] for j in range(len(stackLst)): pg = stackLst[j].plygroups - if (len(pg) == 2): + if len(pg) == 2: ply1 = self.copyPly(pg[0]) ply2 = self.copyPly(pg[0]) ply3 = self.copyPly(pg[1]) - #newPg = np.array([pg[0],pg[0],pg[1]]) + # newPg = np.array([pg[0],pg[0],pg[1]]) t1 = ply1.thickness t2 = ply3.thickness - ply2.thickness = 0.3333333*(t1 + t2) - ply1.thickness = 0.6666666*t1 - ply3.thickness = 0.6666666*t2 - newPg = np.array([ply1,ply2,ply3]) + ply2.thickness = 0.3333333 * (t1 + t2) + ply1.thickness = 0.6666666 * t1 + ply3.thickness = 0.6666666 * t2 + newPg = np.array([ply1, ply2, ply3]) self.swstacks[i][j].plygroups = newPg - elif(len(pg) == 1): + elif len(pg) == 1: ply1 = 
self.copyPly(pg[0]) ply2 = self.copyPly(pg[0]) - ply3 = self.copyPly(pg[0]) - #newPg = np.array([pg[0],pg[0],pg[0]]) + ply3 = self.copyPly(pg[0]) + # newPg = np.array([pg[0],pg[0],pg[0]]) t1 = ply1.thickness - ply2.thickness = 0.3333333*t1 - ply1.thickness = 0.3333333*t1 - ply3.thickness = 0.3333333*t1 - newPg = np.array([ply1,ply2,ply3]) + ply2.thickness = 0.3333333 * t1 + ply1.thickness = 0.3333333 * t1 + ply3.thickness = 0.3333333 * t1 + newPg = np.array([ply1, ply2, ply3]) self.swstacks[i][j].plygroups = newPg return - """ #NOTE need team help here -kb # not converted diff --git a/src/pynumad/objects/Component.py b/src/pynumad/objects/Component.py index 095866d..751a805 100644 --- a/src/pynumad/objects/Component.py +++ b/src/pynumad/objects/Component.py @@ -40,14 +40,14 @@ class Component: hCtrl hLine - Examples: - - ``comp_self = ComponentDef();`` - - ``comp_self = ComponentDef(comp_struct);`` + Examples: + + ``comp_self = ComponentDef();`` + + ``comp_self = ComponentDef(comp_struct);`` """ - + def __init__(self): self.group: int = None self.name: str = None @@ -56,28 +56,28 @@ def __init__(self): self.hpextents: list = None self.lpextents: list = None self.cp: np.ndarray = None - self.imethod: str = 'linear' + self.imethod: str = "linear" self.pinnedends: bool = None self.hCtrl = None self.hLine = None - - def getcp(self): + def getcp(self): if self.pinnedends: - if np.any(self.cp[:,0] < 0) or np.any(self.cp[:,0] > 1): - raise Exception('ComponentDef: first coordinate of control points must be in range [0,1] when using "pinned" ends') - cpx = np.concatenate(([-0.01],self.cp[:,0],[1.01])) - cpy = np.concatenate(([0],self.cp[:,1],[0])) + if np.any(self.cp[:, 0] < 0) or np.any(self.cp[:, 0] > 1): + raise Exception( + 'ComponentDef: first coordinate of control points must be in range [0,1] when using "pinned" ends' + ) + cpx = np.concatenate(([-0.01], self.cp[:, 0], [1.01])) + cpy = np.concatenate(([0], self.cp[:, 1], [0])) else: - cpx = self.cp[:,0] - cpy = self.cp[:,1] - - return cpx,cpy - - - def getNumLayers(self,span): - cpx,cpy = self.getcp() - nLayers = interpolator_wrap(cpx,cpy,span,self.imethod,0) + cpx = self.cp[:, 0] + cpy = self.cp[:, 1] + + return cpx, cpy + + def getNumLayers(self, span): + cpx, cpy = self.getcp() + nLayers = interpolator_wrap(cpx, cpy, span, self.imethod, 0) return nLayers # TODO translate @@ -85,11 +85,11 @@ def plotcp(self): """ TODO docstring """ - cpx,cpy = self.getcp() + cpx, cpy = self.getcp() fig, ax = plt.subplots() - ax.plot(cpx,cpy) - x = np.linspace(0,1,100) - y = np.round(interpolator_wrap(cpx,cpy,x,'pchip',0)) - ax.plot(x,y) + ax.plot(cpx, cpy) + x = np.linspace(0, 1, 100) + y = np.round(interpolator_wrap(cpx, cpy, x, "pchip", 0)) + ax.plot(x, y) plt.title(self.name) - return \ No newline at end of file + return diff --git a/src/pynumad/objects/Material.py b/src/pynumad/objects/Material.py index 2e0a78e..1ebc681 100644 --- a/src/pynumad/objects/Material.py +++ b/src/pynumad/objects/Material.py @@ -5,12 +5,12 @@ ######################################################################## -class Material(): +class Material: """MaterialDef: A class for blade materials. 
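# A minimal sketch of the thickness split used by editStacksForSolidMesh above
# (the helper name split_plygroup_thicknesses is illustrative, not a pyNuMAD
# function). Stacks are normalized to exactly three plygroups for solid meshing:
# with two source plygroups each outer layer keeps 2/3 of its own thickness and
# the middle layer takes 1/3 of each, while a single plygroup is split into
# equal thirds, so the total laminate thickness is unchanged in both cases.
def split_plygroup_thicknesses(t_first, t_second=None):
    """Return (t1, t2, t3) for the three resulting plygroups."""
    if t_second is None:
        return t_first / 3.0, t_first / 3.0, t_first / 3.0
    return 2.0 * t_first / 3.0, (t_first + t_second) / 3.0, 2.0 * t_second / 3.0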
- + Parameters ---------- - + Attributes ---------- name : str @@ -20,89 +20,106 @@ class Material(): layerthickness : float Layer thickness [mm] ex : float - Longitudinal elastic modulus [Pa] + Longitudinal elastic modulus [Pa] ey : float - Transverse elastic modulus [Pa] + Transverse elastic modulus [Pa] ez : float - Through-the-thickness elastic modulus in the + Through-the-thickness elastic modulus in the principal material coordinates [Pa] gxy : float - In-plane shear modulus [Pa] + In-plane shear modulus [Pa] gyz : float - Transverse shear modulus [Pa] + Transverse shear modulus [Pa] gxz : float - Transverse shear modulus [Pa] + Transverse shear modulus [Pa] prxy : float - In-plane Poisson ratio [ ] + In-plane Poisson ratio [ ] pryz : float - Transverse Poisson ratio [ ] + Transverse Poisson ratio [ ] prxz : float - Transverse Poisson ratio [ ] + Transverse Poisson ratio [ ] density : float - Cured mass density [kg/m2] + Cured mass density [kg/m2] drydensity : float - Density of fabric + Density of fabric uts : float - 1 x 3 array of ultimate tensile strength design values. + 1 x 3 array of ultimate tensile strength design values. Sequence: SL , ST, Sz, 1 x 1 for isotropic. ucs : float - 1 x 3 array of ultimate compressive strength design values. + 1 x 3 array of ultimate compressive strength design values. Sequence: SL , ST, Sz, 1 x 1 for isotropic. uss : float - 1 x 3 array of ultimate shear strength design values. + 1 x 3 array of ultimate shear strength design values. Sequence: SLT , STz, SLz, 1 x 1 for isotropic. xzit : float - Lz tensile inclination parameter for Puck failure index + Lz tensile inclination parameter for Puck failure index xzic : float - Lz compressive inclination parameter for Puck failure index + Lz compressive inclination parameter for Puck failure index yzit : float - Tz tensile inclination parameter for Puck failure index + Tz tensile inclination parameter for Puck failure index yzic : float - Tz compressive inclination parameter for Puck failure index + Tz compressive inclination parameter for Puck failure index g1g2 : float - Fracture toughness ratio between GI (mode I) and GII (mode II) [ ] + Fracture toughness ratio between GI (mode I) and GII (mode II) [ ] alp0 : float - Fracture angle under pure transverse compression [degrees] + Fracture angle under pure transverse compression [degrees] etat : float - Transverse friction coefficient for Larc [ ] + Transverse friction coefficient for Larc [ ] etal : float - Longitudinal friction coefficient for Larc [ ] + Longitudinal friction coefficient for Larc [ ] m : list - Fatigue slope exponent [ ] + Fatigue slope exponent [ ] gamma_mf : list - from DNL-GL standard, fatigue strength reduction factor + from DNL-GL standard, fatigue strength reduction factor gamma_ms : list - from DNV-GL standard, short term strength reduction factor + from DNV-GL standard, short term strength reduction factor reference : str = None """ + def __init__(self): - self.name: str = None # User selected name of the material - self.type: str = None # Two options: ‘isotropic’ or ‘orthotropic’ - self.layerthickness: float = None # Layer thickness [mm] - self.ex: float = None # Longitudinal elastic modulus [Pa] - self.ey: float = None # Transverse elastic modulus [Pa] - self.ez: float = None # Through-the-thickness elastic modulus in the principal material coordinates [Pa] - self.gxy: float = None # In-plane shear modulus [Pa] - self.gyz: float = None # Transverse shear modulus [Pa] - self.gxz: float = None # Transverse shear modulus [Pa] 
- self.prxy: float = None # In-plane Poisson ratio [ ] - self.pryz: float = None # Transverse Poisson ratio [ ] - self.prxz: float = None # Transverse Poisson ratio [ ] - self.density: float = None # Cured mass density [kg/m2] - self.drydensity: float = None # Density of fabric - self.uts: float = None # 1 × 3 array of ultimate tensile strength design values. Sequence: SL , ST, Sz, 1 × 1 for isotropic. - self.ucs: float = None # 1 × 3 array of ultimate compressive strength design values. Sequence: SL , ST, Sz, 1 × 1 for isotropic. - self.uss: float = None # 1 × 3 array of ultimate shear strength design values. Sequence: SLT , STz, SLz, 1 × 1 for isotropic. - self.xzit: float = None # Lz tensile inclination parameter for Puck failure index - self.xzic: float = None # Lz compressive inclination parameter for Puck failure index - self.yzit: float = None # Tz tensile inclination parameter for Puck failure index - self.yzic: float = None # Tz compressive inclination parameter for Puck failure index - self.g1g2: float = None # Fracture toughness ratio between GI (mode I) and GII (mode II) [ ] - self.alp0: float = None # Fracture angle under pure transverse compression [degrees] - self.etat: float = None # Transverse friction coefficient for Larc [ ] - self.etal: float = None # Longitudinal friction coefficient for Larc [ ] - self.m: list = None # Fatigue slope exponent [ ] - self.gamma_mf: list = None # from DNL-GL standard, fatigue strength reduction factor - self.gamma_ms: list = None # from DNV-GL standard, short term strength reduction factor - self.reference: str = None \ No newline at end of file + self.name: str = None # User selected name of the material + self.type: str = None # Two options: ‘isotropic’ or ‘orthotropic’ + self.layerthickness: float = None # Layer thickness [mm] + self.ex: float = None # Longitudinal elastic modulus [Pa] + self.ey: float = None # Transverse elastic modulus [Pa] + self.ez: float = None # Through-the-thickness elastic modulus in the principal material coordinates [Pa] + self.gxy: float = None # In-plane shear modulus [Pa] + self.gyz: float = None # Transverse shear modulus [Pa] + self.gxz: float = None # Transverse shear modulus [Pa] + self.prxy: float = None # In-plane Poisson ratio [ ] + self.pryz: float = None # Transverse Poisson ratio [ ] + self.prxz: float = None # Transverse Poisson ratio [ ] + self.density: float = None # Cured mass density [kg/m2] + self.drydensity: float = None # Density of fabric + self.uts: float = None # 1 × 3 array of ultimate tensile strength design values. Sequence: SL , ST, Sz, 1 × 1 for isotropic. + self.ucs: float = None # 1 × 3 array of ultimate compressive strength design values. Sequence: SL , ST, Sz, 1 × 1 for isotropic. + self.uss: float = None # 1 × 3 array of ultimate shear strength design values. Sequence: SLT , STz, SLz, 1 × 1 for isotropic. 
+ self.xzit: float = ( + None # Lz tensile inclination parameter for Puck failure index + ) + self.xzic: float = ( + None # Lz compressive inclination parameter for Puck failure index + ) + self.yzit: float = ( + None # Tz tensile inclination parameter for Puck failure index + ) + self.yzic: float = ( + None # Tz compressive inclination parameter for Puck failure index + ) + self.g1g2: float = ( + None # Fracture toughness ratio between GI (mode I) and GII (mode II) [ ] + ) + self.alp0: float = ( + None # Fracture angle under pure transverse compression [degrees] + ) + self.etat: float = None # Transverse friction coefficient for Larc [ ] + self.etal: float = None # Longitudinal friction coefficient for Larc [ ] + self.m: list = None # Fatigue slope exponent [ ] + self.gamma_mf: list = ( + None # from DNL-GL standard, fatigue strength reduction factor + ) + self.gamma_ms: list = ( + None # from DNV-GL standard, short term strength reduction factor + ) + self.reference: str = None diff --git a/src/pynumad/objects/Stack.py b/src/pynumad/objects/Stack.py index c3c2e79..c687882 100644 --- a/src/pynumad/objects/Stack.py +++ b/src/pynumad/objects/Stack.py @@ -4,6 +4,7 @@ # See license.txt for disclaimer information # ######################################################################## + class Stack: """A class definition for a stack of composite layers. @@ -15,15 +16,15 @@ class Stack: name : string Name of the stack or composite material used by NuMAD, e.g. '000000_HP_LE_PANEL' indices : list - Indices of stack, ``[in board station, out board station, + Indices of stack, ``[in board station, out board station, 1st kepoint, 2nd keypoint]``, e.g. ``[ibSta,obSta,keypt1,keypt2]`` plygroups : list List of ``ply`` dataclasses - + Example ------- ``stack = StackDef();`` - + See also ``xlsBlade``, ``BladeDef``, ``BladeDef.updateBOM`` """ @@ -31,7 +32,7 @@ class Stack: indices = None plygroups: list = [] - def addply(self,ply): + def addply(self, ply): """This method adds a Ply object to stack Parameters @@ -47,8 +48,9 @@ def addply(self,ply): ``stack.addply(ply)`` """ if self.plygroups: - if ((ply.component == self.plygroups[-1].component) and - (ply.angle == self.plygroups[-1].angle)): + if (ply.component == self.plygroups[-1].component) and ( + ply.angle == self.plygroups[-1].angle + ): self.plygroups[-1].nPlies += 1 else: self.plygroups.append(ply) @@ -58,12 +60,12 @@ def addply(self,ply): return self - def layerThicknesses(self): - nLayers=len(self.plygroups) - thickness=0 - layerThicknesses=[] + nLayers = len(self.plygroups) + thickness = 0 + layerThicknesses = [] for iLayer in range(nLayers): - layerThicknesses.append(self.plygroups[iLayer].nPlies*self.plygroups[iLayer].thickness) + layerThicknesses.append( + self.plygroups[iLayer].nPlies * self.plygroups[iLayer].thickness + ) return layerThicknesses - \ No newline at end of file diff --git a/src/pynumad/objects/Station.py b/src/pynumad/objects/Station.py index a0a16e9..583f401 100644 --- a/src/pynumad/objects/Station.py +++ b/src/pynumad/objects/Station.py @@ -9,9 +9,10 @@ from pynumad.objects.Airfoil import Airfoil from pynumad.utils.interpolation import interpolator_wrap -class Station(): + +class Station: """Station object - + Parameters ---------- af : AirfoilDef or string @@ -26,44 +27,51 @@ class Station(): parent hgProfiile """ - def __init__(self,af = None): + + def __init__(self, af=None): self.airfoil = None self.spanlocation = None self.parent = None self.hgProfile = None - if isinstance(af,str): - self.airfoil = Airfoil(filename 
= af) + if isinstance(af, str): + self.airfoil = Airfoil(filename=af) elif isinstance(af, Airfoil): self.airfoil = af else: - self.airfoil = Airfoil() - + self.airfoil = Airfoil() + @property def degreestwist(self): """ TODO docstring """ - _degreestwist = interpolator_wrap(self.parent.span,self.parent.degreestwist,self.spanlocation) + _degreestwist = interpolator_wrap( + self.parent.span, self.parent.degreestwist, self.spanlocation + ) return _degreestwist - - @property - def chord(self): + + @property + def chord(self): """ TODO docstring """ - _chord = interpolator_wrap(self.parent.span,self.parent.chord,self.spanlocation) + _chord = interpolator_wrap( + self.parent.span, self.parent.chord, self.spanlocation + ) return _chord - - @property + + @property def percentthick(self): """ TODO docstring """ - _percentthick = interpolator_wrap(self.parent.span,self.parent.percentthick,self.spanlocation) + _percentthick = interpolator_wrap( + self.parent.span, self.parent.percentthick, self.spanlocation + ) return _percentthick - - @property + + @property def coffset(self): """ TODO docstring @@ -71,41 +79,39 @@ def coffset(self): # coffset = interp1(self.parent.span,self.parent.coffset,self.spanlocation); _coffset = self.airfoil.maxthick return _coffset - - @property + + @property def xyz(self): """ TODO docstring """ - twistFlag = - 1 + twistFlag = -1 tratio = self.percentthick / self.airfoil.percentthick thick = self.airfoil.thickness * tratio hp = self.airfoil.camber - 0.5 * thick lp = self.airfoil.camber + 0.5 * thick c = self.airfoil.c - x = np.concatenate((c[-1],np.flipud(c),c[1:],c[-1]),axis = 0) - y = np.concatenate((hp[-1],np.flipud(hp),lp[1:],lp[-1]),axis = 0) + x = np.concatenate((c[-1], np.flipud(c), c[1:], c[-1]), axis=0) + y = np.concatenate((hp[-1], np.flipud(hp), lp[1:], lp[-1]), axis=0) x = (x - self.coffset) * self.chord * twistFlag y = (y) * self.chord twist = twistFlag * self.degreestwist * np.pi / 180 - xyz = np.zeros((len(x),3)) - xyz[:,1] = np.cos(twist) * x - np.sin(twist) * y - xyz[:,2] = np.sin(twist) * x + np.cos(twist) * y - xyz[:,3] = self.spanlocation + xyz = np.zeros((len(x), 3)) + xyz[:, 1] = np.cos(twist) * x - np.sin(twist) * y + xyz[:, 2] = np.sin(twist) * x + np.cos(twist) * y + xyz[:, 3] = self.spanlocation return xyz - # NOTE: Not finished. 
Not sure what this is used for - def updateProfile(self): + def updateProfile(self): xyz = self.xyz - if len(self.hgProfile)==0: - self.hgProfile = line(0,0,0) - set(self.hgProfile,'XData',xyz[:,0],'YData',xyz[:,1],'ZData',xyz[:,2]) + if len(self.hgProfile) == 0: + self.hgProfile = line(0, 0, 0) + set(self.hgProfile, "XData", xyz[:, 0], "YData", xyz[:, 1], "ZData", xyz[:, 2]) return - - - # def delete(self = None): + + # def delete(self = None): # if ishandle(self.hgProfile): # os.delete(self.hgProfile) - - # return \ No newline at end of file + + # return diff --git a/src/pynumad/objects/Subobjects.py b/src/pynumad/objects/Subobjects.py index ff94706..bb3e3e3 100644 --- a/src/pynumad/objects/Subobjects.py +++ b/src/pynumad/objects/Subobjects.py @@ -4,48 +4,49 @@ # See license.txt for disclaimer information # ######################################################################## + class MatDBentry: - """A simple class to organize the attributes of a material - """ + """A simple class to organize the attributes of a material""" + def __init__(self): self.type: str = None - self.name:str = None - self.reference:str = None - self.dens:list = None - self.nuxy:list = None - self.ex:list = None - self.ey:list = None - self.ez:list = None - self.gxy:list = None - self.gyz:list = None - self.gxz:list = None - self.prxy:list = None - self.pryz:list = None - self.prxz:list = None - self.xten:list = None - self.xcmp:list = None - self.yten:list = None - self.ycmp:list = None - self.zten:list = None - self.zcmp:list = None - self.xy:list = None - self.yz:list = None - self.xz:list = None - self.xycp:list = None - self.yzcp:list = None - self.xzcp:list = None - self.xzit:list = None - self.xzic:list = None - self.yzit:list = None - self.yzic:list = None - self.g1g2:list = None - self.etal:list = None - self.etat:list = None - self.alp0:list = None - self.thicknessType:list = None - self.uniqueLayers:list = None - self.symmetryType:list = None - self.layer:list = None + self.name: str = None + self.reference: str = None + self.dens: list = None + self.nuxy: list = None + self.ex: list = None + self.ey: list = None + self.ez: list = None + self.gxy: list = None + self.gyz: list = None + self.gxz: list = None + self.prxy: list = None + self.pryz: list = None + self.prxz: list = None + self.xten: list = None + self.xcmp: list = None + self.yten: list = None + self.ycmp: list = None + self.zten: list = None + self.zcmp: list = None + self.xy: list = None + self.yz: list = None + self.xz: list = None + self.xycp: list = None + self.yzcp: list = None + self.xzcp: list = None + self.xzit: list = None + self.xzic: list = None + self.yzit: list = None + self.yzic: list = None + self.g1g2: list = None + self.etal: list = None + self.etat: list = None + self.alp0: list = None + self.thicknessType: list = None + self.uniqueLayers: list = None + self.symmetryType: list = None + self.layer: list = None class Layer: @@ -60,11 +61,11 @@ class Layer: self.quantity : int self.theta : float """ - def __init__(self): - self.layerName:str = None - self.thicknessA:float = None - self.thicknessB:float = None + def __init__(self): + self.layerName: str = None + self.thicknessA: float = None + self.thicknessB: float = None self.quantity: int = None self.theta: float = None @@ -77,20 +78,20 @@ class Shearweb: Material : str BeginStation : int - EndStation : int + EndStation : int Corner : list """ - def __init__(self): + def __init__(self): self.Material: str = None - self.BeginStation:int = None - self.EndStation:int = None - 
self.Corner:list = None + self.BeginStation: int = None + self.EndStation: int = None + self.Corner: list = None class BOM: """A simple class to organize the attributes of a Bill of Materials - + Attributes ---------- @@ -115,18 +116,18 @@ class BOM: weight : float Computed dry layer weight (g) """ - def __init__(self): - self.layernum:int = None - self.materialid:int = None - self.name:str = None - self.beginsta:float = None - self.endsta:float = None - self.maxwidth:float = None - self.avgwidth:float = None - self.area:float = None - self.thickness:float = None - self.weight:float = None + def __init__(self): + self.layernum: int = None + self.materialid: int = None + self.name: str = None + self.beginsta: float = None + self.endsta: float = None + self.maxwidth: float = None + self.avgwidth: float = None + self.area: float = None + self.thickness: float = None + self.weight: float = None class Ply: @@ -141,17 +142,18 @@ class Ply: Material id of ply thickness : float thickness of single ply (mm) - angle : float + angle : float ply angle nPlies : int number of plies """ + def __init__(self): - self.component: str = None # parent component`` - self.materialid: str = None # materialid of ply`` - self.thickness: float = None # thickness [mm] of single ply`` - self.angle: float = None # ply angle`` - self.nPlies: int = None # number of plies`` + self.component: str = None # parent component`` + self.materialid: str = None # materialid of ply`` + self.thickness: float = None # thickness [mm] of single ply`` + self.angle: float = None # ply angle`` + self.nPlies: int = None # number of plies`` class SkinArea: diff --git a/src/pynumad/shell/Boundary2DClass.py b/src/pynumad/shell/Boundary2DClass.py deleted file mode 100644 index 4d7df5d..0000000 --- a/src/pynumad/shell/Boundary2DClass.py +++ /dev/null @@ -1,32 +0,0 @@ -import numpy as np -import pynumad.shell.MeshTools as mt -from pynumad.shell.Segment2DClass import * - -class Boundary2D(): - - def __init__(self,segList=[]): - self.segList = list() - self.segList.extend(segList) - - def addSegment(self,segType,keyPts,numEls): - self.segList.append(Segment2D(segType,keyPts,numEls)) - - def getBoundaryMesh(self): - allNds = list() - allEds = list() - totNds = 0 - for seg in self.segList: - segMesh = seg.getNodesEdges() - allNds.extend(segMesh['nodes']) - allEds.extend(segMesh['edges'] + totNds) - totNds = len(allNds) - allNds = np.array(allNds) - allEds = np.array(allEds) - - meshData = dict() - meshData['nodes'] = allNds - meshData['elements'] = allEds - - output = mt.mergeDuplicateNodes(meshData) - - return output \ No newline at end of file diff --git a/src/pynumad/shell/Mesh2DClass.py b/src/pynumad/shell/Mesh2DClass.py deleted file mode 100644 index 7783a71..0000000 --- a/src/pynumad/shell/Mesh2DClass.py +++ /dev/null @@ -1,981 +0,0 @@ -import numpy as np -from pynumad.shell.SpatialGridList2DClass import * -import plotly.graph_objects as go - -class Mesh2D(): - - def __init__(self,boundaryNodes,boundaryEdges=[]): - self.nodeGL = None - self.edgeGL = None - self.triElGL = None - - self.minEdgeLen = 0.0 - self.maxEdgeLen = 1.0 - self.avgProjLen = 1.0 - - self.numBndNodes = len(boundaryNodes) - self.numNodes = self.numBndNodes - self.ndSize = self.numBndNodes - self.nodes = np.array(boundaryNodes) - - - self.numBndEdges = len(boundaryEdges) - self.edgeNodes = np.array(boundaryEdges) - self.numEdges = self.numBndEdges - self.edSize = self.numBndEdges - self.edgeElements = np.array([]) - self.edgeUnitNorms = np.array([]) - - self.numTriEls = 0 - 
self.triElSize = 0 - self.triElements = np.array([]) - - self.numQuadEls = 0 - self.quadElSize = 0 - self.quadElements = np.array([]) - - ## !! check changes to createSweptMesh calls - def createSweptMesh(self, sweepMethod, sweepElements, sweepDistance=1.0, point=[], axis=[], followNormal=False, destNodes=[], interpMethod='linear'): - ## sweepMethod = inDirection, toPoint, fromPoint, toDestNodes, revolve - """Object data modified: self.quadElements, self.nodes, self.quadElements - Parameters - ---------- - - Returns - ------- - nodes - elements - """ - nbNds = self.numBndNodes - nbEds = self.numBndEdges - try: - totSweepEls = sum(sweepElements) - ndSize = nbNds*(totSweepEls+1) - stages = len(sweepElements) - multiStage = True - except: - totSweepEls = sweepElements - ndSize = nbNds*(sweepElements+1) - stages = 1 - multiStage = False - dimSpace = len(self.nodes[0]) - tmp = self.nodes.copy() - self.nodes = np.zeros((ndSize,dimSpace)) - self.nodes[0:nbNds] = tmp - self.ndSize = ndSize - self.numNodes = nbNds - - if(self.numBndEdges == 0): - n1 = np.array(range(nbNds-1)) - n2 = np.array(range(1,nbNds)) - self.edgeNodes = np.transpose(np.array([n1,n2])) - self.numEdges = nbNds - 1 - self.numBndEdges = nbNds - 1 - self.edSize = nbNds - 1 - - self.triElements = np.array([]) - self.triElSize = 0 - self.numTriEls = 0 - - quadElSize = nbEds*totSweepEls - self.quadElements = -np.ones((quadElSize,4),dtype=int) - self.quadElSize = quadElSize - self.numQuadEls = 0 - - methString = 'inDirection toPoint fromPoint' - if(sweepMethod in methString): - ndDir = list() - if(sweepMethod == 'inDirection'): - mag = np.linalg.norm(axis) - unitAxis = (1.0/mag)*np.array(axis) - for i in range(0,self.numNodes): - ndDir.append(unitAxis) - else: - pAr = np.array(point) - for i in range(0,self.numNodes): - if(sweepMethod == 'toPoint'): - vec = pAr - nd - else: - vec = nd - pAr - mag = np.linalg.norm(vec) - unitVec = (1.0/mag)*vec - ndDir.append(unitVec) - rowNds = self.numNodes - rowEls = self.numEdges - stepLen = sweepDistance/sweepElements - k = self.numNodes - m = self.numQuadEls - for i in range(0,sweepElements): - for j in range(0,rowNds): - newNd = self.nodes[j] + (i+1)*stepLen*ndDir[j] - self.nodes[k] = newNd - k = k + 1 - for j in range(0,rowEls): - n1 = self.edgeNodes[j,0] + i*rowNds - n2 = self.edgeNodes[j,1] + i*rowNds - n3 = n2 + rowNds - n4 = n1 + rowNds - self.quadElements[m,:] = np.array([n1,n2,n3,n4]) - m = m + 1 - self.numNodes = k - self.numQuadEls = m - elif(sweepMethod == 'toDestNodes'): - nNds = self.numNodes - nbNds = self.numBndNodes - nEds = self.numEdges - nQuad = self.numQuadEls - if(not multiStage): - sweepElements = [sweepElements] - destNodes = [destNodes] - if(interpMethod == 'linear'): - prevDest = self.nodes.copy() - for stg in range(0,stages): - dNds = np.array(destNodes[stg]) - ndDir = list() - for ndi in range(0,nbNds): - vec = (1.0/sweepElements[stg])*(dNds[ndi] - prevDest[ndi]) - ndDir.append(vec) - ndDir = np.array([ndDir]) - for i in range(0,sweepElements[stg]): - for ndi in range(0,nbNds): - self.nodes[nNds] = self.nodes[ndi] + (i+1)*ndDir[ndi] - nNds = nNds + 1 - for edi in range(0,self.numBndEdges): - n1 = self.edgeNodes[edi,0] + i*nbNds - n2 = self.edgeNodes[edi,1] + i*nbNds - n3 = n2 + nbNds - n4 = n1 + nbNds - self.quadElements[nQuad] = np.array([n1,n2,n3,n4]) - nQuad = nQuad + 1 - prevDest = dNds - else: ## Smooth interpolation - xMat = np.zeros((nbNds,totSweepEls+1)) - yMat = np.zeros((nbNds,totSweepEls+1)) - pDest = (1.0/stages)*np.array(range(0,stages+1)) - pAll = 
(1.0/totSweepEls)*np.array(range(0,totSweepEls+1)) - for ndi in range(0,nbNds): - xDest = [self.nodes[ndi,0]] - yDest = [self.nodes[ndi,1]] - for dNds in destNodes: - xDest.append(dNds[ndi][0]) - yDest.append(dNds[ndi][1]) - xDest = np.array(xDest) - iFun = interpolate.interp1d(pDest,xDest,'cubic', axis=0,bounds_error=False,fill_value='extrapolate') - xAll = iFun(pAll) - xMat[ndi,:] = xAll - yDest = np.array(yDest) - iFun = interpolate.interp1d(pDest,yDest,'cubic', axis=0,bounds_error=False,fill_value='extrapolate') - yAll = iFun(pAll) - yMat[ndi,:] = yAll - if(dimSpace == 3): - zMat = np.zeros((nbNds,totSweepEls+1)) - for ndi in range(0,nbNds): - zDest = [self.nodes[ndi,2]] - for dNds in destNodes: - zDest.append(dNds[ndi][2]) - zDest = np.array(zDest) - iFun = interpolate.interp1d(pDest,zDest,'cubic', axis=0,bounds_error=False,fill_value='extrapolate') - zAll = iFun(pAll) - zMat[ndi,:] = zAll - for i in range(0,totSweepElements): - for ndi in range(0,nbNds): - if(dimSpace == 2): - self.nodes[nNds] = np.array([xMat[ndi,i+1],yMat[ndi,i+1]]) - else: - self.nodes[nNds] = np.array([xMat[ndi,i+1],yMat[ndi,i+1],zMat[ndi,i+1]]) - nNds = nNds + 1 - for edi in range(0,self.numBndEdges): - n1 = self.edgeNodes[edi,0] + i*nbNds - n2 = self.edgeNodes[edi,1] + i*nbNds - n3 = n2 + nbNds - n4 = n1 + nbNds - self.quadElements[nQuad] = np.array([n1,n2,n3,n4]) - nQuad = nQuad + 1 - self.numNodes = nNds - self.numQuadEls = nQuad - - - meshOut = dict() - meshOut['nodes'] = self.nodes - meshOut['elements'] = self.quadElements - return meshOut - - def skewNodes(self): - od = np.tan(np.pi/12.0) - skewMat = np.array([[1.0,od],[od,1.0]]) - self.nodes = np.matmul(self.nodes,skewMat) - - def unskewNodes(self): - od = np.tan(np.pi/12.0) - skewMat = np.array([[1.0,od],[od,1.0]]) - invSkew = np.linalg.inv(skewMat) - self.nodes = np.matmul(self.nodes,invSkew) - - def getBoundaryEdgeNormals(self): - stepLen = self.minEdgeLen/np.sqrt(3.0) - numSteps = int(np.ceil(self.edgeGL.xGSz*self.edgeGL.xRows/stepLen)) - yMin = self.edgeGL.yMin - xMin = self.edgeGL.xMin - xMarg = 0.6*self.maxEdgeLen - for i in range(0,numSteps): - xCrd = xMin + i*stepLen - p1 = np.array([xCrd,yMin]) - v1 = np.array([0.0,1.0]) - Xns = list() - nearEdges = self.edgeGL.findInXYMargin(p1,xMarg,-1) - for edi in nearEdges: - n1 = self.edgeNodes[edi,0] - n2 = self.edgeNodes[edi,1] - p2 = self.nodes[n1] - v2 = self.nodes[n2] - p2 - Amat = np.array([[v1[0],-v2[0]],[v1[1],-v2[1]]]) - detA = np.linalg.det(Amat) - if(detA != 0.0): - bVec = p2 - p1 - soln = np.linalg.solve(Amat,bVec) - if(soln[1] > 0.0 and soln[1] < 1.0): - Xns.append([edi,soln[0]]) - iLen = len(Xns) - for i in range(0,iLen-1): - for j in range(0,iLen-1): - x1 = Xns[j] - x2 = Xns[j+1] - if(x2[1] < x1[1]): - Xns[j] = x2 - Xns[j+1] = x1 - for i in range(0,iLen,2): - edi = Xns[i][0] - uN = self.edgeUnitNorms[edi] - if(uN[1] < 0.0): - self.edgeUnitNorms[edi] = -uN - for i in range(1,iLen,2): - edi = Xns[i][0] - uN = self.edgeUnitNorms[edi] - if(uN[1] > 0.0): - self.edgeUnitNorms[edi] = -uN - - numSteps = int(np.ceil(self.edgeGL.yGSz*self.edgeGL.yRows/stepLen)) - yMarg = 0.6*self.maxEdgeLen - for i in range(0,numSteps): - yCrd = yMin + i*stepLen - p1 = np.array([xMin,yCrd]) - v1 = np.array([1.0,0.0]) - Xns = list() - nearEdges = self.edgeGL.findInXYMargin(p1,-1,yMarg) - for edi in nearEdges: - n1 = self.edgeNodes[edi,0] - n2 = self.edgeNodes[edi,1] - p2 = self.nodes[n1] - v2 = self.nodes[n2] - p2 - Amat = np.array([[v1[0],-v2[0]],[v1[1],-v2[1]]]) - detA = np.linalg.det(Amat) - if(detA != 0.0): - bVec 
= p2 - p1 - soln = np.linalg.solve(Amat,bVec) - if(soln[1] > 0.0 and soln[1] < 1.0): - Xns.append([edi,soln[0]]) - iLen = len(Xns) - for i in range(0,iLen-1): - for j in range(0,iLen-1): - x1 = Xns[j] - x2 = Xns[j+1] - if(x2[1] < x1[1]): - Xns[j] = x2 - Xns[j+1] = x1 - for i in range(0,iLen,2): - edi = Xns[i][0] - uN = self.edgeUnitNorms[edi] - if(uN[0] < 0.0): - self.edgeUnitNorms[edi] = -uN - for i in range(1,iLen,2): - edi = Xns[i][0] - uN = self.edgeUnitNorms[edi] - if(uN[0] > 0.0): - self.edgeUnitNorms[edi] = -uN - - def unstructuredPrep(self,elType): - nbNds = self.numBndNodes - n_pi = nbNds/np.pi - ndSize = int(4*n_pi*n_pi) - tmp = self.nodes.copy() - self.nodes = np.zeros((ndSize,2)) - self.nodes[0:nbNds,0:2] = tmp - self.ndSize = ndSize - self.numNodes = nbNds - - edSize = int(3*nbNds*n_pi) - nbEds = self.numBndEdges - if(nbEds == 0): - n1 = np.array(range(nbNds)) - n2 = np.array(range(1,nbNds+1)) - n2[nbNds-1] = 0 - self.edgeNodes = -np.ones((edSize,2),dtype=int) - self.edgeNodes[0:nbNds,0] = n1 - self.edgeNodes[0:nbNds,1] = n2 - nbEds = nbNds - self.numBndEdges = nbEds - else: - tmp = self.edgeNodes.copy() - self.edgeNodes = -np.ones((edSize,2),dtype=int) - self.edgeNodes[0:nbEds] = tmp - self.edgeElements = -np.ones((edSize,2),dtype=int) - self.edgeUnitNorms = np.zeros((edSize,2)) - self.edSize = edSize - self.numEdges = nbEds - - triElSize = int(2*nbNds*n_pi) - self.triElements = -np.ones((triElSize,3),dtype=int) - self.triElSize = triElSize - self.numTriEls = 0 - - quadElSize = int(nbNds*n_pi) - self.quadElements = -np.ones((quadElSize,4),dtype=int) - self.quadElSize = quadElSize - self.numQuadEls = 0 - - if(elType == 'quad'): - self.skewNodes() - minLen = 1.0e+100 - maxLen = 0.0 - avgLen = 0.0 - for edi in range(0,self.numEdges): - n1 = self.edgeNodes[edi,0] - n2 = self.edgeNodes[edi,1] - vec = self.nodes[n1] - self.nodes[n2] - ln = np.linalg.norm(vec) - avgLen = avgLen + ln - if(ln < minLen): - minLen = ln - if(ln > maxLen): - maxLen = ln - unitVec = (1.0/ln)*vec - self.edgeUnitNorms[edi] = np.array([-unitVec[1],unitVec[0]]) - avgLen = avgLen/self.numEdges - self.minEdgeLen = minLen - self.maxEdgeLen = maxLen - self.avgProjLen = 0.5*np.sqrt(3)*avgLen - xMin = np.amin(self.nodes[0:self.numNodes,0]) - xMax = np.amax(self.nodes[0:self.numNodes,0]) - yMin = np.amin(self.nodes[0:self.numNodes,1]) - yMax = np.amax(self.nodes[0:self.numNodes,1]) - xLen = xMax - xMin - yLen = yMax - yMin - meshLen = np.sqrt(xLen*xLen + yLen*yLen) - marg = 0.01*meshLen - xMax = xMax + marg - xMin = xMin - marg - yMax = yMax + marg - yMin = yMin - marg - xLen = xMax - xMin - yLen = yMax - yMin - meshLen = np.sqrt(xLen*xLen + yLen*yLen) - aL2 = avgLen*avgLen - xS3 = xLen*aL2 - xGS = np.power(xS3,0.333333) - yS3 = yLen*aL2 - yGS = np.power(yS3,0.333333) - - self.nodeGL = SpatialGridList2D(xMin,xMax,yMin,yMax,xGS,yGS) - for ndi in range(0,self.numNodes): - self.nodeGL.addEntry(ndi,self.nodes[ndi,:]) - - self.edgeGL = SpatialGridList2D(xMin,xMax,yMin,yMax,xGS,yGS) - for edi in range(0,self.numEdges): - n1 = self.edgeNodes[edi,0] - n2 = self.edgeNodes[edi,1] - midPt = 0.5*(self.nodes[n1] + self.nodes[n2]) - self.edgeGL.addEntry(edi,midPt) - - self.triElGL = SpatialGridList2D(xMin,xMax,yMin,yMax,xGS,yGS) - - self.getBoundaryEdgeNormals() - - def edgesIntersect(self,e1Nds,e2Nds): - e1n1 = e1Nds[0] - e1n2 = e1Nds[1] - e2n1 = e2Nds[0] - e2n2 = e2Nds[1] - p1 = self.nodes[e1n1] - v1 = self.nodes[e1n2] - p1 - p2 = self.nodes[e2n1] - v2 = self.nodes[e2n2] - p2 - Amat = np.array([[v1[0],-v2[0]],[v1[1],-v2[1]]]) - 
detA = np.linalg.det(Amat) - if(detA != 0.0): - bVec = p2 - p1 - soln = np.linalg.solve(Amat,bVec) - if(soln[0] > 1.0e-6 and soln[0] < 0.999999 and soln[1] > 1.0e-6 and soln[1] < 0.999999): - return True - else: - return False - else: - return False - - def ptInEl(self,pt,el): - p1 = self.nodes[el[0]] - v1 = self.nodes[el[1]] - p1 - v2 = self.nodes[el[2]] - p1 - Amat = np.array([[v1[0],v2[0]],[v1[1],v2[1]]]) - detA = np.linalg.det(Amat) - if(detA != 0.0): - bVec = pt - p1 - soln = np.linalg.solve(Amat,bVec) - if(soln[0] > 1e-6 and soln[1] > 1e-6): - solSum = soln[0] + soln[1] - if(solSum < 0.999999): - return True - else: - return False - else: - return False - else: - return False - - def violations(self,newEl): - n1 = newEl[0] - n2 = newEl[1] - n3 = newEl[2] - ed1 = np.array([n1,n2]) - ed2 = np.array([n2,n3]) - ed3 = np.array([n3,n1]) - cent = 0.333333*(self.nodes[n1] + self.nodes[n2] + self.nodes[n3]) - srchRad = 1.4*self.maxEdgeLen - - nearNds = self.nodeGL.findInRadius(cent,srchRad) - for ndi in nearNds: - inEl = self.ptInEl(self.nodes[ndi],newEl) - if(inEl): - return True - - nearEdges = self.edgeGL.findInRadius(cent,srchRad) - for edi in nearEdges: - intSect = self.edgesIntersect(self.edgeNodes[edi],ed1) - if(intSect): - return True - intSect = self.edgesIntersect(self.edgeNodes[edi],ed2) - if(intSect): - return True - intSect = self.edgesIntersect(self.edgeNodes[edi],ed3) - if(intSect): - return True - - srtNew = np.sort(newEl) - nearEls = self.triElGL.findInRadius(cent,srchRad) - for eli in nearEls: - srtEi = np.sort(self.triElements[eli]) - if(all(srtNew == srtEi)): - return True - inEl = self.ptInEl(self.nodes[n1],self.triElements[eli]) - if(inEl): - return True - inEl = self.ptInEl(self.nodes[n2],self.triElements[eli]) - if(inEl): - return True - inEl = self.ptInEl(self.nodes[n3],self.triElements[eli]) - if(inEl): - return True - - return False - - def createEdge(self,nds,el): - midpt = 0.5*(self.nodes[nds[0]] + self.nodes[nds[1]]) - nearEdges = self.edgeGL.findInRadius(midpt,self.minEdgeLen) - for nEi in nearEdges: - iNds = self.edgeNodes[nEi] - if(nds[0] == iNds[0] and nds[1] == iNds[1]): - self.edgeElements[nEi,1] = el - return - if(nds[0] == iNds[1] and nds[1] == iNds[0]): - self.edgeElements[nEi,1] = el - return - k = self.numEdges - self.edgeNodes[k] = nds - self.edgeElements[k,0] = el - edVec = self.nodes[nds[1]] - self.nodes[nds[0]] - mag = np.linalg.norm(edVec) - ## --------------- - if(mag < 1.0e-12): - outStr = 'nodes ' + str(nds) - print(outStr) - outStr = 'coords ' + str(self.nodes[nds[0]]) + ' ' + str(self.nodes[nds[1]]) - print(outStr) - unitEV = (1.0/mag)*edVec - unitNorm = np.array([-unitEV[1],unitEV[0]]) - for eNd in self.triElements[el]: - if(eNd != nds[0] and eNd != nds[1]): - vec = midpt - self.nodes[eNd] - dp = np.dot(vec,unitNorm) - if(dp > 0.0): - self.edgeUnitNorms[k] = unitNorm - else: - self.edgeUnitNorms[k] = -unitNorm - self.edgeGL.addEntry(k,midpt) - self.numEdges = k + 1 - - def adoptConnectedNode(self,edgeIndex,point,srchRad): - eNds = self.edgeNodes[edgeIndex] - midPt = 0.5*(self.nodes[eNds[0]] + self.nodes[eNds[1]]) - nearEdges = self.edgeGL.findInRadius(point,srchRad) - for nEi in nearEdges: - if(nEi != edgeIndex): - eiNds = self.edgeNodes[nEi] - commonNd = -1 - for i in range(0,2): - for j in range(0,2): - if(eNds[i] == eiNds[j]): - commonNd = j - if(commonNd != -1): - if(commonNd == 0): - commonNdi = eiNds[0] - unComNdi = eiNds[1] - else: - commonNdi = eiNds[1] - unComNdi = eiNds[0] - unComNdPt = self.nodes[unComNdi] - vec = point - 
unComNdPt - dist = np.linalg.norm(vec) - if(dist < srchRad): - vec = unComNdPt - midPt - dp = np.dot(vec,self.edgeUnitNorms[edgeIndex]) - if(dp > 0.0): - newEl = np.array([eNds[0],eNds[1],unComNdi]) - viol = self.violations(newEl) - if(not viol): - k = self.numTriEls - self.triElements[k] = newEl - cent = 0.33333333*(self.nodes[newEl[0]] + self.nodes[newEl[1]] + self.nodes[newEl[2]]) - self.triElGL.addEntry(k,cent) - self.edgeElements[edgeIndex,1] = k - self.edgeElements[nEi,1] = k - if(eNds[0] == commonNdi): - newNds = np.array([eNds[1],unComNdi]) - else: - newNds = np.array([eNds[0],unComNdi]) - self.createEdge(newNds,k) - self.numTriEls = k + 1 - return True - return False - - def adoptAnyNode(self,edgeIndex,point,srchRad): - eNds = self.edgeNodes[edgeIndex] - midPt = 0.5*(self.nodes[eNds[0]] + self.nodes[eNds[1]]) - nearNds = self.nodeGL.findInRadius(point,srchRad) - for ndi in nearNds: - if(ndi not in eNds): - vec = self.nodes[ndi] - point - dist = np.linalg.norm(vec) - if(dist < srchRad): - vec = self.nodes[ndi] - midPt - dp = np.dot(vec,self.edgeUnitNorms[edgeIndex]) - if(dp > 0.0): - newEl = np.array([eNds[0],eNds[1],ndi]) - viol = self.violations(newEl) - if(not viol): - k = self.numTriEls - self.triElements[k] = newEl - cent = 0.33333333*(self.nodes[newEl[0]] + self.nodes[newEl[1]] + self.nodes[newEl[2]]) - self.triElGL.addEntry(k,cent) - self.edgeElements[edgeIndex,1] = k - newNds = np.array([eNds[0],ndi]) - self.createEdge(newNds,k) - newNds = np.array([eNds[1],ndi]) - self.createEdge(newNds,k) - self.numTriEls = k + 1 - return True - return False - - def createNode(self,edgeIndex,point): - eNds = self.edgeNodes[edgeIndex] - n = self.numNodes - self.nodes[n] = point - newEl = np.array([eNds[0],eNds[1],n]) - viol = self.violations(newEl) - if(not viol): - k = self.numTriEls - self.triElements[k] = newEl - cent = 0.33333333*(self.nodes[newEl[0]] + self.nodes[newEl[1]] + self.nodes[newEl[2]]) - self.triElGL.addEntry(k,cent) - self.edgeElements[edgeIndex,1] = k - newNds = np.array([eNds[0],n]) - self.createEdge(newNds,k) - newNds = np.array([eNds[1],n]) - self.createEdge(newNds,k) - self.nodeGL.addEntry(n,point) - self.numTriEls = k + 1 - self.numNodes = n + 1 - return True - else: - return False - - def distributeNodes(self): - dim = 2*self.numNodes - Dmat = np.zeros(dim) - bDim = 2*self.numBndNodes - Dmat[0:bDim] = 100000.0 - Pmat = 10.0*np.ones(dim) + Dmat - Pinv = np.zeros(dim) - Pinv[0:bDim] = 9.999e-6 - Pinv[bDim:dim] = 0.1 - rhs = np.zeros(dim) - j = 0 - for bni in range(0,self.numBndNodes): - rhs[j] = 100000.0*self.nodes[bni,0] - rhs[j+1] = 100000.0*self.nodes[bni,1] - j = j + 2 - - nEls = self.numTriEls - elWt = np.zeros(nEls) - for eli in range(0,nEls): - ni = self.triElements[eli] - v1 = self.nodes[ni[1]] - self.nodes[ni[0]] - v2 = self.nodes[ni[2]] - self.nodes[ni[0]] - cp = v1[0] * v2[1] - v1[1] * v2[0] - elWt[eli] = np.abs(cp) - avgWt = np.mean(elWt) - elWt = (1.0/avgWt)*elWt - - elMat = np.zeros((6,6)) - elMat[0,0] = 2.0 - elMat[0,2] = -1.0 - elMat[0,4] = -1.0 - for i in range(1,6): - elMat[i,i:6] = elMat[0,0:6-i] - for i in range(0,5): - elMat[i+1:6,i] = elMat[i,i+1:6] - - xVec = np.zeros(dim) - gVec = -rhs - wVec = np.multiply(Pinv,gVec) - hVec = -wVec - zVec = np.zeros(dim) - res = np.dot(gVec,wVec) - i = 0 - while(res > 1e-12 and i < dim): - zVec[:] = 0.0 - for eli in range(0,nEls): - inT2 = 2*self.triElements[eli] - vecInd = [inT2[0],inT2[0]+1,inT2[1],inT2[1]+1,inT2[2],inT2[2]+1] - elH = hVec[vecInd] - elMati = elWt[eli]*elMat - elZ = np.matmul(elMati,elH) - 
zVec[vecInd] = zVec[vecInd] + elZ - zVec = zVec + np.multiply(Dmat,hVec) - alpha = res/np.dot(hVec,zVec) - xVec = xVec + alpha*hVec - gVec = gVec + alpha*zVec - wVec = np.multiply(Pinv,gVec) - rNext = np.dot(gVec,wVec) - beta = rNext/res - res = rNext - hVec = -wVec - i = i + 1 - - for i in range(0,self.numNodes): - j = i*2 - self.nodes[i] = xVec[j:j+2] - - def mergePairsAbove(self,edgeFactor,elElim,elLongEdge): - nQuad = self.numQuadEls - for edi in range(0,self.numEdges): - n1 = self.edgeNodes[edi,0] - n2 = self.edgeNodes[edi,1] - edLen = np.linalg.norm(self.nodes[n1]-self.nodes[n2]) - el1 = self.edgeElements[edi,0] - el2 = self.edgeElements[edi,1] - if(el1 != -1 and el2 != -1): - if(elElim[el1] == 0 and elElim[el2] == 0): - if(edLen > edgeFactor*elLongEdge[el1] and edLen > edgeFactor*elLongEdge[el2]): - elElim[el1] = 1 - elElim[el2] = 1 - quadNds = np.array([n1,-1,n2,-1]) - e1Nds = self.triElements[el1] - e2Nds = self.triElements[el2] - for i in range(0,3): - if(e1Nds[i] != n1 and e1Nds[i] != n2): - quadNds[1] = e1Nds[i] - if(e2Nds[i] != n1 and e2Nds[i] != n2): - quadNds[3] = e2Nds[i] - self.quadElements[nQuad] = quadNds - nQuad = nQuad + 1 - self.numQuadEls = nQuad - return elElim - - def mergeTriEls(self,elType): - ## Initialize local fields - nNds = self.numNodes - nbNds = self.numBndNodes - nEls = self.numTriEls - nQuad = self.numQuadEls - elElim = np.zeros(self.triElSize,dtype=int) - ndElems = -np.ones((nNds,6),dtype=int) - ndElems[:,0] = 0 - ndElim = np.zeros(nNds,dtype=int) - - ## Form the node-to-element connectivity list - for eli in range(0,nEls): - for nd in self.triElements[eli]: - j = ndElems[nd,0] - if(j < 5): - j = j + 1 - ndElems[nd,j] = eli - ndElems[nd,0] = j - - ## Merge neste clusters - for ndi in range(nbNds,nNds): - if(ndElems[ndi,0] == 3): - abrt = False - for el in ndElems[ndi,1:4]: - if(elElim[el] == 1): - abrt = True - if(not abrt): - newElNds = list() - for el in ndElems[ndi,1:4]: - newElNds.extend(self.triElements[el]) - srtedNds = np.sort(newElNds) - finalNds = list() - for i in range(0,8): - j = srtedNds[i] - if(j != ndi and srtedNds[i+1] == j): - finalNds.append(j) - if(len(finalNds) == 3): - self.triElements[nEls] = np.array(finalNds,dtype=int) - nEls = nEls + 1 - ndElim[ndi] = 1 - for el in ndElems[ndi,1:4]: - elElim[el] = 1 - elif(ndElems[ndi,0] == 4): - abrt = False - for el in ndElems[ndi,1:5]: - if(elElim[el] == 1): - abrt = True - if(not abrt): - newElNds = list() - for el in ndElems[ndi,1:4]: - newElNds.extend(self.triElements[el]) - srtedNds = np.sort(newElNds) - nds12 = list() - for i in range(0,8): - j = srtedNds[i] - if(j != ndi and j == srtedNds[i+1]): - nds12.append(j) - nds34 = list() - for i in range(0,8): - j = srtedNds[i] - if(j != ndi and j not in nds12): - nds34.append(j) - if(len(nds12) == 2 and len(nds34) == 2): - n1 = nds12[0] - n2 = nds12[1] - n3 = nds34[0] - n4 = nds34[1] - v1 = self.nodes[n2] - self.nodes[n1] - v2 = self.nodes[n3] - self.nodes[n1] - v3 = self.nodes[n4] - self.nodes[n1] - mat = np.array([v1,v2]) - det1 = np.linalg.det(mat) - mat = np.array([v2,v3]) - det2 = np.linalg.det(mat) - if(det1*det2 > 0.0): - if(elType == 'quad'): - self.quadElements[nQuad] = np.array([n1,n2,n3,n4]) - nQuad = nQuad + 1 - else: - self.triElements[nEls] = np.array([n1,n2,n3]) - nEls = nEls + 1 - self.triElements[nEls] = np.array([n1,n3,n4]) - nEls = nEls + 1 - else: - if(elType == 'quad'): - self.quadElements[nQuad] = np.array([n1,n2,n4,n3]) - nQuad = nQuad + 1 - else: - self.triElements[nEls] = np.array([n1,n2,n4]) - nEls = nEls + 1 - 
self.triElements[nEls] = np.array([n1,n4,n3]) - nEls = nEls + 1 - ndElim[ndi] = 1 - for el in ndElems[ndi,1:5]: - elElim[el] = 1 - self.numTriEls = nEls - self.numQuadEls = nQuad - - if(elType == 'quad'): - elLongEdge = np.zeros(nEls) - - for eli in range(0,nEls): - nds = self.triElements[eli] - longEd = np.linalg.norm(self.nodes[nds[0]] - self.nodes[nds[1]]) - eLen = np.linalg.norm(self.nodes[nds[1]] - self.nodes[nds[2]]) - if(eLen > longEd): - longEd = eLen - eLen = np.linalg.norm(self.nodes[nds[2]] - self.nodes[nds[0]]) - if(eLen > longEd): - longEd = eLen - elLongEdge[eli] = longEd - - ## Merge quad-forming pairs of triangles - - elElim = self.mergePairsAbove(0.99,elElim,elLongEdge) - elElim = self.mergePairsAbove(0.85,elElim,elLongEdge) - elElim = self.mergePairsAbove(0.75,elElim,elLongEdge) - nQuad = self.numQuadEls - - finalNodes = list() - ndNewInd = -np.ones(nNds,dtype=int) - ndi = 0 - for ni in range(0,nNds): - if(ndElim[ni] == 0): - finalNodes.append(self.nodes[ni]) - ndNewInd[ni] = ndi - ndi = ndi + 1 - self.nodes = np.array(finalNodes) - self.numNodes = len(self.nodes) - self.ndSize = self.numNodes - - for eli in range(0,nEls): - if(elElim[eli] == 0): - for j in range(0,3): - nd = self.triElements[eli,j] - self.triElements[eli,j] = ndNewInd[nd] - - for eli in range(0,nQuad): - for j in range(0,4): - nd = self.quadElements[eli,j] - self.quadElements[eli,j] = ndNewInd[nd] - - newTEind = list() - for i in range(0,nEls): - if(elElim[i] == 0): - newTEind.append(i) - - self.triElements = self.triElements[newTEind] - self.numTriEls = len(newTEind) - self.triElSize = self.numTriEls - - def unstructuredPost(self,elType): - #self.plot2DMesh() - self.distributeNodes() - #self.plot2DMesh() - if(elType == 'quad'): - self.unskewNodes() - #self.plot2DMesh() - self.mergeTriEls(elType) - #self.plot2DMesh() - - ## !! 
rename any calls to creating unstructured mesh as necessary, createPlanarMesh - def createUnstructuredMesh(self,elType): - self.unstructuredPrep(elType) - - elsCreated = True - while(elsCreated): - # if(self.numTriEls > 0): - # self.plot2DMesh() - # cnt = input('continue?\n') - # if(cnt != 'y'): - # break - elsCreated = False - nEd = self.numEdges - for edi in range(0,nEd): - if(self.edgeElements[edi,1] == -1): - n1 = self.edgeNodes[edi,0] - n2 = self.edgeNodes[edi,1] - uNorm = self.edgeUnitNorms[edi] - midPt = 0.5*(self.nodes[n1] + self.nodes[n2]) - vec = self.nodes[n1] - self.nodes[n2] - edLen = np.linalg.norm(vec) - projLen = 0.2165*edLen + 0.75*self.avgProjLen ## 0.25(sqrt(3)/2)edLen + 0.75avgProjLen - srchPt = midPt + 0.5*projLen*uNorm - srchRad = 0.5*np.sqrt(edLen*edLen + projLen*projLen) - found = self.adoptConnectedNode(edi,srchPt,srchRad) - if(not found): - srchPt = midPt + projLen*uNorm - found = self.adoptConnectedNode(edi,srchPt,srchRad) - if(not found): - srchPt = midPt + 0.5*projLen*uNorm - found = self.adoptAnyNode(edi,srchPt,srchRad) - if(not found): - srchPt = midPt + projLen*uNorm - found = self.adoptAnyNode(edi,srchPt,srchRad) - if(not found): - srchPt = midPt + projLen*uNorm - self.createNode(edi,srchPt) - if(not found): - srchPt = midPt + 0.5*projLen*uNorm - self.createNode(edi,srchPt) - if(found): - elsCreated = True - for edi in range(0,self.numEdges): - uNMag = np.linalg.norm(self.edgeUnitNorms[edi]) - if(uNMag < 1.0e-6): - n1 = self.edgeNodes[edi,0] - n2 = self.edgeNodes[edi,1] - midPt = 0.5*(self.nodes[n1] + self.nodes[n2]) - vec = self.nodes[n2] - self.nodes[n1] - mag = np.linalg.norm(vec) - unitVec = (1.0/mag)*vec - uNorm = np.array([-unitVec[1],unitVec[0]]) - eL1 = self.edgeElements[edi,0] - for i in range(0,3): - ni = self.triElements[eL1,i] - if(ni != n1 and ni != n2): - vec = midPt - self.nodes[ni] - dp = np.dot(vec,uNorm) - if(dp > 0.0): - self.edgeUnitNorms[edi] = uNorm - else: - self.edgeUnitNorms[edi] = -uNorm - - self.unstructuredPost(elType) - - meshOut = dict() - meshOut['nodes'] = self.nodes - totalEls = self.numTriEls + self.numQuadEls - allEls = -np.ones((totalEls,4),dtype=int) - allEls[0:self.numTriEls,0:3] = self.triElements[0:self.numTriEls] - allEls[self.numTriEls:totalEls,:] = self.quadElements[0:self.numQuadEls] - meshOut['elements'] = allEls - - return meshOut - - def plot2DMesh(self): - xLst = self.nodes[0:self.numNodes,0] - yLst = self.nodes[0:self.numNodes,1] - zLst = np.zeros(self.numNodes) - value = list() - v1 = list() - v2 = list() - v3 = list() - for i in range(0,self.numTriEls): - v1.append(self.triElements[i,0]) - v2.append(self.triElements[i,1]) - v3.append(self.triElements[i,2]) - value.append(np.sin(i)) - for i in range(0,self.numQuadEls): - v1.append(self.quadElements[i,0]) - v2.append(self.quadElements[i,1]) - v3.append(self.quadElements[i,2]) - value.append(np.sin(i)) - v1.append(self.quadElements[i,0]) - v2.append(self.quadElements[i,2]) - v3.append(self.quadElements[i,3]) - value.append(np.sin(i)) - fig = go.Figure(data=[ - go.Mesh3d( - x=xLst, - y=yLst, - z=zLst, - colorbar_title = '', - colorscale=[[0.0, 'blue'], - [0.5, 'yellow'], - [1.0, 'red']], - intensity=value, - intensitymode='cell', - i=v1, - j=v2, - k=v3, - name='', - showscale=True - ) - ]) - - fig.show() \ No newline at end of file diff --git a/src/pynumad/shell/Mesh3DClass.py b/src/pynumad/shell/Mesh3DClass.py deleted file mode 100644 index 04b02d8..0000000 --- a/src/pynumad/shell/Mesh3DClass.py +++ /dev/null @@ -1,283 +0,0 @@ -import numpy as np -from scipy 
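# A brief note on the advancing-front step in createUnstructuredMesh above: for
# each open boundary edge, trial points are offset from the edge midpoint along
# its outward normal by one half of, and then the full, projection length
#   projLen = 0.2165 * edLen + 0.75 * avgProjLen
# where 0.2165 is 0.25 * (sqrt(3)/2), i.e. a quarter of the height of an
# equilateral triangle built on the edge, blended with three quarters of the
# running average projection length; the node search radius is half the
# diagonal of the (edLen, projLen) box. The helper below is an illustrative
# restatement, not a pyNuMAD function.
import numpy as np

def front_projection(ed_len, avg_proj_len):
    """Return (proj_len, search_radius) for one advancing-front edge."""
    proj_len = 0.25 * (np.sqrt(3.0) / 2.0) * ed_len + 0.75 * avg_proj_len
    srch_rad = 0.5 * np.sqrt(ed_len**2 + proj_len**2)
    return proj_len, srch_rad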
import interpolate -from pynumad.shell.SpatialGridList3DClass import * - -class Mesh3D(): - - def __init__(self,boundaryNodes,boundaryFaces=[]): - self.nodeGL = None - self.faceGL = None - self.tetElGL = None - - self.minFaceArea = 0.0 - self.maxFaceArea = 1.0 - self.avgProjLen = 1.0 - - self.numBndNodes = len(boundaryNodes) - self.numNodes = self.numBndNodes - self.ndSize = self.numBndNodes - self.nodes = np.array(boundaryNodes) - - self.numBndFaces = len(boundaryFaces) - self.faceNodes = np.array(boundaryFaces) - self.numFaces = self.numBndFaces - self.faceSize = self.numBndFaces - self.faceElements = np.array([]) - self.faceUnitNorms = np.array([]) - - self.numTetEls = 0 - self.tetElSize = 0 - self.tetElements = np.array([]) - - self.numWedgeEls = 0 - self.wedgeElSize = 0 - self.wedgeElements = np.array([]) - - self.numHexEls = 0 - self.hexElSize = 0 - self.hexElements = np.array([]) - - def createSweptMesh(self, sweepMethod, sweepElements, sweepDistance=1.0, point=[], axis=[], followNormal=False, destNodes=[], interpMethod='linear'): - ## sweepMethod = inDirection, toPoint, fromPoint, toDestNodes, revolve - """Object data modified: self.quadElements, self.nodes, self.quadElements - Parameters - ---------- - - Returns - ------- - nodes - elements - """ - nbNds = self.numBndNodes - try: - totSweepEls = sum(sweepElements) - ndSize = nbNds*(totSweepEls+1) - stages = len(sweepElements) - multiStage = True - except: - totSweepEls = sweepElements - ndSize = nbNds*(sweepElements+1) - stages = 1 - multiStage = False - tmp = self.nodes.copy() - self.nodes = np.zeros((ndSize,3)) - self.nodes[0:nbNds] = tmp - self.ndSize = ndSize - self.numNodes = nbNds - - nbFcs = self.numBndFaces - elSize = nbFcs*totSweepEls - - self.wedgeElements = -np.ones((elSize,6),dtype=int) - self.wedgeElSize = elSize - self.numWedgeEls = 0 - wERank = np.zeros(elSize,dtype=int) - - self.hexElements = -np.ones((elSize,8),dtype=int) - self.hexElSize = elSize - self.numHexEls = 0 - hERank = np.zeros(elSize,dtype=int) - - methString = 'inDirection toPoint fromPoint' - if(sweepMethod in methString): - ndDir = list() - if(sweepMethod == 'inDirection'): - mag = np.linalg.norm(axis) - unitAxis = (1.0/mag)*np.array(axis) - for i in range(0,self.numNodes): - ndDir.append(unitAxis) - else: - pAr = np.array(point) - for i in range(0,self.numNodes): - if(sweepMethod == 'toPoint'): - vec = pAr - nd - else: - vec = nd - pAr - mag = np.linalg.norm(vec) - unitVec = (1.0/mag)*vec - ndDir.append(unitVec) - rowNds = self.numNodes - rowEls = self.numFaces - stepLen = sweepDistance/sweepElements - nNds = self.numNodes - wE = self.numWedgeEls - hE = self.numHexEls - eli = 0 - for i in range(0,sweepElements): - for j in range(0,rowNds): - newNd = self.nodes[j] + (i+1)*stepLen*ndDir[j] - self.nodes[nNds] = newNd - nNds = nNds + 1 - for j in range(0,rowEls): - n1 = self.faceNodes[j,0] + i*rowNds - n2 = self.faceNodes[j,1] + i*rowNds - n3 = self.faceNodes[j,2] + i*rowNds - if(self.faceNodes[j,3] == -1): - n4 = n1 + rowNds - n5 = n2 + rowNds - n6 = n3 + rowNds - self.wedgeElements[wE] = np.array([n1,n2,n3,n4,n5,n6]) - wERank[wE] = eli - wE = wE + 1 - else: - n4 = self.faceNodes[j,3] + i*rowNds - n5 = n1 + rowNds - n6 = n2 + rowNds - n7 = n3 + rowNds - n8 = n4 + rowNds - self.hexElements[hE] = np.array([n1,n2,n3,n4,n5,n6,n7,n8]) - hERank[hE] = eli - hE = hE + 1 - eli = eli + 1 - self.numNodes = nNds - self.numWedgeEls = wE - self.numHexEls = hE - elif(sweepMethod == 'toDestNodes'): - nNds = self.numNodes - nbNds = self.numBndNodes - wE = 
self.numWedgeEls - hE = self.numHexEls - eli = 0 - if(not multiStage): - sweepElements = [sweepElements] - destNodes = [destNodes] - if(interpMethod == 'linear'): - prevDest = self.nodes.copy() - cumElLay = 0 - for stg in range(0,stages): - dNds = np.array(destNodes[stg]) - ndDir = np.zeros((nbNds,3)) - for ndi in range(0,nbNds): - ndDir[ndi] = (1.0/sweepElements[stg])*(dNds[ndi] - prevDest[ndi]) - for i in range(0,sweepElements[stg]): - for ndi in range(0,nbNds): - self.nodes[nNds] = self.nodes[nNds-nbNds] + ndDir[ndi] - nNds = nNds + 1 - for fci in range(0,self.numBndFaces): - n1 = self.faceNodes[fci,0] + cumElLay*nbNds - n2 = self.faceNodes[fci,1] + cumElLay*nbNds - n3 = self.faceNodes[fci,2] + cumElLay*nbNds - if(self.faceNodes[fci,3] == -1): - n4 = n1 + nbNds - n5 = n2 + nbNds - n6 = n3 + nbNds - self.wedgeElements[wE] = np.array([n1,n2,n3,n4,n5,n6]) - wERank[wE] = eli - wE = wE + 1 - else: - n4 = self.faceNodes[fci,3] + cumElLay*nbNds - n5 = n1 + nbNds - n6 = n2 + nbNds - n7 = n3 + nbNds - n8 = n4 + nbNds - self.hexElements[hE] = np.array([n1,n2,n3,n4,n5,n6,n7,n8]) - hERank[hE] = eli - hE = hE + 1 - eli = eli + 1 - cumElLay = cumElLay + 1 - prevDest = dNds.copy(); - else: ## Smooth interpolation - xMat = np.zeros((nbNds,totSweepEls+1)) - yMat = np.zeros((nbNds,totSweepEls+1)) - zMat = np.zeros((nbNds,totSweepEls+1)) - pDest = (1.0/stages)*np.array(range(0,stages+1)) - pAll = (1.0/totSweepEls)*np.array(range(0,totSweepEls+1)) - for ndi in range(0,nbNds): - xDest = [self.nodes[ndi,0]] - yDest = [self.nodes[ndi,1]] - zDest = [self.nodes[ndi,2]] - for dNds in destNodes: - xDest.append(dNds[ndi][0]) - yDest.append(dNds[ndi][1]) - zDest.append(dNds[ndi][2]) - xDest = np.array(xDest) - iFun = interpolate.interp1d(pDest,xDest,'cubic', axis=0,bounds_error=False,fill_value='extrapolate') - xAll = iFun(pAll) - xMat[ndi,:] = xAll - yDest = np.array(yDest) - iFun = interpolate.interp1d(pDest,yDest,'cubic', axis=0,bounds_error=False,fill_value='extrapolate') - yAll = iFun(pAll) - yMat[ndi,:] = yAll - zDest = np.array(zDest) - iFun = interpolate.interp1d(pDest,zDest,'cubic', axis=0,bounds_error=False,fill_value='extrapolate') - zAll = iFun(pAll) - zMat[ndi,:] = zAll - eli = 0 - for i in range(0,totSweepEls): - for ndi in range(0,nbNds): - self.nodes[nNds] = np.array([xMat[ndi,i+1],yMat[ndi,i+1],zMat[ndi,i+1]]) - nNds = nNds + 1 - for fci in range(0,self.numBndFaces): - n1 = self.faceNodes[fci,0] + i*nbNds - n2 = self.faceNodes[fci,1] + i*nbNds - n3 = self.faceNodes[fci,2] + i*nbNds - if(self.faceNodes[fci,3] == -1): - n4 = n1 + nbNds - n5 = n2 + nbNds - n6 = n3 + nbNds - self.wedgeElements[wE] = np.array([n1,n2,n3,n4,n5,n6]) - wERank[wE] = eli - wE = wE + 1 - else: - n4 = self.faceNodes[fci,3] + i*nbNds - n5 = n1 + nbNds - n6 = n2 + nbNds - n7 = n3 + nbNds - n8 = n4 + nbNds - self.hexElements[hE] = np.array([n1,n2,n3,n4,n5,n6,n7,n8]) - hERank[hE] = eli - hE = hE + 1 - eli = eli + 1 - self.numNodes = nNds - self.numWedgeEls = wE - self.numHexEls = hE - - for eli in range(0,self.numWedgeEls): - n1 = self.wedgeElements[eli,0] - n2 = self.wedgeElements[eli,1] - n3 = self.wedgeElements[eli,2] - n4 = self.wedgeElements[eli,3] - v1 = self.nodes[n2] - self.nodes[n1] - v2 = self.nodes[n3] - self.nodes[n1] - v3 = self.nodes[n4] - self.nodes[n1] - mat = np.array([v1,v2,v3]) - detM = np.linalg.det(mat) - if(detM < 0.0): - self.wedgeElements[eli,1] = n3 - self.wedgeElements[eli,2] = n2 - sw = self.wedgeElements[eli,4] - self.wedgeElements[eli,4] = self.wedgeElements[eli,5] - self.wedgeElements[eli,5] = sw - - 
for eli in range(0,self.numHexEls): - n1 = self.hexElements[eli,0] - n2 = self.hexElements[eli,1] - n4 = self.hexElements[eli,3] - n5 = self.hexElements[eli,4] - v1 = self.nodes[n2] - self.nodes[n1] - v2 = self.nodes[n4] - self.nodes[n1] - v3 = self.nodes[n5] - self.nodes[n1] - mat = np.array([v1,v2,v3]) - detM = np.linalg.det(mat) - if(detM < 0.0): - self.hexElements[eli,1] = n4 - self.hexElements[eli,3] = n2 - sw = self.hexElements[eli,5] - self.hexElements[eli,5] = self.hexElements[eli,7] - self.hexElements[eli,7] = sw - - - meshOut = dict() - meshOut['nodes'] = self.nodes - totEls = self.numWedgeEls + self.numHexEls - allEls = -np.ones((totEls,8),dtype=int) - for eli in range(0,self.numWedgeEls): - allEls[wERank[eli],0:6] = self.wedgeElements[eli] - for eli in range(0,self.numHexEls): - allEls[hERank[eli],0:8] = self.hexElements[eli] - # if(self.numWedgeEls > 0): - # allEls[0:self.numWedgeEls,0:6] = self.wedgeElements[0:self.numWedgeEls] - # if(self.numHexEls > 0): - # allEls[self.numWedgeEls:totEls,0:8] = self.hexElements[0:self.numHexEls] - meshOut['elements'] = allEls - return meshOut \ No newline at end of file diff --git a/src/pynumad/shell/Segment2DClass.py b/src/pynumad/shell/Segment2DClass.py deleted file mode 100644 index 5e0786a..0000000 --- a/src/pynumad/shell/Segment2DClass.py +++ /dev/null @@ -1,100 +0,0 @@ -import numpy as np -from scipy import interpolate - -class Segment2D(): - - def __init__(self,segType,keyPts,numEls): - self.segType = segType ## line, curve, arc - self.keyPts = keyPts - self.numEls = numEls - - def getNodesEdges(self): - nNds = self.numEls+1 - if(self.segType == 'line'): - pt1 = np.array(self.keyPts[0]) - pt2 = np.array(self.keyPts[1]) - proj = pt2 - pt1 - steps = (1.0/self.numEls)*np.array(range(0,nNds)) - nds = list() - for st in steps: - nd = pt1 + st*proj - nds.append(nd) - nodes = np.array(nds) - eN1 = np.array(range(0,self.numEls),dtype=int) - eN2 = np.array(range(1,nNds),dtype=int) - edges = np.transpose(np.array([eN1,eN2])) - output = dict() - output['nodes'] = nodes - output['edges'] = edges - return output - elif(self.segType == 'curve'): - kPTp = np.transpose(np.array(self.keyPts)) - numKp = len(self.keyPts) - pKp = (1.0/(numKp-1))*np.array(range(0,numKp)) - pNds = (1.0/self.numEls)*np.array(range(0,nNds)) - if(numKp == 2): - order = 'linear' - elif(numKp == 3): - order = 'quadratic' - else: - order = 'cubic' - iFun = interpolate.interp1d(pKp,kPTp[0],order, axis=0,bounds_error=False,fill_value='extrapolate') - xNds = iFun(pNds) - iFun = interpolate.interp1d(pKp,kPTp[1],order, axis=0,bounds_error=False,fill_value='extrapolate') - yNds = iFun(pNds) - eN1 = np.array(range(0,self.numEls),dtype=int) - eN2 = np.array(range(1,nNds),dtype=int) - output = dict() - output['nodes'] = np.transpose(np.array([xNds,yNds])) - output['edges'] = np.transpose(np.array([eN1,eN2])) - return output - elif(self.segType == 'arc'): - kPar = np.array(self.keyPts) - dist13 = np.linalg.norm(kPar[0] - kPar[2]) - if(dist13 < 1.0e-12): - center = 0.5*(kPar[0] + kPar[1]) - rad = np.linalg.norm(kPar[0] - center) - else: - center = 0.3333*(kPar[0] + kPar[1] + kPar[2]) - ndc = 1.0 - i = 0 - Rvec = np.zeros(2) - dRdC = np.zeros((2,2)) - while(ndc > 1.0e-12 and i < 50): - v1 = kPar[0] - center - v2 = kPar[1] - center - v3 = kPar[2] - center - Rvec[0] = np.dot(v1,v1) - np.dot(v2,v2) - Rvec[1] = np.dot(v1,v1) - np.dot(v3,v3) - dRdC[0] = 2.0*(v2 - v1) - dRdC[1] = 2.0*(v3 - v1) - dc = np.linalg.solve(dRdC,-Rvec) - center = center + dc - ndc = np.linalg.norm(dc) - i = i + 1 - rad 
= np.linalg.norm(kPar[0] - center) - theta = np.zeros(3) - for i in range(0,3): - xRel = kPar[i,0] - center[0] - yRel = kPar[i,1] - center[1] - if(abs(xRel) < 1.0e-12): - xRel = 1.0e-12 - if(xRel > 0.0): - theta[i] = np.arctan(yRel/xRel) - else: - theta[i] = np.arctan(yRel/xRel) + np.pi - if(theta[1] > theta[0] and theta[1] < theta[2]): - thetaNds = np.linspace(theta[0],theta[2],nNds) - else: - t0adj = theta[0] + 2.0*np.pi - thetaNds = np.linspace(t0adj,theta[2],nNds) - nodes = np.zeros((nNds,2)) - for thi in range(0,nNds): - nodes[thi,0] = rad*np.cos(thetaNds[thi]) + center[0] - nodes[thi,1] = rad*np.sin(thetaNds[thi]) + center[1] - eN1 = np.array(range(0,self.numEls),dtype=int) - eN2 = np.array(range(1,nNds),dtype=int) - output = dict() - output['nodes'] = nodes - output['edges'] = np.transpose(np.array([eN1,eN2])) - return output \ No newline at end of file diff --git a/src/pynumad/shell/ShellRegionClass.py b/src/pynumad/shell/ShellRegionClass.py deleted file mode 100644 index ac19806..0000000 --- a/src/pynumad/shell/ShellRegionClass.py +++ /dev/null @@ -1,348 +0,0 @@ -from scipy import interpolate -from pynumad.shell.Segment2DClass import * -from pynumad.shell.Boundary2DClass import * -from pynumad.shell.Mesh2DClass import * -import pynumad.shell.MeshTools as mt - -class ShellRegion: - """ - Attributes - ----------- - type : str - keyPts : list - edgeEls : list - """ - def __init__(self, regType, keyPoints, numEdgeEls, natSpaceCrd=[], elType='quad', meshMethod='free'): - - self.regType = regType - self.keyPts = np.array(keyPoints) - self.edgeEls = numEdgeEls - if(len(natSpaceCrd) == 0): - if(regType == 'quad1'): - self.natSpaceCrd = np.array([[-1.0,-1.0],[1.0,-1.0],[1.0,1.0],[-1.0,1.0]]) - elif(regType == 'quad2'): - self.natSpaceCrd = np.array([[-1.0,-1.0],[1.0,-1.0],[1.0,1.0],[-1.0,1.0], - [0.0,-1.0],[1.0,0.0],[0.0,1.0],[-1.0,0.0],[0.0,0.0]]) - elif(regType == 'quad3'): - r3 = 1.0/3.0 - self.natSpaceCrd = np.array([[-1.0,-1.0],[1.0,-1.0],[1.0,1.0],[-1.0,1.0], - [-r3,-1.0],[r3,-1.0],[1.0,-r3],[1.0,r3],[r3,1.0],[-r3,1.0],[-1.0,r3],[-1.0,-r3], - [-r3,-r3],[r3,-r3],[r3,r3],[-r3,r3]]) - else: - self.natSpaceCrd = np.array(natSpaceCrd) - self.elType = elType - self.meshMethod = meshMethod - - def createShellMesh(self): - """Object data modified: none - Parameters - ---------- - elType - method : str - - Returns - ------- - nodes - elements - """ - if(self.meshMethod == 'structured'): - if('quad' in self.regType): - ee = self.edgeEls - if(ee[0] >= ee[2]): - xNodes = ee[0] + 1 - else: - xNodes = ee[2] + 1 - if(ee[1] >= ee[3]): - yNodes = ee[1] + 1 - else: - yNodes = ee[3] + 1 - totNds = xNodes*yNodes - seg = Segment2D('line',[[-1.0,-1.0],[1.0,-1.0]],(xNodes-1)) - bnd = seg.getNodesEdges() - mesh = Mesh2D(bnd['nodes'],bnd['edges']) - mData = mesh.createSweptMesh('inDirection',(yNodes-1),sweepDistance=2.0,axis=[0.0,1.0]) - - moved = False - if(self.edgeEls[0] < self.edgeEls[2]): - seg = Segment2D('line',[[-1.0,-1.0],[1.0,-1.0]],self.edgeEls[0]) - bnd = seg.getNodesEdges() - segNds = bnd['nodes'] - meshNds = mData['nodes'] - for ndi in range(0,xNodes): - minDist = 2.0 - for sN in segNds: - vec = meshNds[ndi] - sN - dist = np.linalg.norm(vec) - if(dist < minDist): - minDist = dist - minPt = sN - meshNds[ndi] = minPt - mData['nodes'] = meshNds - moved = True - elif(self.edgeEls[2] < self.edgeEls[0]): - seg = Segment2D('line',[[-1.0,1.0],[1.0,1.0]],self.edgeEls[2]) - bnd = seg.getNodesEdges() - segNds = bnd['nodes'] - meshNds = mData['nodes'] - for ndi in range((totNds-xNodes),totNds): - minDist = 
2.0 - for sN in segNds: - vec = meshNds[ndi] - sN - dist = np.linalg.norm(vec) - if(dist < minDist): - minDist = dist - minPt = sN - meshNds[ndi] = minPt - mData['nodes'] = meshNds - moved = True - if(self.edgeEls[1] < self.edgeEls[3]): - seg = Segment2D('line',[[1.0,-1.0],[1.0,1.0]],self.edgeEls[1]) - bnd = seg.getNodesEdges() - segNds = bnd['nodes'] - meshNds = mData['nodes'] - for ndi in range((xNodes-1),totNds,xNodes): - minDist = 2.0 - for sN in segNds: - vec = meshNds[ndi] - sN - dist = np.linalg.norm(vec) - if(dist < minDist): - minDist = dist - minPt = sN - meshNds[ndi] = minPt - mData['nodes'] = meshNds - moved = True - elif(self.edgeEls[3] < self.edgeEls[1]): - seg = Segment2D('line',[[-1.0,1.0],[-1.0,-1.0]],self.edgeEls[3]) - bnd = seg.getNodesEdges() - segNds = bnd['nodes'] - meshNds = mData['nodes'] - for ndi in range(0,totNds,xNodes): - minDist = 2.0 - for sN in segNds: - vec = meshNds[ndi] - sN - dist = np.linalg.norm(vec) - if(dist < minDist): - minDist = dist - minPt = sN - meshNds[ndi] = minPt - mData['nodes'] = meshNds - moved = True - - if(moved): - mData = mt.mergeDuplicateNodes(mData) - elLst = mData['elements'] - ndLst = mData['nodes'] - for eli in range(0,len(elLst)): - srted = np.sort(elLst[eli]) - for i in range(0,3): - if(srted[i+1] == srted[i]): - srted[i+1] = srted[3] - srted[3] = -1 - elLst[eli] = srted - if(elLst[eli,3] == -1): - n1 = elLst[eli,0] - n2 = elLst[eli,1] - n3 = elLst[eli,2] - v1 = ndLst[n2] - ndLst[n1] - v2 = ndLst[n3] - ndLst[n1] - k = v1[0]*v2[1] - v1[1]*v2[0] - if(k < 0.0): - elLst[eli,1] = n3 - elLst[eli,2] = n2 - else: - elLst = mData['elements'] - ndLst = mData['nodes'] - - XYZ = self.XYZCoord(ndLst) - - mData['nodes'] = XYZ - mData['elements'] = elLst - return mData - - else: - raise Exception('Only quadrilateral shell regions can use the structured meshing option') - - else: - bndData = self.initialBoundary() - mesh = Mesh2D(bndData['nodes'],bndData['elements']) - mData = mesh.createUnstructuredMesh(self.elType) - XYZ = self.XYZCoord(mData['nodes']) - mData['nodes'] = XYZ - return mData - - def initialBoundary(self): - """ Object data modified: none - Parameters - ---------- - - Returns - ------- - nodes - edges - """ - if 'quad' in self.regType: - bnd = Boundary2D() - bnd.addSegment('line',[[-1.0,-1.0],[1.0,-1.0]],self.edgeEls[0]) - bnd.addSegment('line',[[1.0,-1.0],[1.0,1.0]],self.edgeEls[1]) - bnd.addSegment('line',[[1.0,1.0],[-1.0,1.0]],self.edgeEls[2]) - bnd.addSegment('line',[[-1.0,1.0],[-1.0,-1.0]],self.edgeEls[3]) - bData = bnd.getBoundaryMesh() - return bData - elif 'tri' in self.regType: - bnd = Boundary2D() - bnd.addSegment('line',[[0.0,0.0],[1.0,0.0]],self.edgeEls[0]) - bnd.addSegment('line',[[1.0,0.0],[0.0,1.0]],self.edgeEls[1]) - bnd.addSegment('line',[[0.0,1.0],[0.0,0.0]],self.edgeEls[2]) - bData = bnd.getBoundaryMesh() - return bData - elif 'sphere' in self.regType: - pi_2 = 0.5*np.pi - bnd = Boundary2D() - bnd.addSegment('arc',[[pi_2,0.0],[-pi_2,0.0],[pi_2,0.0]],self.edgeEls[0]) - bData = bnd.getBoundaryMesh() - return bData - - - def XYZCoord(self, eta): - """ - Parameters - ---------- - eta - - Returns - ------- - XYZ - """ - # if('1' in self.regType): - # xCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,0],eta,method='linear') - # yCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,1],eta,method='linear') - # zCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,2],eta,method='linear') - # return np.transpose(np.array([xCrd,yCrd,zCrd])) - # elif('2' in self.regType or '3' in self.regType): - # 
xCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,0],eta,method='cubic') - # yCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,1],eta,method='cubic') - # zCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,2],eta,method='cubic') - # return np.transpose(np.array([xCrd,yCrd,zCrd])) - numPts = len(eta) - if('quad1' == self.regType): - #Nvec = np.zeros((1,4)) - Nmat = np.zeros((numPts,4)) - for i in range(0,numPts): - Nmat[i,0] = 0.25*(eta[i,0] - 1.0)*(eta[i,1] - 1.0) - Nmat[i,1] = -0.25*(eta[i,0] + 1.0)*(eta[i,1] - 1.0) - Nmat[i,2] = 0.25*(eta[0] + 1.0)*(eta[i,1] + 1.0) - Nmat[i,3] = -0.25*(eta[0] - 1.0)*(eta[i,1] + 1.0) - XYZ = np.matmul(Nmat,self.keyPts) - elif('quad2' == self.regType): - r1 = - 1 - r2 = 0 - r3 = 1 - Nmat = np.zeros((numPts,9)) - for i in range(0,numPts): - Nmat[i,0] = 0.25*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,1] - r2)*(eta[i,1] - r3) - Nmat[i,1] = 0.25*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,1] - r2)*(eta[i,1] - r3) - Nmat[i,2] = 0.25*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,1] - r1)*(eta[i,1] - r2) - Nmat[i,3] = 0.25*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,1] - r1)*(eta[i,1] - r2) - Nmat[i,4] = - 0.5*(eta[i,0] - r1)*(eta[i,0] - r3)*(eta[i,1] - r2)*(eta[i,1] - r3) - Nmat[i,5] = - 0.5*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,1] - r1)*(eta[i,1] - r3) - Nmat[i,6] = - 0.5*(eta[i,0] - r1)*(eta[i,0] - r3)*(eta[i,1] - r1)*(eta[i,1] - r2) - Nmat[i,7] = - 0.5*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,1] - r1)*(eta[i,1] - r3) - Nmat[i,8] = (eta[i,0] - r1)*(eta[i,0] - r3)*(eta[i,1] - r1)*(eta[i,1] - r3) - XYZ = np.matmul(Nmat,self.keyPts) - elif('quad3' == self.regType): - r1 = - 1 - r2 = - 0.333333333333333 - r3 = 0.333333333333333 - r4 = 1 - coef = np.array([0.31640625,- 0.31640625,0.31640625,- 0.31640625,- 0.94921875,0.94921875,0.94921875,- 0.94921875,- 0.94921875,0.94921875,0.94921875,- 0.94921875,2.84765625,- 2.84765625,2.84765625,- 2.84765625]) - Nmat = np.zeros((numPts,16)) - for i in range(0,numPts): - Nmat[i,0] = coef[0]*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,0] - r4)*(eta[i,1] - r2)*(eta[i,1] - r3)*(eta[i,1] - r4) - Nmat[i,1] = coef[1]*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,1] - r2)*(eta[i,1] - r3)*(eta[i,1] - r4) - Nmat[i,2] = coef[2]*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,1] - r1)*(eta[i,1] - r2)*(eta[i,1] - r3) - Nmat[i,3] = coef[3]*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,0] - r4)*(eta[i,1] - r1)*(eta[i,1] - r2)*(eta[i,1] - r3) - Nmat[i,4] = coef[4]*(eta[i,0] - r1)*(eta[i,0] - r3)*(eta[i,0] - r4)*(eta[i,1] - r2)*(eta[i,1] - r3)*(eta[i,1] - r4) - Nmat[i,5] = coef[5]*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,0] - r4)*(eta[i,1] - r2)*(eta[i,1] - r3)*(eta[i,1] - r4) - Nmat[i,6] = coef[6]*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,1] - r1)*(eta[i,1] - r3)*(eta[i,1] - r4) - Nmat[i,7] = coef[7]*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,1] - r1)*(eta[i,1] - r2)*(eta[i,1] - r4) - Nmat[i,8] = coef[8]*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,0] - r4)*(eta[i,1] - r1)*(eta[i,1] - r2)*(eta[i,1] - r3) - Nmat[i,9] = coef[9]*(eta[i,0] - r1)*(eta[i,0] - r3)*(eta[i,0] - r4)*(eta[i,1] - r1)*(eta[i,1] - r2)*(eta[i,1] - r3) - Nmat[i,10] = coef[10]*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,0] - r4)*(eta[i,1] - r1)*(eta[i,1] - r2)*(eta[i,1] - r4) - Nmat[i,11] = coef[11]*(eta[i,0] - r2)*(eta[i,0] - r3)*(eta[i,0] - r4)*(eta[i,1] - r1)*(eta[i,1] - r3)*(eta[i,1] - r4) - Nmat[i,12] = coef[12]*(eta[i,0] - r1)*(eta[i,0] - r3)*(eta[i,0] - r4)*(eta[i,1] - r1)*(eta[i,1] - r3)*(eta[i,1] - r4) - Nmat[i,13] = coef[13]*(eta[i,0] - 
r1)*(eta[i,0] - r2)*(eta[i,0] - r4)*(eta[i,1] - r1)*(eta[i,1] - r3)*(eta[i,1] - r4) - Nmat[i,14] = coef[14]*(eta[i,0] - r1)*(eta[i,0] - r2)*(eta[i,0] - r4)*(eta[i,1] - r1)*(eta[i,1] - r2)*(eta[i,1] - r4) - Nmat[i,15] = coef[15]*(eta[i,0] - r1)*(eta[i,0] - r3)*(eta[i,0] - r4)*(eta[i,1] - r1)*(eta[i,1] - r2)*(eta[i,1] - r4) - XYZ = np.matmul(Nmat,self.keyPts) - elif('tri1' == self.regType): - Nmat = np.zeros((numPts,3)) - for i in range(0,numPts): - Nmat[i,0] = 1 - eta[i,0] - eta[i,1] - Nmat[i,1] = eta[i,0] - Nmat[i,2] = eta[i,1] - XYZ = np.matmul(Nmat,self.keyPts) - elif('tri2' == self.regType): - Nmat = np.zeros((numPts,6)) - for i in range(0,numPts): - Nmat[i,0] = 2*(eta[i,0] + eta[i,1] - 1)*(eta[i,0] + eta[i,1] - 0.5) - Nmat[i,1] = 2*eta[i,0]*(eta[i,0] - 0.5) - Nmat[i,2] = 2*eta[i,1]*(eta[i,1] - 0.5) - Nmat[i,3] = - 4*eta[i,0]*(eta[i,0] + eta[i,1] - 1) - Nmat[i,4] = 4*eta[i,0]*eta[i,1] - Nmat[i,5] = - 4*eta[i,1]*(eta[i,0] + eta[i,1] - 1) - XYZ = np.matmul(Nmat,self.keyPts) - elif 'tri3' == self.type: - r2 = 1 / 3 - r3 = 2 / 3 - coef = np.array([- 4.5,4.5,4.5,13.5,- 13.5,13.5,13.5,- 13.5,13.5,- 27]) - Nmat = np.zeros((numPts,10)) - for i in range(0,numPts): - Nmat[i,0] = coef[0]*(eta[i,0] + eta[i,1] - r2)*(eta[i,0] + eta[i,1] - r3)*(eta[i,0] + eta[i,1] - 1) - Nmat[i,1] = coef[1]*eta[i,0]*(eta[i,0] - r2)*(eta[i,0] - r3) - Nmat[i,2] = coef[2]*eta[i,1]*(eta[i,1] - r2)*(eta[i,1] - r3) - Nmat[i,3] = coef[3]*eta[i,0]*(eta[i,0] + eta[i,1] - r3)*(eta[i,0] + eta[i,1] - 1) - Nmat[i,4] = coef[4]*eta[i,0]*(eta[i,0] - r2)*(eta[i,0] + eta[i,1] - 1) - Nmat[i,5] = coef[5]*eta[i,0]*eta[i,1]*(eta[i,0] - r2) - Nmat[i,6] = coef[6]*eta[i,0]*eta[i,1]*(eta[i,1] - r2) - Nmat[i,7] = coef[7]*eta[i,1]*(eta[i,1] - r2)*(eta[i,0] + eta[i,1] - 1) - Nmat[i,8] = coef[8]*eta[i,1]*(eta[i,0] + eta[i,1] - r3)*(eta[i,0] + eta[i,1] - 1) - Nmat[i,9] = coef[9]*eta[i,0]*eta[i,1]*(eta[i,0] + eta[i,1] - 1) - XYZ = np.matmul(Nmat,self.keyPts) - elif(self.regType == 'sphere'): - vec = self.keyPts[1,:] - self.keyPts[0,:] ## local x-direction - outerRad = np.linalg.norm(vec) - XYZ = np.zeros((len(eta),3)) - ndi = 0 - for nd in eta: - phiComp = np.linalg.norm(nd) - if (nd[0] > 1.0e-12): - theta = np.arctan(nd[1]/nd[0]) - else: - if (nd[0] < 1.0e-12): - theta = np.pi + np.arctan(nd[1] / nd[0]) - else: - theta = np.arctan(nd[1]/1.0e-12) - phi = 0.5*np.pi - phiComp - xloc = outerRad*np.cos(theta)*np.cos(phi) - yloc = outerRad*np.sin(theta)*np.cos(phi) - zloc = outerRad*np.sin(phi) - XYZLoc = np.array([xloc,yloc,zloc]) - a1 = (1/outerRad)*vec - vec2 = self.keyPts[2,:] - self.keyPts[1,:] - vec3 = np.array([ - (vec[1]*vec2[2] - vec[2]*vec2[1]), - (vec[2]*vec2[0] - vec[0]*vec2[2]), - (vec[0]*vec2[1] - vec[1]*vec2[0]) - ]) - mag = np.sqrt(vec3*vec3.T) - a3 = (1/mag)*vec3 - a2 = np.array([ - (a3[1]*a1[2] - a3[2]*a1[1]), - (a3[2]*a1[0] - a3[0]*a1[2]), - (a3[0]*a1[1] - a3[1]*a1[0]) - ]) - alpha = np.array([[a1],[a2],[a3]]) - XYZ[ndi] = np.matmul(XYZLoc,alpha) + self.keyPts[0,:] - ndi = ndi + 1 - - return XYZ \ No newline at end of file diff --git a/src/pynumad/shell/SpatialGridList3DClass.py b/src/pynumad/shell/SpatialGridList3DClass.py deleted file mode 100644 index b32a5a5..0000000 --- a/src/pynumad/shell/SpatialGridList3DClass.py +++ /dev/null @@ -1,78 +0,0 @@ -import numpy as np - -class SpatialGridList3D(): - - def __init__(self, minimumX, maximumX, minimumY, maximumY, minimumZ, maximumZ, xGridSize, yGridSize, zGridSize): - self.xMin = minimumX - self.yMin = minimumY - self.zMin = minimumZ - self.xGSz = xGridSize - self.yGSz = yGridSize - 
self.zGSz = zGridSize - xLen = maximumX - minimumX - yLen = maximumY - minimumY - zLen = maximumZ - minimumZ - self.xRows = int(np.ceil(xLen/xGridSize)) - self.yRows = int(np.ceil(yLen/yGridSize)) - self.zRows = int(np.ceil(zLen/zGridSize)) - self.fullList = list() - for i in range(0,self.xRows): - xList = list() - for j in range(0,self.yRows): - yList = list() - for k in range(0,self.zRows): - yList.append(list()) - xList.append(yList) - self.fullList.append(xList) - - def addEntry(self, val, coord): - xRow = int(np.floor((coord[0] - self.xMin)/self.xGSz)) - yRow = int(np.floor((coord[1] - self.yMin)/self.yGSz)) - zRow = int(np.floor((coord[2] - self.zMin)/self.zGSz)) - self.fullList[xRow][yRow][zRow].append(val) - - def findInXYZMargin(self,point,Xmargin,Ymargin,Zmargin): - if(Xmargin == -1): - iMax = self.xRows - iMin = 0 - else: - iMax = int(np.ceil((point[0] + Xmargin - self.xMin)/self.xGSz)) - if(iMax > self.xRows): - iMax = self.xRows - iMin = int(np.floor((point[0] - Xmargin - self.xMin)/self.xGSz)) - if(iMin < 0): - iMin = 0 - - if(Ymargin == -1): - jMax = self.yRows - jMin = 0 - else: - jMax = int(np.ceil((point[1] + Ymargin - self.yMin)/self.yGSz)) - if(jMax > self.yRows): - jMax = self.yRows - jMin = int(np.floor((point[1] - Ymargin - self.yMin)/self.yGSz)) - if(jMin < 0): - jMin = 0 - - if(Zmargin == -1): - kMax = self.zRows - kMin = 0 - else: - kMax = int(np.ceil((point[2] + Zmargin - self.zMin)/self.zGSz)) - if(kMax > self.zRows): - kMax = self.zRows - kMin = int(np.floor((point[2] - Zmargin - self.zMin)/self.zGSz)) - if(kMin < 0): - kMin = 0 - - labelList = list() - for i in range(iMin,iMax): - for j in range(jMin,jMax): - for k in range(kMin,kMax): - labelList.extend(self.fullList[i][j][k]) - - return labelList - - def findInRadius(self, point, radius): - labelList = self.findInXYZMargin(point,radius,radius,radius) - return labelList \ No newline at end of file diff --git a/src/pynumad/shell/__init__.py b/src/pynumad/shell/__init__.py index aa75602..c82a0d6 100644 --- a/src/pynumad/shell/__init__.py +++ b/src/pynumad/shell/__init__.py @@ -1 +1 @@ -import pynumad.shell.shell \ No newline at end of file +from . 
import shell diff --git a/src/pynumad/shell/boundary2d.py b/src/pynumad/shell/boundary2d.py new file mode 100644 index 0000000..2fc1c67 --- /dev/null +++ b/src/pynumad/shell/boundary2d.py @@ -0,0 +1,32 @@ +import numpy as np +import pynumad.shell.mesh_tools as mt +from pynumad.shell.segment2d import * + + +class Boundary2D: + def __init__(self, segList=[]): + self.segList = list() + self.segList.extend(segList) + + def addSegment(self, segType, keyPts, numEls): + self.segList.append(Segment2D(segType, keyPts, numEls)) + + def getBoundaryMesh(self): + allNds = list() + allEds = list() + totNds = 0 + for seg in self.segList: + segMesh = seg.getNodesEdges() + allNds.extend(segMesh["nodes"]) + allEds.extend(segMesh["edges"] + totNds) + totNds = len(allNds) + allNds = np.array(allNds) + allEds = np.array(allEds) + + meshData = dict() + meshData["nodes"] = allNds + meshData["elements"] = allEds + + output = mt.mergeDuplicateNodes(meshData) + + return output diff --git a/src/pynumad/shell/mesh2d.py b/src/pynumad/shell/mesh2d.py new file mode 100644 index 0000000..f8cfe05 --- /dev/null +++ b/src/pynumad/shell/mesh2d.py @@ -0,0 +1,1043 @@ +import numpy as np +from pynumad.shell.spatial_grid_list2d import * +import plotly.graph_objects as go + + +class Mesh2D: + def __init__(self, boundaryNodes, boundaryEdges=[]): + self.nodeGL = None + self.edgeGL = None + self.triElGL = None + + self.minEdgeLen = 0.0 + self.maxEdgeLen = 1.0 + self.avgProjLen = 1.0 + + self.numBndNodes = len(boundaryNodes) + self.numNodes = self.numBndNodes + self.ndSize = self.numBndNodes + self.nodes = np.array(boundaryNodes) + + self.numBndEdges = len(boundaryEdges) + self.edgeNodes = np.array(boundaryEdges) + self.numEdges = self.numBndEdges + self.edSize = self.numBndEdges + self.edgeElements = np.array([]) + self.edgeUnitNorms = np.array([]) + + self.numTriEls = 0 + self.triElSize = 0 + self.triElements = np.array([]) + + self.numQuadEls = 0 + self.quadElSize = 0 + self.quadElements = np.array([]) + + ## !! 
check changes to createSweptMesh calls + def createSweptMesh( + self, + sweepMethod, + sweepElements, + sweepDistance=1.0, + point=[], + axis=[], + followNormal=False, + destNodes=[], + interpMethod="linear", + ): + ## sweepMethod = inDirection, toPoint, fromPoint, toDestNodes, revolve + """Object data modified: self.quadElements, self.nodes, self.quadElements + Parameters + ---------- + + Returns + ------- + nodes + elements + """ + nbNds = self.numBndNodes + nbEds = self.numBndEdges + try: + totSweepEls = sum(sweepElements) + ndSize = nbNds * (totSweepEls + 1) + stages = len(sweepElements) + multiStage = True + except: + totSweepEls = sweepElements + ndSize = nbNds * (sweepElements + 1) + stages = 1 + multiStage = False + dimSpace = len(self.nodes[0]) + tmp = self.nodes.copy() + self.nodes = np.zeros((ndSize, dimSpace)) + self.nodes[0:nbNds] = tmp + self.ndSize = ndSize + self.numNodes = nbNds + + if self.numBndEdges == 0: + n1 = np.array(range(nbNds - 1)) + n2 = np.array(range(1, nbNds)) + self.edgeNodes = np.transpose(np.array([n1, n2])) + self.numEdges = nbNds - 1 + self.numBndEdges = nbNds - 1 + self.edSize = nbNds - 1 + + self.triElements = np.array([]) + self.triElSize = 0 + self.numTriEls = 0 + + quadElSize = nbEds * totSweepEls + self.quadElements = -np.ones((quadElSize, 4), dtype=int) + self.quadElSize = quadElSize + self.numQuadEls = 0 + + methString = "inDirection toPoint fromPoint" + if sweepMethod in methString: + ndDir = list() + if sweepMethod == "inDirection": + mag = np.linalg.norm(axis) + unitAxis = (1.0 / mag) * np.array(axis) + for i in range(0, self.numNodes): + ndDir.append(unitAxis) + else: + pAr = np.array(point) + for i in range(0, self.numNodes): + if sweepMethod == "toPoint": + vec = pAr - nd + else: + vec = nd - pAr + mag = np.linalg.norm(vec) + unitVec = (1.0 / mag) * vec + ndDir.append(unitVec) + rowNds = self.numNodes + rowEls = self.numEdges + stepLen = sweepDistance / sweepElements + k = self.numNodes + m = self.numQuadEls + for i in range(0, sweepElements): + for j in range(0, rowNds): + newNd = self.nodes[j] + (i + 1) * stepLen * ndDir[j] + self.nodes[k] = newNd + k = k + 1 + for j in range(0, rowEls): + n1 = self.edgeNodes[j, 0] + i * rowNds + n2 = self.edgeNodes[j, 1] + i * rowNds + n3 = n2 + rowNds + n4 = n1 + rowNds + self.quadElements[m, :] = np.array([n1, n2, n3, n4]) + m = m + 1 + self.numNodes = k + self.numQuadEls = m + elif sweepMethod == "toDestNodes": + nNds = self.numNodes + nbNds = self.numBndNodes + nEds = self.numEdges + nQuad = self.numQuadEls + if not multiStage: + sweepElements = [sweepElements] + destNodes = [destNodes] + if interpMethod == "linear": + prevDest = self.nodes.copy() + for stg in range(0, stages): + dNds = np.array(destNodes[stg]) + ndDir = list() + for ndi in range(0, nbNds): + vec = (1.0 / sweepElements[stg]) * (dNds[ndi] - prevDest[ndi]) + ndDir.append(vec) + ndDir = np.array([ndDir]) + for i in range(0, sweepElements[stg]): + for ndi in range(0, nbNds): + self.nodes[nNds] = self.nodes[ndi] + (i + 1) * ndDir[ndi] + nNds = nNds + 1 + for edi in range(0, self.numBndEdges): + n1 = self.edgeNodes[edi, 0] + i * nbNds + n2 = self.edgeNodes[edi, 1] + i * nbNds + n3 = n2 + nbNds + n4 = n1 + nbNds + self.quadElements[nQuad] = np.array([n1, n2, n3, n4]) + nQuad = nQuad + 1 + prevDest = dNds + else: ## Smooth interpolation + xMat = np.zeros((nbNds, totSweepEls + 1)) + yMat = np.zeros((nbNds, totSweepEls + 1)) + pDest = (1.0 / stages) * np.array(range(0, stages + 1)) + pAll = (1.0 / totSweepEls) * np.array(range(0, totSweepEls + 
1)) + for ndi in range(0, nbNds): + xDest = [self.nodes[ndi, 0]] + yDest = [self.nodes[ndi, 1]] + for dNds in destNodes: + xDest.append(dNds[ndi][0]) + yDest.append(dNds[ndi][1]) + xDest = np.array(xDest) + iFun = interpolate.interp1d( + pDest, + xDest, + "cubic", + axis=0, + bounds_error=False, + fill_value="extrapolate", + ) + xAll = iFun(pAll) + xMat[ndi, :] = xAll + yDest = np.array(yDest) + iFun = interpolate.interp1d( + pDest, + yDest, + "cubic", + axis=0, + bounds_error=False, + fill_value="extrapolate", + ) + yAll = iFun(pAll) + yMat[ndi, :] = yAll + if dimSpace == 3: + zMat = np.zeros((nbNds, totSweepEls + 1)) + for ndi in range(0, nbNds): + zDest = [self.nodes[ndi, 2]] + for dNds in destNodes: + zDest.append(dNds[ndi][2]) + zDest = np.array(zDest) + iFun = interpolate.interp1d( + pDest, + zDest, + "cubic", + axis=0, + bounds_error=False, + fill_value="extrapolate", + ) + zAll = iFun(pAll) + zMat[ndi, :] = zAll + for i in range(0, totSweepElements): + for ndi in range(0, nbNds): + if dimSpace == 2: + self.nodes[nNds] = np.array( + [xMat[ndi, i + 1], yMat[ndi, i + 1]] + ) + else: + self.nodes[nNds] = np.array( + [xMat[ndi, i + 1], yMat[ndi, i + 1], zMat[ndi, i + 1]] + ) + nNds = nNds + 1 + for edi in range(0, self.numBndEdges): + n1 = self.edgeNodes[edi, 0] + i * nbNds + n2 = self.edgeNodes[edi, 1] + i * nbNds + n3 = n2 + nbNds + n4 = n1 + nbNds + self.quadElements[nQuad] = np.array([n1, n2, n3, n4]) + nQuad = nQuad + 1 + self.numNodes = nNds + self.numQuadEls = nQuad + + meshOut = dict() + meshOut["nodes"] = self.nodes + meshOut["elements"] = self.quadElements + return meshOut + + def skewNodes(self): + od = np.tan(np.pi / 12.0) + skewMat = np.array([[1.0, od], [od, 1.0]]) + self.nodes = np.matmul(self.nodes, skewMat) + + def unskewNodes(self): + od = np.tan(np.pi / 12.0) + skewMat = np.array([[1.0, od], [od, 1.0]]) + invSkew = np.linalg.inv(skewMat) + self.nodes = np.matmul(self.nodes, invSkew) + + def getBoundaryEdgeNormals(self): + stepLen = self.minEdgeLen / np.sqrt(3.0) + numSteps = int(np.ceil(self.edgeGL.xGSz * self.edgeGL.xRows / stepLen)) + yMin = self.edgeGL.yMin + xMin = self.edgeGL.xMin + xMarg = 0.6 * self.maxEdgeLen + for i in range(0, numSteps): + xCrd = xMin + i * stepLen + p1 = np.array([xCrd, yMin]) + v1 = np.array([0.0, 1.0]) + Xns = list() + nearEdges = self.edgeGL.findInXYMargin(p1, xMarg, -1) + for edi in nearEdges: + n1 = self.edgeNodes[edi, 0] + n2 = self.edgeNodes[edi, 1] + p2 = self.nodes[n1] + v2 = self.nodes[n2] - p2 + Amat = np.array([[v1[0], -v2[0]], [v1[1], -v2[1]]]) + detA = np.linalg.det(Amat) + if detA != 0.0: + bVec = p2 - p1 + soln = np.linalg.solve(Amat, bVec) + if soln[1] > 0.0 and soln[1] < 1.0: + Xns.append([edi, soln[0]]) + iLen = len(Xns) + for i in range(0, iLen - 1): + for j in range(0, iLen - 1): + x1 = Xns[j] + x2 = Xns[j + 1] + if x2[1] < x1[1]: + Xns[j] = x2 + Xns[j + 1] = x1 + for i in range(0, iLen, 2): + edi = Xns[i][0] + uN = self.edgeUnitNorms[edi] + if uN[1] < 0.0: + self.edgeUnitNorms[edi] = -uN + for i in range(1, iLen, 2): + edi = Xns[i][0] + uN = self.edgeUnitNorms[edi] + if uN[1] > 0.0: + self.edgeUnitNorms[edi] = -uN + + numSteps = int(np.ceil(self.edgeGL.yGSz * self.edgeGL.yRows / stepLen)) + yMarg = 0.6 * self.maxEdgeLen + for i in range(0, numSteps): + yCrd = yMin + i * stepLen + p1 = np.array([xMin, yCrd]) + v1 = np.array([1.0, 0.0]) + Xns = list() + nearEdges = self.edgeGL.findInXYMargin(p1, -1, yMarg) + for edi in nearEdges: + n1 = self.edgeNodes[edi, 0] + n2 = self.edgeNodes[edi, 1] + p2 = self.nodes[n1] + v2 = 
self.nodes[n2] - p2 + Amat = np.array([[v1[0], -v2[0]], [v1[1], -v2[1]]]) + detA = np.linalg.det(Amat) + if detA != 0.0: + bVec = p2 - p1 + soln = np.linalg.solve(Amat, bVec) + if soln[1] > 0.0 and soln[1] < 1.0: + Xns.append([edi, soln[0]]) + iLen = len(Xns) + for i in range(0, iLen - 1): + for j in range(0, iLen - 1): + x1 = Xns[j] + x2 = Xns[j + 1] + if x2[1] < x1[1]: + Xns[j] = x2 + Xns[j + 1] = x1 + for i in range(0, iLen, 2): + edi = Xns[i][0] + uN = self.edgeUnitNorms[edi] + if uN[0] < 0.0: + self.edgeUnitNorms[edi] = -uN + for i in range(1, iLen, 2): + edi = Xns[i][0] + uN = self.edgeUnitNorms[edi] + if uN[0] > 0.0: + self.edgeUnitNorms[edi] = -uN + + def unstructuredPrep(self, elType): + nbNds = self.numBndNodes + n_pi = nbNds / np.pi + ndSize = int(4 * n_pi * n_pi) + tmp = self.nodes.copy() + self.nodes = np.zeros((ndSize, 2)) + self.nodes[0:nbNds, 0:2] = tmp + self.ndSize = ndSize + self.numNodes = nbNds + + edSize = int(3 * nbNds * n_pi) + nbEds = self.numBndEdges + if nbEds == 0: + n1 = np.array(range(nbNds)) + n2 = np.array(range(1, nbNds + 1)) + n2[nbNds - 1] = 0 + self.edgeNodes = -np.ones((edSize, 2), dtype=int) + self.edgeNodes[0:nbNds, 0] = n1 + self.edgeNodes[0:nbNds, 1] = n2 + nbEds = nbNds + self.numBndEdges = nbEds + else: + tmp = self.edgeNodes.copy() + self.edgeNodes = -np.ones((edSize, 2), dtype=int) + self.edgeNodes[0:nbEds] = tmp + self.edgeElements = -np.ones((edSize, 2), dtype=int) + self.edgeUnitNorms = np.zeros((edSize, 2)) + self.edSize = edSize + self.numEdges = nbEds + + triElSize = int(2 * nbNds * n_pi) + self.triElements = -np.ones((triElSize, 3), dtype=int) + self.triElSize = triElSize + self.numTriEls = 0 + + quadElSize = int(nbNds * n_pi) + self.quadElements = -np.ones((quadElSize, 4), dtype=int) + self.quadElSize = quadElSize + self.numQuadEls = 0 + + if elType == "quad": + self.skewNodes() + minLen = 1.0e100 + maxLen = 0.0 + avgLen = 0.0 + for edi in range(0, self.numEdges): + n1 = self.edgeNodes[edi, 0] + n2 = self.edgeNodes[edi, 1] + vec = self.nodes[n1] - self.nodes[n2] + ln = np.linalg.norm(vec) + avgLen = avgLen + ln + if ln < minLen: + minLen = ln + if ln > maxLen: + maxLen = ln + unitVec = (1.0 / ln) * vec + self.edgeUnitNorms[edi] = np.array([-unitVec[1], unitVec[0]]) + avgLen = avgLen / self.numEdges + self.minEdgeLen = minLen + self.maxEdgeLen = maxLen + self.avgProjLen = 0.5 * np.sqrt(3) * avgLen + xMin = np.amin(self.nodes[0 : self.numNodes, 0]) + xMax = np.amax(self.nodes[0 : self.numNodes, 0]) + yMin = np.amin(self.nodes[0 : self.numNodes, 1]) + yMax = np.amax(self.nodes[0 : self.numNodes, 1]) + xLen = xMax - xMin + yLen = yMax - yMin + meshLen = np.sqrt(xLen * xLen + yLen * yLen) + marg = 0.01 * meshLen + xMax = xMax + marg + xMin = xMin - marg + yMax = yMax + marg + yMin = yMin - marg + xLen = xMax - xMin + yLen = yMax - yMin + meshLen = np.sqrt(xLen * xLen + yLen * yLen) + aL2 = avgLen * avgLen + xS3 = xLen * aL2 + xGS = np.power(xS3, 0.333333) + yS3 = yLen * aL2 + yGS = np.power(yS3, 0.333333) + + self.nodeGL = SpatialGridList2D(xMin, xMax, yMin, yMax, xGS, yGS) + for ndi in range(0, self.numNodes): + self.nodeGL.addEntry(ndi, self.nodes[ndi, :]) + + self.edgeGL = SpatialGridList2D(xMin, xMax, yMin, yMax, xGS, yGS) + for edi in range(0, self.numEdges): + n1 = self.edgeNodes[edi, 0] + n2 = self.edgeNodes[edi, 1] + midPt = 0.5 * (self.nodes[n1] + self.nodes[n2]) + self.edgeGL.addEntry(edi, midPt) + + self.triElGL = SpatialGridList2D(xMin, xMax, yMin, yMax, xGS, yGS) + + self.getBoundaryEdgeNormals() + + def edgesIntersect(self, 
e1Nds, e2Nds): + e1n1 = e1Nds[0] + e1n2 = e1Nds[1] + e2n1 = e2Nds[0] + e2n2 = e2Nds[1] + p1 = self.nodes[e1n1] + v1 = self.nodes[e1n2] - p1 + p2 = self.nodes[e2n1] + v2 = self.nodes[e2n2] - p2 + Amat = np.array([[v1[0], -v2[0]], [v1[1], -v2[1]]]) + detA = np.linalg.det(Amat) + if detA != 0.0: + bVec = p2 - p1 + soln = np.linalg.solve(Amat, bVec) + if ( + soln[0] > 1.0e-6 + and soln[0] < 0.999999 + and soln[1] > 1.0e-6 + and soln[1] < 0.999999 + ): + return True + else: + return False + else: + return False + + def ptInEl(self, pt, el): + p1 = self.nodes[el[0]] + v1 = self.nodes[el[1]] - p1 + v2 = self.nodes[el[2]] - p1 + Amat = np.array([[v1[0], v2[0]], [v1[1], v2[1]]]) + detA = np.linalg.det(Amat) + if detA != 0.0: + bVec = pt - p1 + soln = np.linalg.solve(Amat, bVec) + if soln[0] > 1e-6 and soln[1] > 1e-6: + solSum = soln[0] + soln[1] + if solSum < 0.999999: + return True + else: + return False + else: + return False + else: + return False + + def violations(self, newEl): + n1 = newEl[0] + n2 = newEl[1] + n3 = newEl[2] + ed1 = np.array([n1, n2]) + ed2 = np.array([n2, n3]) + ed3 = np.array([n3, n1]) + cent = 0.333333 * (self.nodes[n1] + self.nodes[n2] + self.nodes[n3]) + srchRad = 1.4 * self.maxEdgeLen + + nearNds = self.nodeGL.findInRadius(cent, srchRad) + for ndi in nearNds: + inEl = self.ptInEl(self.nodes[ndi], newEl) + if inEl: + return True + + nearEdges = self.edgeGL.findInRadius(cent, srchRad) + for edi in nearEdges: + intSect = self.edgesIntersect(self.edgeNodes[edi], ed1) + if intSect: + return True + intSect = self.edgesIntersect(self.edgeNodes[edi], ed2) + if intSect: + return True + intSect = self.edgesIntersect(self.edgeNodes[edi], ed3) + if intSect: + return True + + srtNew = np.sort(newEl) + nearEls = self.triElGL.findInRadius(cent, srchRad) + for eli in nearEls: + srtEi = np.sort(self.triElements[eli]) + if all(srtNew == srtEi): + return True + inEl = self.ptInEl(self.nodes[n1], self.triElements[eli]) + if inEl: + return True + inEl = self.ptInEl(self.nodes[n2], self.triElements[eli]) + if inEl: + return True + inEl = self.ptInEl(self.nodes[n3], self.triElements[eli]) + if inEl: + return True + + return False + + def createEdge(self, nds, el): + midpt = 0.5 * (self.nodes[nds[0]] + self.nodes[nds[1]]) + nearEdges = self.edgeGL.findInRadius(midpt, self.minEdgeLen) + for nEi in nearEdges: + iNds = self.edgeNodes[nEi] + if nds[0] == iNds[0] and nds[1] == iNds[1]: + self.edgeElements[nEi, 1] = el + return + if nds[0] == iNds[1] and nds[1] == iNds[0]: + self.edgeElements[nEi, 1] = el + return + k = self.numEdges + self.edgeNodes[k] = nds + self.edgeElements[k, 0] = el + edVec = self.nodes[nds[1]] - self.nodes[nds[0]] + mag = np.linalg.norm(edVec) + ## --------------- + if mag < 1.0e-12: + outStr = "nodes " + str(nds) + print(outStr) + outStr = ( + "coords " + str(self.nodes[nds[0]]) + " " + str(self.nodes[nds[1]]) + ) + print(outStr) + unitEV = (1.0 / mag) * edVec + unitNorm = np.array([-unitEV[1], unitEV[0]]) + for eNd in self.triElements[el]: + if eNd != nds[0] and eNd != nds[1]: + vec = midpt - self.nodes[eNd] + dp = np.dot(vec, unitNorm) + if dp > 0.0: + self.edgeUnitNorms[k] = unitNorm + else: + self.edgeUnitNorms[k] = -unitNorm + self.edgeGL.addEntry(k, midpt) + self.numEdges = k + 1 + + def adoptConnectedNode(self, edgeIndex, point, srchRad): + eNds = self.edgeNodes[edgeIndex] + midPt = 0.5 * (self.nodes[eNds[0]] + self.nodes[eNds[1]]) + nearEdges = self.edgeGL.findInRadius(point, srchRad) + for nEi in nearEdges: + if nEi != edgeIndex: + eiNds = self.edgeNodes[nEi] + 
commonNd = -1 + for i in range(0, 2): + for j in range(0, 2): + if eNds[i] == eiNds[j]: + commonNd = j + if commonNd != -1: + if commonNd == 0: + commonNdi = eiNds[0] + unComNdi = eiNds[1] + else: + commonNdi = eiNds[1] + unComNdi = eiNds[0] + unComNdPt = self.nodes[unComNdi] + vec = point - unComNdPt + dist = np.linalg.norm(vec) + if dist < srchRad: + vec = unComNdPt - midPt + dp = np.dot(vec, self.edgeUnitNorms[edgeIndex]) + if dp > 0.0: + newEl = np.array([eNds[0], eNds[1], unComNdi]) + viol = self.violations(newEl) + if not viol: + k = self.numTriEls + self.triElements[k] = newEl + cent = 0.33333333 * ( + self.nodes[newEl[0]] + + self.nodes[newEl[1]] + + self.nodes[newEl[2]] + ) + self.triElGL.addEntry(k, cent) + self.edgeElements[edgeIndex, 1] = k + self.edgeElements[nEi, 1] = k + if eNds[0] == commonNdi: + newNds = np.array([eNds[1], unComNdi]) + else: + newNds = np.array([eNds[0], unComNdi]) + self.createEdge(newNds, k) + self.numTriEls = k + 1 + return True + return False + + def adoptAnyNode(self, edgeIndex, point, srchRad): + eNds = self.edgeNodes[edgeIndex] + midPt = 0.5 * (self.nodes[eNds[0]] + self.nodes[eNds[1]]) + nearNds = self.nodeGL.findInRadius(point, srchRad) + for ndi in nearNds: + if ndi not in eNds: + vec = self.nodes[ndi] - point + dist = np.linalg.norm(vec) + if dist < srchRad: + vec = self.nodes[ndi] - midPt + dp = np.dot(vec, self.edgeUnitNorms[edgeIndex]) + if dp > 0.0: + newEl = np.array([eNds[0], eNds[1], ndi]) + viol = self.violations(newEl) + if not viol: + k = self.numTriEls + self.triElements[k] = newEl + cent = 0.33333333 * ( + self.nodes[newEl[0]] + + self.nodes[newEl[1]] + + self.nodes[newEl[2]] + ) + self.triElGL.addEntry(k, cent) + self.edgeElements[edgeIndex, 1] = k + newNds = np.array([eNds[0], ndi]) + self.createEdge(newNds, k) + newNds = np.array([eNds[1], ndi]) + self.createEdge(newNds, k) + self.numTriEls = k + 1 + return True + return False + + def createNode(self, edgeIndex, point): + eNds = self.edgeNodes[edgeIndex] + n = self.numNodes + self.nodes[n] = point + newEl = np.array([eNds[0], eNds[1], n]) + viol = self.violations(newEl) + if not viol: + k = self.numTriEls + self.triElements[k] = newEl + cent = 0.33333333 * ( + self.nodes[newEl[0]] + self.nodes[newEl[1]] + self.nodes[newEl[2]] + ) + self.triElGL.addEntry(k, cent) + self.edgeElements[edgeIndex, 1] = k + newNds = np.array([eNds[0], n]) + self.createEdge(newNds, k) + newNds = np.array([eNds[1], n]) + self.createEdge(newNds, k) + self.nodeGL.addEntry(n, point) + self.numTriEls = k + 1 + self.numNodes = n + 1 + return True + else: + return False + + def distributeNodes(self): + dim = 2 * self.numNodes + Dmat = np.zeros(dim) + bDim = 2 * self.numBndNodes + Dmat[0:bDim] = 100000.0 + Pmat = 10.0 * np.ones(dim) + Dmat + Pinv = np.zeros(dim) + Pinv[0:bDim] = 9.999e-6 + Pinv[bDim:dim] = 0.1 + rhs = np.zeros(dim) + j = 0 + for bni in range(0, self.numBndNodes): + rhs[j] = 100000.0 * self.nodes[bni, 0] + rhs[j + 1] = 100000.0 * self.nodes[bni, 1] + j = j + 2 + + nEls = self.numTriEls + elWt = np.zeros(nEls) + for eli in range(0, nEls): + ni = self.triElements[eli] + v1 = self.nodes[ni[1]] - self.nodes[ni[0]] + v2 = self.nodes[ni[2]] - self.nodes[ni[0]] + cp = v1[0] * v2[1] - v1[1] * v2[0] + elWt[eli] = np.abs(cp) + avgWt = np.mean(elWt) + elWt = (1.0 / avgWt) * elWt + + elMat = np.zeros((6, 6)) + elMat[0, 0] = 2.0 + elMat[0, 2] = -1.0 + elMat[0, 4] = -1.0 + for i in range(1, 6): + elMat[i, i:6] = elMat[0, 0 : 6 - i] + for i in range(0, 5): + elMat[i + 1 : 6, i] = elMat[i, i + 1 : 6] + + xVec = 
np.zeros(dim) + gVec = -rhs + wVec = np.multiply(Pinv, gVec) + hVec = -wVec + zVec = np.zeros(dim) + res = np.dot(gVec, wVec) + i = 0 + while res > 1e-12 and i < dim: + zVec[:] = 0.0 + for eli in range(0, nEls): + inT2 = 2 * self.triElements[eli] + vecInd = [ + inT2[0], + inT2[0] + 1, + inT2[1], + inT2[1] + 1, + inT2[2], + inT2[2] + 1, + ] + elH = hVec[vecInd] + elMati = elWt[eli] * elMat + elZ = np.matmul(elMati, elH) + zVec[vecInd] = zVec[vecInd] + elZ + zVec = zVec + np.multiply(Dmat, hVec) + alpha = res / np.dot(hVec, zVec) + xVec = xVec + alpha * hVec + gVec = gVec + alpha * zVec + wVec = np.multiply(Pinv, gVec) + rNext = np.dot(gVec, wVec) + beta = rNext / res + res = rNext + hVec = -wVec + i = i + 1 + + for i in range(0, self.numNodes): + j = i * 2 + self.nodes[i] = xVec[j : j + 2] + + def mergePairsAbove(self, edgeFactor, elElim, elLongEdge): + nQuad = self.numQuadEls + for edi in range(0, self.numEdges): + n1 = self.edgeNodes[edi, 0] + n2 = self.edgeNodes[edi, 1] + edLen = np.linalg.norm(self.nodes[n1] - self.nodes[n2]) + el1 = self.edgeElements[edi, 0] + el2 = self.edgeElements[edi, 1] + if el1 != -1 and el2 != -1: + if elElim[el1] == 0 and elElim[el2] == 0: + if ( + edLen > edgeFactor * elLongEdge[el1] + and edLen > edgeFactor * elLongEdge[el2] + ): + elElim[el1] = 1 + elElim[el2] = 1 + quadNds = np.array([n1, -1, n2, -1]) + e1Nds = self.triElements[el1] + e2Nds = self.triElements[el2] + for i in range(0, 3): + if e1Nds[i] != n1 and e1Nds[i] != n2: + quadNds[1] = e1Nds[i] + if e2Nds[i] != n1 and e2Nds[i] != n2: + quadNds[3] = e2Nds[i] + self.quadElements[nQuad] = quadNds + nQuad = nQuad + 1 + self.numQuadEls = nQuad + return elElim + + def mergeTriEls(self, elType): + ## Initialize local fields + nNds = self.numNodes + nbNds = self.numBndNodes + nEls = self.numTriEls + nQuad = self.numQuadEls + elElim = np.zeros(self.triElSize, dtype=int) + ndElems = -np.ones((nNds, 6), dtype=int) + ndElems[:, 0] = 0 + ndElim = np.zeros(nNds, dtype=int) + + ## Form the node-to-element connectivity list + for eli in range(0, nEls): + for nd in self.triElements[eli]: + j = ndElems[nd, 0] + if j < 5: + j = j + 1 + ndElems[nd, j] = eli + ndElems[nd, 0] = j + + ## Merge neste clusters + for ndi in range(nbNds, nNds): + if ndElems[ndi, 0] == 3: + abrt = False + for el in ndElems[ndi, 1:4]: + if elElim[el] == 1: + abrt = True + if not abrt: + newElNds = list() + for el in ndElems[ndi, 1:4]: + newElNds.extend(self.triElements[el]) + srtedNds = np.sort(newElNds) + finalNds = list() + for i in range(0, 8): + j = srtedNds[i] + if j != ndi and srtedNds[i + 1] == j: + finalNds.append(j) + if len(finalNds) == 3: + self.triElements[nEls] = np.array(finalNds, dtype=int) + nEls = nEls + 1 + ndElim[ndi] = 1 + for el in ndElems[ndi, 1:4]: + elElim[el] = 1 + elif ndElems[ndi, 0] == 4: + abrt = False + for el in ndElems[ndi, 1:5]: + if elElim[el] == 1: + abrt = True + if not abrt: + newElNds = list() + for el in ndElems[ndi, 1:4]: + newElNds.extend(self.triElements[el]) + srtedNds = np.sort(newElNds) + nds12 = list() + for i in range(0, 8): + j = srtedNds[i] + if j != ndi and j == srtedNds[i + 1]: + nds12.append(j) + nds34 = list() + for i in range(0, 8): + j = srtedNds[i] + if j != ndi and j not in nds12: + nds34.append(j) + if len(nds12) == 2 and len(nds34) == 2: + n1 = nds12[0] + n2 = nds12[1] + n3 = nds34[0] + n4 = nds34[1] + v1 = self.nodes[n2] - self.nodes[n1] + v2 = self.nodes[n3] - self.nodes[n1] + v3 = self.nodes[n4] - self.nodes[n1] + mat = np.array([v1, v2]) + det1 = np.linalg.det(mat) + mat = 
np.array([v2, v3]) + det2 = np.linalg.det(mat) + if det1 * det2 > 0.0: + if elType == "quad": + self.quadElements[nQuad] = np.array([n1, n2, n3, n4]) + nQuad = nQuad + 1 + else: + self.triElements[nEls] = np.array([n1, n2, n3]) + nEls = nEls + 1 + self.triElements[nEls] = np.array([n1, n3, n4]) + nEls = nEls + 1 + else: + if elType == "quad": + self.quadElements[nQuad] = np.array([n1, n2, n4, n3]) + nQuad = nQuad + 1 + else: + self.triElements[nEls] = np.array([n1, n2, n4]) + nEls = nEls + 1 + self.triElements[nEls] = np.array([n1, n4, n3]) + nEls = nEls + 1 + ndElim[ndi] = 1 + for el in ndElems[ndi, 1:5]: + elElim[el] = 1 + self.numTriEls = nEls + self.numQuadEls = nQuad + + if elType == "quad": + elLongEdge = np.zeros(nEls) + + for eli in range(0, nEls): + nds = self.triElements[eli] + longEd = np.linalg.norm(self.nodes[nds[0]] - self.nodes[nds[1]]) + eLen = np.linalg.norm(self.nodes[nds[1]] - self.nodes[nds[2]]) + if eLen > longEd: + longEd = eLen + eLen = np.linalg.norm(self.nodes[nds[2]] - self.nodes[nds[0]]) + if eLen > longEd: + longEd = eLen + elLongEdge[eli] = longEd + + ## Merge quad-forming pairs of triangles + + elElim = self.mergePairsAbove(0.99, elElim, elLongEdge) + elElim = self.mergePairsAbove(0.85, elElim, elLongEdge) + elElim = self.mergePairsAbove(0.75, elElim, elLongEdge) + nQuad = self.numQuadEls + + finalNodes = list() + ndNewInd = -np.ones(nNds, dtype=int) + ndi = 0 + for ni in range(0, nNds): + if ndElim[ni] == 0: + finalNodes.append(self.nodes[ni]) + ndNewInd[ni] = ndi + ndi = ndi + 1 + self.nodes = np.array(finalNodes) + self.numNodes = len(self.nodes) + self.ndSize = self.numNodes + + for eli in range(0, nEls): + if elElim[eli] == 0: + for j in range(0, 3): + nd = self.triElements[eli, j] + self.triElements[eli, j] = ndNewInd[nd] + + for eli in range(0, nQuad): + for j in range(0, 4): + nd = self.quadElements[eli, j] + self.quadElements[eli, j] = ndNewInd[nd] + + newTEind = list() + for i in range(0, nEls): + if elElim[i] == 0: + newTEind.append(i) + + self.triElements = self.triElements[newTEind] + self.numTriEls = len(newTEind) + self.triElSize = self.numTriEls + + def unstructuredPost(self, elType): + # self.plot2DMesh() + self.distributeNodes() + # self.plot2DMesh() + if elType == "quad": + self.unskewNodes() + # self.plot2DMesh() + self.mergeTriEls(elType) + # self.plot2DMesh() + + ## !! 
rename any calls to creating unstructured mesh as necessary, createPlanarMesh + def createUnstructuredMesh(self, elType): + self.unstructuredPrep(elType) + + elsCreated = True + while elsCreated: + # if(self.numTriEls > 0): + # self.plot2DMesh() + # cnt = input('continue?\n') + # if(cnt != 'y'): + # break + elsCreated = False + nEd = self.numEdges + for edi in range(0, nEd): + if self.edgeElements[edi, 1] == -1: + n1 = self.edgeNodes[edi, 0] + n2 = self.edgeNodes[edi, 1] + uNorm = self.edgeUnitNorms[edi] + midPt = 0.5 * (self.nodes[n1] + self.nodes[n2]) + vec = self.nodes[n1] - self.nodes[n2] + edLen = np.linalg.norm(vec) + projLen = ( + 0.2165 * edLen + 0.75 * self.avgProjLen + ) ## 0.25(sqrt(3)/2)edLen + 0.75avgProjLen + srchPt = midPt + 0.5 * projLen * uNorm + srchRad = 0.5 * np.sqrt(edLen * edLen + projLen * projLen) + found = self.adoptConnectedNode(edi, srchPt, srchRad) + if not found: + srchPt = midPt + projLen * uNorm + found = self.adoptConnectedNode(edi, srchPt, srchRad) + if not found: + srchPt = midPt + 0.5 * projLen * uNorm + found = self.adoptAnyNode(edi, srchPt, srchRad) + if not found: + srchPt = midPt + projLen * uNorm + found = self.adoptAnyNode(edi, srchPt, srchRad) + if not found: + srchPt = midPt + projLen * uNorm + self.createNode(edi, srchPt) + if not found: + srchPt = midPt + 0.5 * projLen * uNorm + self.createNode(edi, srchPt) + if found: + elsCreated = True + for edi in range(0, self.numEdges): + uNMag = np.linalg.norm(self.edgeUnitNorms[edi]) + if uNMag < 1.0e-6: + n1 = self.edgeNodes[edi, 0] + n2 = self.edgeNodes[edi, 1] + midPt = 0.5 * (self.nodes[n1] + self.nodes[n2]) + vec = self.nodes[n2] - self.nodes[n1] + mag = np.linalg.norm(vec) + unitVec = (1.0 / mag) * vec + uNorm = np.array([-unitVec[1], unitVec[0]]) + eL1 = self.edgeElements[edi, 0] + for i in range(0, 3): + ni = self.triElements[eL1, i] + if ni != n1 and ni != n2: + vec = midPt - self.nodes[ni] + dp = np.dot(vec, uNorm) + if dp > 0.0: + self.edgeUnitNorms[edi] = uNorm + else: + self.edgeUnitNorms[edi] = -uNorm + + self.unstructuredPost(elType) + + meshOut = dict() + meshOut["nodes"] = self.nodes + totalEls = self.numTriEls + self.numQuadEls + allEls = -np.ones((totalEls, 4), dtype=int) + allEls[0 : self.numTriEls, 0:3] = self.triElements[0 : self.numTriEls] + allEls[self.numTriEls : totalEls, :] = self.quadElements[0 : self.numQuadEls] + meshOut["elements"] = allEls + + return meshOut + + def plot2DMesh(self): + xLst = self.nodes[0 : self.numNodes, 0] + yLst = self.nodes[0 : self.numNodes, 1] + zLst = np.zeros(self.numNodes) + value = list() + v1 = list() + v2 = list() + v3 = list() + for i in range(0, self.numTriEls): + v1.append(self.triElements[i, 0]) + v2.append(self.triElements[i, 1]) + v3.append(self.triElements[i, 2]) + value.append(np.sin(i)) + for i in range(0, self.numQuadEls): + v1.append(self.quadElements[i, 0]) + v2.append(self.quadElements[i, 1]) + v3.append(self.quadElements[i, 2]) + value.append(np.sin(i)) + v1.append(self.quadElements[i, 0]) + v2.append(self.quadElements[i, 2]) + v3.append(self.quadElements[i, 3]) + value.append(np.sin(i)) + fig = go.Figure( + data=[ + go.Mesh3d( + x=xLst, + y=yLst, + z=zLst, + colorbar_title="", + colorscale=[[0.0, "blue"], [0.5, "yellow"], [1.0, "red"]], + intensity=value, + intensitymode="cell", + i=v1, + j=v2, + k=v3, + name="", + showscale=True, + ) + ] + ) + + fig.show() diff --git a/src/pynumad/shell/mesh3d.py b/src/pynumad/shell/mesh3d.py new file mode 100644 index 0000000..97f8589 --- /dev/null +++ b/src/pynumad/shell/mesh3d.py @@ -0,0 
+1,325 @@ +import numpy as np +from scipy import interpolate +from pynumad.shell.spatial_grid_list3d import * + + +class Mesh3D: + def __init__(self, boundaryNodes, boundaryFaces=[]): + self.nodeGL = None + self.faceGL = None + self.tetElGL = None + + self.minFaceArea = 0.0 + self.maxFaceArea = 1.0 + self.avgProjLen = 1.0 + + self.numBndNodes = len(boundaryNodes) + self.numNodes = self.numBndNodes + self.ndSize = self.numBndNodes + self.nodes = np.array(boundaryNodes) + + self.numBndFaces = len(boundaryFaces) + self.faceNodes = np.array(boundaryFaces) + self.numFaces = self.numBndFaces + self.faceSize = self.numBndFaces + self.faceElements = np.array([]) + self.faceUnitNorms = np.array([]) + + self.numTetEls = 0 + self.tetElSize = 0 + self.tetElements = np.array([]) + + self.numWedgeEls = 0 + self.wedgeElSize = 0 + self.wedgeElements = np.array([]) + + self.numHexEls = 0 + self.hexElSize = 0 + self.hexElements = np.array([]) + + def createSweptMesh( + self, + sweepMethod, + sweepElements, + sweepDistance=1.0, + point=[], + axis=[], + followNormal=False, + destNodes=[], + interpMethod="linear", + ): + ## sweepMethod = inDirection, toPoint, fromPoint, toDestNodes, revolve + """Object data modified: self.quadElements, self.nodes, self.quadElements + Parameters + ---------- + + Returns + ------- + nodes + elements + """ + nbNds = self.numBndNodes + try: + totSweepEls = sum(sweepElements) + ndSize = nbNds * (totSweepEls + 1) + stages = len(sweepElements) + multiStage = True + except: + totSweepEls = sweepElements + ndSize = nbNds * (sweepElements + 1) + stages = 1 + multiStage = False + tmp = self.nodes.copy() + self.nodes = np.zeros((ndSize, 3)) + self.nodes[0:nbNds] = tmp + self.ndSize = ndSize + self.numNodes = nbNds + + nbFcs = self.numBndFaces + elSize = nbFcs * totSweepEls + + self.wedgeElements = -np.ones((elSize, 6), dtype=int) + self.wedgeElSize = elSize + self.numWedgeEls = 0 + wERank = np.zeros(elSize, dtype=int) + + self.hexElements = -np.ones((elSize, 8), dtype=int) + self.hexElSize = elSize + self.numHexEls = 0 + hERank = np.zeros(elSize, dtype=int) + + methString = "inDirection toPoint fromPoint" + if sweepMethod in methString: + ndDir = list() + if sweepMethod == "inDirection": + mag = np.linalg.norm(axis) + unitAxis = (1.0 / mag) * np.array(axis) + for i in range(0, self.numNodes): + ndDir.append(unitAxis) + else: + pAr = np.array(point) + for i in range(0, self.numNodes): + if sweepMethod == "toPoint": + vec = pAr - nd + else: + vec = nd - pAr + mag = np.linalg.norm(vec) + unitVec = (1.0 / mag) * vec + ndDir.append(unitVec) + rowNds = self.numNodes + rowEls = self.numFaces + stepLen = sweepDistance / sweepElements + nNds = self.numNodes + wE = self.numWedgeEls + hE = self.numHexEls + eli = 0 + for i in range(0, sweepElements): + for j in range(0, rowNds): + newNd = self.nodes[j] + (i + 1) * stepLen * ndDir[j] + self.nodes[nNds] = newNd + nNds = nNds + 1 + for j in range(0, rowEls): + n1 = self.faceNodes[j, 0] + i * rowNds + n2 = self.faceNodes[j, 1] + i * rowNds + n3 = self.faceNodes[j, 2] + i * rowNds + if self.faceNodes[j, 3] == -1: + n4 = n1 + rowNds + n5 = n2 + rowNds + n6 = n3 + rowNds + self.wedgeElements[wE] = np.array([n1, n2, n3, n4, n5, n6]) + wERank[wE] = eli + wE = wE + 1 + else: + n4 = self.faceNodes[j, 3] + i * rowNds + n5 = n1 + rowNds + n6 = n2 + rowNds + n7 = n3 + rowNds + n8 = n4 + rowNds + self.hexElements[hE] = np.array( + [n1, n2, n3, n4, n5, n6, n7, n8] + ) + hERank[hE] = eli + hE = hE + 1 + eli = eli + 1 + self.numNodes = nNds + self.numWedgeEls = wE + 
self.numHexEls = hE + elif sweepMethod == "toDestNodes": + nNds = self.numNodes + nbNds = self.numBndNodes + wE = self.numWedgeEls + hE = self.numHexEls + eli = 0 + if not multiStage: + sweepElements = [sweepElements] + destNodes = [destNodes] + if interpMethod == "linear": + prevDest = self.nodes.copy() + cumElLay = 0 + for stg in range(0, stages): + dNds = np.array(destNodes[stg]) + ndDir = np.zeros((nbNds, 3)) + for ndi in range(0, nbNds): + ndDir[ndi] = (1.0 / sweepElements[stg]) * ( + dNds[ndi] - prevDest[ndi] + ) + for i in range(0, sweepElements[stg]): + for ndi in range(0, nbNds): + self.nodes[nNds] = self.nodes[nNds - nbNds] + ndDir[ndi] + nNds = nNds + 1 + for fci in range(0, self.numBndFaces): + n1 = self.faceNodes[fci, 0] + cumElLay * nbNds + n2 = self.faceNodes[fci, 1] + cumElLay * nbNds + n3 = self.faceNodes[fci, 2] + cumElLay * nbNds + if self.faceNodes[fci, 3] == -1: + n4 = n1 + nbNds + n5 = n2 + nbNds + n6 = n3 + nbNds + self.wedgeElements[wE] = np.array( + [n1, n2, n3, n4, n5, n6] + ) + wERank[wE] = eli + wE = wE + 1 + else: + n4 = self.faceNodes[fci, 3] + cumElLay * nbNds + n5 = n1 + nbNds + n6 = n2 + nbNds + n7 = n3 + nbNds + n8 = n4 + nbNds + self.hexElements[hE] = np.array( + [n1, n2, n3, n4, n5, n6, n7, n8] + ) + hERank[hE] = eli + hE = hE + 1 + eli = eli + 1 + cumElLay = cumElLay + 1 + prevDest = dNds.copy() + else: ## Smooth interpolation + xMat = np.zeros((nbNds, totSweepEls + 1)) + yMat = np.zeros((nbNds, totSweepEls + 1)) + zMat = np.zeros((nbNds, totSweepEls + 1)) + pDest = (1.0 / stages) * np.array(range(0, stages + 1)) + pAll = (1.0 / totSweepEls) * np.array(range(0, totSweepEls + 1)) + for ndi in range(0, nbNds): + xDest = [self.nodes[ndi, 0]] + yDest = [self.nodes[ndi, 1]] + zDest = [self.nodes[ndi, 2]] + for dNds in destNodes: + xDest.append(dNds[ndi][0]) + yDest.append(dNds[ndi][1]) + zDest.append(dNds[ndi][2]) + xDest = np.array(xDest) + iFun = interpolate.interp1d( + pDest, + xDest, + "cubic", + axis=0, + bounds_error=False, + fill_value="extrapolate", + ) + xAll = iFun(pAll) + xMat[ndi, :] = xAll + yDest = np.array(yDest) + iFun = interpolate.interp1d( + pDest, + yDest, + "cubic", + axis=0, + bounds_error=False, + fill_value="extrapolate", + ) + yAll = iFun(pAll) + yMat[ndi, :] = yAll + zDest = np.array(zDest) + iFun = interpolate.interp1d( + pDest, + zDest, + "cubic", + axis=0, + bounds_error=False, + fill_value="extrapolate", + ) + zAll = iFun(pAll) + zMat[ndi, :] = zAll + eli = 0 + for i in range(0, totSweepEls): + for ndi in range(0, nbNds): + self.nodes[nNds] = np.array( + [xMat[ndi, i + 1], yMat[ndi, i + 1], zMat[ndi, i + 1]] + ) + nNds = nNds + 1 + for fci in range(0, self.numBndFaces): + n1 = self.faceNodes[fci, 0] + i * nbNds + n2 = self.faceNodes[fci, 1] + i * nbNds + n3 = self.faceNodes[fci, 2] + i * nbNds + if self.faceNodes[fci, 3] == -1: + n4 = n1 + nbNds + n5 = n2 + nbNds + n6 = n3 + nbNds + self.wedgeElements[wE] = np.array([n1, n2, n3, n4, n5, n6]) + wERank[wE] = eli + wE = wE + 1 + else: + n4 = self.faceNodes[fci, 3] + i * nbNds + n5 = n1 + nbNds + n6 = n2 + nbNds + n7 = n3 + nbNds + n8 = n4 + nbNds + self.hexElements[hE] = np.array( + [n1, n2, n3, n4, n5, n6, n7, n8] + ) + hERank[hE] = eli + hE = hE + 1 + eli = eli + 1 + self.numNodes = nNds + self.numWedgeEls = wE + self.numHexEls = hE + + for eli in range(0, self.numWedgeEls): + n1 = self.wedgeElements[eli, 0] + n2 = self.wedgeElements[eli, 1] + n3 = self.wedgeElements[eli, 2] + n4 = self.wedgeElements[eli, 3] + v1 = self.nodes[n2] - self.nodes[n1] + v2 = self.nodes[n3] - 
self.nodes[n1] + v3 = self.nodes[n4] - self.nodes[n1] + mat = np.array([v1, v2, v3]) + detM = np.linalg.det(mat) + if detM < 0.0: + self.wedgeElements[eli, 1] = n3 + self.wedgeElements[eli, 2] = n2 + sw = self.wedgeElements[eli, 4] + self.wedgeElements[eli, 4] = self.wedgeElements[eli, 5] + self.wedgeElements[eli, 5] = sw + + for eli in range(0, self.numHexEls): + n1 = self.hexElements[eli, 0] + n2 = self.hexElements[eli, 1] + n4 = self.hexElements[eli, 3] + n5 = self.hexElements[eli, 4] + v1 = self.nodes[n2] - self.nodes[n1] + v2 = self.nodes[n4] - self.nodes[n1] + v3 = self.nodes[n5] - self.nodes[n1] + mat = np.array([v1, v2, v3]) + detM = np.linalg.det(mat) + if detM < 0.0: + self.hexElements[eli, 1] = n4 + self.hexElements[eli, 3] = n2 + sw = self.hexElements[eli, 5] + self.hexElements[eli, 5] = self.hexElements[eli, 7] + self.hexElements[eli, 7] = sw + + meshOut = dict() + meshOut["nodes"] = self.nodes + totEls = self.numWedgeEls + self.numHexEls + allEls = -np.ones((totEls, 8), dtype=int) + for eli in range(0, self.numWedgeEls): + allEls[wERank[eli], 0:6] = self.wedgeElements[eli] + for eli in range(0, self.numHexEls): + allEls[hERank[eli], 0:8] = self.hexElements[eli] + # if(self.numWedgeEls > 0): + # allEls[0:self.numWedgeEls,0:6] = self.wedgeElements[0:self.numWedgeEls] + # if(self.numHexEls > 0): + # allEls[self.numWedgeEls:totEls,0:8] = self.hexElements[0:self.numHexEls] + meshOut["elements"] = allEls + return meshOut diff --git a/src/pynumad/shell/MeshTools.py b/src/pynumad/shell/mesh_tools.py similarity index 51% rename from src/pynumad/shell/MeshTools.py rename to src/pynumad/shell/mesh_tools.py index ba237d5..3ee474e 100644 --- a/src/pynumad/shell/MeshTools.py +++ b/src/pynumad/shell/mesh_tools.py @@ -1,104 +1,109 @@ import numpy as np import plotly.graph_objects as go -from pynumad.shell.SpatialGridList2DClass import * -from pynumad.shell.SpatialGridList3DClass import * +from pynumad.shell.spatial_grid_list2d import * +from pynumad.shell.spatial_grid_list3d import * -## - Convert list of mesh objects into a single merged mesh, returning sets representing the elements/nodes from the original meshes + +## - Convert list of mesh objects into a single merged mesh, returning sets representing the elements/nodes from the original meshes def mergeDuplicateNodes(meshData): - allNds = meshData['nodes'] - allEls = meshData['elements'] + allNds = meshData["nodes"] + allEls = meshData["elements"] totNds = len(allNds) spaceDim = len(allNds[0]) totEls = len(allEls) elDim = len(allEls[0]) - maxX = np.amax(allNds[:,0]) - minX = np.amin(allNds[:,0]) - maxY = np.amax(allNds[:,1]) - minY = np.amin(allNds[:,1]) - nto1_2 = np.power(totNds,0.5) - nto1_3 = np.power(totNds,0.3333333) - nto1_4 = np.power(totNds,0.25) - if(spaceDim == 3): - maxZ = np.amax(allNds[:,2]) - minZ = np.amin(allNds[:,2]) - dimVec = np.array([(maxX-minX),(maxY-minY),(maxZ-minZ)]) + maxX = np.amax(allNds[:, 0]) + minX = np.amin(allNds[:, 0]) + maxY = np.amax(allNds[:, 1]) + minY = np.amin(allNds[:, 1]) + nto1_2 = np.power(totNds, 0.5) + nto1_3 = np.power(totNds, 0.3333333) + nto1_4 = np.power(totNds, 0.25) + if spaceDim == 3: + maxZ = np.amax(allNds[:, 2]) + minZ = np.amin(allNds[:, 2]) + dimVec = np.array([(maxX - minX), (maxY - minY), (maxZ - minZ)]) meshDim = np.linalg.norm(dimVec) - maxX = maxX + 0.01*meshDim - minX = minX - 0.01*meshDim - maxY = maxY + 0.01*meshDim - minY = minY - 0.01*meshDim - maxZ = maxZ + 0.01*meshDim - minZ = minZ - 0.01*meshDim - xSpacing = 0.5*(maxX - minX)/nto1_3 - ySpacing = 0.5*(maxY - 
minY)/nto1_3 - zSpacing = 0.5*(maxZ - minZ)/nto1_3 - nodeGL = SpatialGridList3D(minX,maxX,minY,maxY,minZ,maxZ,xSpacing,ySpacing,zSpacing) - tol = 1.0e-6*meshDim/nto1_3 + maxX = maxX + 0.01 * meshDim + minX = minX - 0.01 * meshDim + maxY = maxY + 0.01 * meshDim + minY = minY - 0.01 * meshDim + maxZ = maxZ + 0.01 * meshDim + minZ = minZ - 0.01 * meshDim + xSpacing = 0.5 * (maxX - minX) / nto1_3 + ySpacing = 0.5 * (maxY - minY) / nto1_3 + zSpacing = 0.5 * (maxZ - minZ) / nto1_3 + nodeGL = SpatialGridList3D( + minX, maxX, minY, maxY, minZ, maxZ, xSpacing, ySpacing, zSpacing + ) + tol = 1.0e-6 * meshDim / nto1_3 else: - dimVec = np.array([(maxX-minX),(maxY-minY)]) + dimVec = np.array([(maxX - minX), (maxY - minY)]) meshDim = np.linalg.norm(dimVec) - maxX = maxX + 0.01*meshDim - minX = minX - 0.01*meshDim - maxY = maxY + 0.01*meshDim - minY = minY - 0.01*meshDim - xSpacing = 0.5*(maxX - minX)/nto1_3 - ySpacing = 0.5*(maxY - minY)/nto1_3 - nodeGL = SpatialGridList2D(minX,maxX,minY,maxY,xSpacing,ySpacing) - tol = 1.0e-6*meshDim/nto1_2 - + maxX = maxX + 0.01 * meshDim + minX = minX - 0.01 * meshDim + maxY = maxY + 0.01 * meshDim + minY = minY - 0.01 * meshDim + xSpacing = 0.5 * (maxX - minX) / nto1_3 + ySpacing = 0.5 * (maxY - minY) / nto1_3 + nodeGL = SpatialGridList2D(minX, maxX, minY, maxY, xSpacing, ySpacing) + tol = 1.0e-6 * meshDim / nto1_2 + i = 0 for nd in allNds: - nodeGL.addEntry(i,nd) + nodeGL.addEntry(i, nd) i = i + 1 - - ndElim = -np.ones(totNds,dtype=int) - ndNewInd = -np.ones(totNds,dtype=int) - for n1i in range(0,totNds): - if(ndElim[n1i] == -1): - nearNds = nodeGL.findInRadius(allNds[n1i],tol) + + ndElim = -np.ones(totNds, dtype=int) + ndNewInd = -np.ones(totNds, dtype=int) + for n1i in range(0, totNds): + if ndElim[n1i] == -1: + nearNds = nodeGL.findInRadius(allNds[n1i], tol) for n2i in nearNds: - if(n2i > n1i and ndElim[n2i] == -1): + if n2i > n1i and ndElim[n2i] == -1: proj = allNds[n2i] - allNds[n1i] dist = np.linalg.norm(proj) - if(dist < tol): + if dist < tol: ndElim[n2i] = n1i ndi = 0 nodesFinal = list() - for n1i in range(0,totNds): - if(ndElim[n1i] == -1): + for n1i in range(0, totNds): + if ndElim[n1i] == -1: nodesFinal.append(allNds[n1i]) ndNewInd[n1i] = ndi ndi = ndi + 1 nodesFinal = np.array(nodesFinal) - for eli in range(0,totEls): - for j in range(0,elDim): - nd = allEls[eli,j] - if(nd != -1): - if(ndElim[nd] == -1): - allEls[eli,j] = ndNewInd[nd] + for eli in range(0, totEls): + for j in range(0, elDim): + nd = allEls[eli, j] + if nd != -1: + if ndElim[nd] == -1: + allEls[eli, j] = ndNewInd[nd] else: - allEls[eli,j] = ndNewInd[ndElim[nd]] - - meshData['nodes'] = nodesFinal - meshData['elements'] = allEls - + allEls[eli, j] = ndNewInd[ndElim[nd]] + + meshData["nodes"] = nodesFinal + meshData["elements"] = allEls + return meshData - + + def make3D(meshData): - numNodes = len(meshData['nodes']) - nodes3D = np.zeros((numNodes,3)) - nodes3D[:,0:2] = meshData['nodes'] + numNodes = len(meshData["nodes"]) + nodes3D = np.zeros((numNodes, 3)) + nodes3D[:, 0:2] = meshData["nodes"] dataOut = dict() - dataOut['nodes'] = nodes3D - dataOut['elements'] = meshData['elements'] + dataOut["nodes"] = nodes3D + dataOut["elements"] = meshData["elements"] return dataOut - + + def plotShellMesh(meshData): - xLst = meshData['nodes'][:,0] - yLst = meshData['nodes'][:,1] + xLst = meshData["nodes"][:, 0] + yLst = meshData["nodes"][:, 1] try: - zLst = meshData['nodes'][:,2] + zLst = meshData["nodes"][:, 2] except: zLst = np.zeros(len(xLst)) value = list() @@ -106,50 +111,51 @@ def 
plotShellMesh(meshData): v2 = list() v3 = list() i = 0 - for el in meshData['elements']: + for el in meshData["elements"]: v1.append(el[0]) v2.append(el[1]) v3.append(el[2]) value.append(np.sin(i)) - if(el[3] != -1): + if el[3] != -1: v1.append(el[0]) v2.append(el[2]) v3.append(el[3]) value.append(np.sin(i)) i = i + 1 - fig = go.Figure(data=[ - go.Mesh3d( - x=xLst, - y=yLst, - z=zLst, - colorbar_title = '', - colorscale=[[0.0, 'white'], - [0.5, 'gray'], - [1.0, 'black']], - intensity=value, - intensitymode='cell', - i=v1, - j=v2, - k=v3, - name='', - showscale=True - ) - ]) + fig = go.Figure( + data=[ + go.Mesh3d( + x=xLst, + y=yLst, + z=zLst, + colorbar_title="", + colorscale=[[0.0, "white"], [0.5, "gray"], [1.0, "black"]], + intensity=value, + intensitymode="cell", + i=v1, + j=v2, + k=v3, + name="", + showscale=True, + ) + ] + ) fig.show() - + + def plotSolidMesh(meshData): - xLst = meshData['nodes'][:,0] - yLst = meshData['nodes'][:,1] - zLst = meshData['nodes'][:,2] + xLst = meshData["nodes"][:, 0] + yLst = meshData["nodes"][:, 1] + zLst = meshData["nodes"][:, 2] value = list() v1 = list() v2 = list() v3 = list() i = 0 - for el in meshData['elements']: + for el in meshData["elements"]: si = np.sin(i) - if(el[4] == -1): + if el[4] == -1: v1.append(el[0]) v2.append(el[1]) v3.append(el[2]) @@ -166,7 +172,7 @@ def plotSolidMesh(meshData): v2.append(el[2]) v3.append(el[3]) value.append(si) - elif(el[6] == -1): + elif el[6] == -1: v1.append(el[0]) v2.append(el[1]) v3.append(el[2]) @@ -183,7 +189,7 @@ def plotSolidMesh(meshData): v2.append(el[3]) v3.append(el[4]) value.append(si) - + v1.append(el[0]) v2.append(el[2]) v3.append(el[3]) @@ -217,7 +223,7 @@ def plotSolidMesh(meshData): v2.append(el[5]) v3.append(el[6]) value.append(si) - + v1.append(el[0]) v2.append(el[1]) v3.append(el[4]) @@ -234,7 +240,7 @@ def plotSolidMesh(meshData): v2.append(el[6]) v3.append(el[7]) value.append(si) - + v1.append(el[0]) v2.append(el[1]) v3.append(el[2]) @@ -252,24 +258,26 @@ def plotSolidMesh(meshData): v3.append(el[7]) value.append(si) i = i + 1 - fig = go.Figure(data=[ - go.Mesh3d( - x=xLst, - y=yLst, - z=zLst, - colorbar_title = '', - colorscale=[[0.0, 'white'], - [0.5, 'gray'], - [1.0, 'black']], - intensity=value, - intensitymode='cell', - i=v1, - j=v2, - k=v3, - name='', - showscale=True - ) - ]) + fig = go.Figure( + data=[ + go.Mesh3d( + x=xLst, + y=yLst, + z=zLst, + colorbar_title="", + colorscale=[[0.0, "white"], [0.5, "gray"], [1.0, "black"]], + intensity=value, + intensitymode="cell", + i=v1, + j=v2, + k=v3, + name="", + showscale=True, + ) + ] + ) fig.show() -## -Create node/element set within a spatial range or radius \ No newline at end of file + + +## -Create node/element set within a spatial range or radius diff --git a/src/pynumad/shell/segment2d.py b/src/pynumad/shell/segment2d.py new file mode 100644 index 0000000..785e6ed --- /dev/null +++ b/src/pynumad/shell/segment2d.py @@ -0,0 +1,114 @@ +import numpy as np +from scipy import interpolate + + +class Segment2D: + def __init__(self, segType, keyPts, numEls): + self.segType = segType ## line, curve, arc + self.keyPts = keyPts + self.numEls = numEls + + def getNodesEdges(self): + nNds = self.numEls + 1 + if self.segType == "line": + pt1 = np.array(self.keyPts[0]) + pt2 = np.array(self.keyPts[1]) + proj = pt2 - pt1 + steps = (1.0 / self.numEls) * np.array(range(0, nNds)) + nds = list() + for st in steps: + nd = pt1 + st * proj + nds.append(nd) + nodes = np.array(nds) + eN1 = np.array(range(0, self.numEls), dtype=int) + eN2 = np.array(range(1, 
nNds), dtype=int) + edges = np.transpose(np.array([eN1, eN2])) + output = dict() + output["nodes"] = nodes + output["edges"] = edges + return output + elif self.segType == "curve": + kPTp = np.transpose(np.array(self.keyPts)) + numKp = len(self.keyPts) + pKp = (1.0 / (numKp - 1)) * np.array(range(0, numKp)) + pNds = (1.0 / self.numEls) * np.array(range(0, nNds)) + if numKp == 2: + order = "linear" + elif numKp == 3: + order = "quadratic" + else: + order = "cubic" + iFun = interpolate.interp1d( + pKp, + kPTp[0], + order, + axis=0, + bounds_error=False, + fill_value="extrapolate", + ) + xNds = iFun(pNds) + iFun = interpolate.interp1d( + pKp, + kPTp[1], + order, + axis=0, + bounds_error=False, + fill_value="extrapolate", + ) + yNds = iFun(pNds) + eN1 = np.array(range(0, self.numEls), dtype=int) + eN2 = np.array(range(1, nNds), dtype=int) + output = dict() + output["nodes"] = np.transpose(np.array([xNds, yNds])) + output["edges"] = np.transpose(np.array([eN1, eN2])) + return output + elif self.segType == "arc": + kPar = np.array(self.keyPts) + dist13 = np.linalg.norm(kPar[0] - kPar[2]) + if dist13 < 1.0e-12: + center = 0.5 * (kPar[0] + kPar[1]) + rad = np.linalg.norm(kPar[0] - center) + else: + center = 0.3333 * (kPar[0] + kPar[1] + kPar[2]) + ndc = 1.0 + i = 0 + Rvec = np.zeros(2) + dRdC = np.zeros((2, 2)) + while ndc > 1.0e-12 and i < 50: + v1 = kPar[0] - center + v2 = kPar[1] - center + v3 = kPar[2] - center + Rvec[0] = np.dot(v1, v1) - np.dot(v2, v2) + Rvec[1] = np.dot(v1, v1) - np.dot(v3, v3) + dRdC[0] = 2.0 * (v2 - v1) + dRdC[1] = 2.0 * (v3 - v1) + dc = np.linalg.solve(dRdC, -Rvec) + center = center + dc + ndc = np.linalg.norm(dc) + i = i + 1 + rad = np.linalg.norm(kPar[0] - center) + theta = np.zeros(3) + for i in range(0, 3): + xRel = kPar[i, 0] - center[0] + yRel = kPar[i, 1] - center[1] + if abs(xRel) < 1.0e-12: + xRel = 1.0e-12 + if xRel > 0.0: + theta[i] = np.arctan(yRel / xRel) + else: + theta[i] = np.arctan(yRel / xRel) + np.pi + if theta[1] > theta[0] and theta[1] < theta[2]: + thetaNds = np.linspace(theta[0], theta[2], nNds) + else: + t0adj = theta[0] + 2.0 * np.pi + thetaNds = np.linspace(t0adj, theta[2], nNds) + nodes = np.zeros((nNds, 2)) + for thi in range(0, nNds): + nodes[thi, 0] = rad * np.cos(thetaNds[thi]) + center[0] + nodes[thi, 1] = rad * np.sin(thetaNds[thi]) + center[1] + eN1 = np.array(range(0, self.numEls), dtype=int) + eN2 = np.array(range(1, nNds), dtype=int) + output = dict() + output["nodes"] = nodes + output["edges"] = np.transpose(np.array([eN1, eN2])) + return output diff --git a/src/pynumad/shell/shell.py b/src/pynumad/shell/shell.py index 13f9620..4cf2682 100644 --- a/src/pynumad/shell/shell.py +++ b/src/pynumad/shell/shell.py @@ -6,10 +6,11 @@ import pynumad from pynumad.utils.interpolation import interpolator_wrap + ##from pynumad.shell.shellClasses import shellRegion, elementSet, NuMesh3D, spatialGridList2D, spatialGridList3D -from pynumad.shell.SurfaceClass import Surface -from pynumad.shell.Mesh3DClass import Mesh3D -from pynumad.shell.ShellRegionClass import ShellRegion +from pynumad.shell.surface import Surface +from pynumad.shell.mesh3d import Mesh3D +from pynumad.shell.shell_region import ShellRegion from pynumad.analysis.ansys.write import writeAnsysShellModel @@ -25,7 +26,7 @@ def shellMeshGeneral(blade, forSolid, includeAdhesive, elementSize): forSolid: bool includeAdhesive: bool elementSize: float - + Returns ------- meshData: @@ -38,8 +39,8 @@ def shellMeshGeneral(blade, forSolid, includeAdhesive, elementSize): - [n1,n2,n3,n4] - 
[n1,n2,n3,n4] ... - Set list and section list for the outer shell and shear webs. - These are companion lists with the same length and order, + Set list and section list for the outer shell and shear webs. + These are companion lists with the same length and order, so meshData['sets']['element'][i] corresponds to meshData['sections'][i] sets: element: @@ -75,7 +76,7 @@ def shellMeshGeneral(blade, forSolid, includeAdhesive, elementSize): geomSz = blade.geometry.shape lenGeom = geomSz[0] numXsec = geomSz[2] - XSCurvePts = np.array([],dtype=int) + XSCurvePts = np.array([], dtype=int) ## Determine the key curve points along the OML at each cross section for i in range(numXsec): @@ -83,108 +84,113 @@ def shellMeshGeneral(blade, forSolid, includeAdhesive, elementSize): minDist = 1 lePt = 0 for j in range(lenGeom): - prof = blade.profiles[j,:,i] + prof = blade.profiles[j, :, i] mag = np.linalg.norm(prof) - if (mag < minDist): + if mag < minDist: minDist = mag lePt = j for j in range(5): - kpCrd = blade.keypoints[j,:,i] + kpCrd = blade.keypoints[j, :, i] minDist = blade.ichord[i] pti = 1 for k in range(lePt): - ptCrd = blade.geometry[k,:,i] + ptCrd = blade.geometry[k, :, i] vec = ptCrd - kpCrd mag = np.linalg.norm(vec) - if (mag < minDist): + if mag < minDist: minDist = mag pti = k - keyPts = np.concatenate((keyPts,[pti])) - blade.geometry[pti,:,i] = np.array(kpCrd) + keyPts = np.concatenate((keyPts, [pti])) + blade.geometry[pti, :, i] = np.array(kpCrd) - keyPts = np.concatenate((keyPts,[lePt])) - for j in range(5,10): - kpCrd = blade.keypoints[j,:,i] + keyPts = np.concatenate((keyPts, [lePt])) + for j in range(5, 10): + kpCrd = blade.keypoints[j, :, i] minDist = blade.ichord[i] pti = 1 - for k in range(lePt,lenGeom): - ptCrd = blade.geometry[k,:,i] + for k in range(lePt, lenGeom): + ptCrd = blade.geometry[k, :, i] vec = ptCrd - kpCrd mag = np.linalg.norm(vec) - if (mag < minDist): + if mag < minDist: minDist = mag pti = k - keyPts = np.concatenate((keyPts,[pti])) - blade.geometry[pti,:,i] = np.array(kpCrd) + keyPts = np.concatenate((keyPts, [pti])) + blade.geometry[pti, :, i] = np.array(kpCrd) - keyPts = np.concatenate((keyPts,[lenGeom-1])) + keyPts = np.concatenate((keyPts, [lenGeom - 1])) allPts = np.array([keyPts[0]]) - for j in range(0,len(keyPts) - 1): - secPts = np.linspace(keyPts[j],keyPts[j+1],4) + for j in range(0, len(keyPts) - 1): + secPts = np.linspace(keyPts[j], keyPts[j + 1], 4) secPts = np.round(secPts).astype(int) - allPts = np.concatenate((allPts,secPts[1:4])) + allPts = np.concatenate((allPts, secPts[1:4])) + + XSCurvePts = np.vstack((XSCurvePts, allPts)) if XSCurvePts.size else allPts + rws, cls = XSCurvePts.shape - XSCurvePts = np.vstack((XSCurvePts,allPts)) if XSCurvePts.size else allPts - rws,cls = XSCurvePts.shape - ## Create longitudinal splines down the blade through each of the key X-section points - splineX = blade.geometry[XSCurvePts[0,:],0,0] - splineY = blade.geometry[XSCurvePts[0,:],1,0] - splineZ = blade.geometry[XSCurvePts[0,:],2,0] - for i in range(1,rws): - Xrow = blade.geometry[XSCurvePts[i,:],0,i] - splineX = np.vstack((splineX,Xrow.T)) - Yrow = blade.geometry[XSCurvePts[i,:],1,i] - splineY = np.vstack((splineY,Yrow.T)) - Zrow = blade.geometry[XSCurvePts[i,:],2,i] - splineZ = np.vstack((splineZ,Zrow.T)) - - spParam = np.transpose(np.linspace(0,1,rws)) + splineX = blade.geometry[XSCurvePts[0, :], 0, 0] + splineY = blade.geometry[XSCurvePts[0, :], 1, 0] + splineZ = blade.geometry[XSCurvePts[0, :], 2, 0] + for i in range(1, rws): + Xrow = 
blade.geometry[XSCurvePts[i, :], 0, i] + splineX = np.vstack((splineX, Xrow.T)) + Yrow = blade.geometry[XSCurvePts[i, :], 1, i] + splineY = np.vstack((splineY, Yrow.T)) + Zrow = blade.geometry[XSCurvePts[i, :], 2, i] + splineZ = np.vstack((splineZ, Zrow.T)) + + spParam = np.transpose(np.linspace(0, 1, rws)) nSpi = rws + 2 * (rws - 1) - spParami = np.transpose(np.linspace(0,1,nSpi)) - splineXi = interpolator_wrap(spParam,splineX[:,0],spParami,'pchip') - splineYi = interpolator_wrap(spParam,splineY[:,0],spParami,'pchip') - splineZi = interpolator_wrap(spParam,splineZ[:,0],spParami,'pchip') - for i in range(1,cls): - splineXi = np.vstack([splineXi,interpolator_wrap(spParam,splineX[:,i],spParami,'pchip')]) - splineYi = np.vstack([splineYi,interpolator_wrap(spParam,splineY[:,i],spParami,'pchip')]) - splineZi = np.vstack([splineZi,interpolator_wrap(spParam,splineZ[:,i],spParami,'pchip')]) + spParami = np.transpose(np.linspace(0, 1, nSpi)) + splineXi = interpolator_wrap(spParam, splineX[:, 0], spParami, "pchip") + splineYi = interpolator_wrap(spParam, splineY[:, 0], spParami, "pchip") + splineZi = interpolator_wrap(spParam, splineZ[:, 0], spParami, "pchip") + for i in range(1, cls): + splineXi = np.vstack( + [splineXi, interpolator_wrap(spParam, splineX[:, i], spParami, "pchip")] + ) + splineYi = np.vstack( + [splineYi, interpolator_wrap(spParam, splineY[:, i], spParami, "pchip")] + ) + splineZi = np.vstack( + [splineZi, interpolator_wrap(spParam, splineZ[:, i], spParami, "pchip")] + ) splineXi = splineXi.T splineYi = splineYi.T splineZi = splineZi.T ## Determine the first spanwise section that needs adhesive - if (includeAdhesive == 1): + if includeAdhesive == 1: stPt = 0 frstXS = 0 - while (frstXS == 0 and stPt < splineXi.shape[0]): - - v1x = splineXi[stPt,6] - splineXi[stPt,4] - v1y = splineYi[stPt,6] - splineYi[stPt,4] - v1z = splineZi[stPt,6] - splineZi[stPt,4] - v2x = splineXi[stPt,30] - splineXi[stPt,32] - v2y = splineYi[stPt,30] - splineYi[stPt,32] - v2z = splineZi[stPt,30] - splineZi[stPt,32] + while frstXS == 0 and stPt < splineXi.shape[0]: + v1x = splineXi[stPt, 6] - splineXi[stPt, 4] + v1y = splineYi[stPt, 6] - splineYi[stPt, 4] + v1z = splineZi[stPt, 6] - splineZi[stPt, 4] + v2x = splineXi[stPt, 30] - splineXi[stPt, 32] + v2y = splineYi[stPt, 30] - splineYi[stPt, 32] + v2z = splineZi[stPt, 30] - splineZi[stPt, 32] mag1 = np.sqrt(v1x * v1x + v1y * v1y + v1z * v1z) mag2 = np.sqrt(v2x * v2x + v2y * v2y + v2z * v2z) dp = (1 / (mag1 * mag2)) * (v1x * v2x + v1y * v2y + v1z * v2z) - if (dp > 0.7071): + if dp > 0.7071: frstXS = stPt stPt = stPt + 3 - if (frstXS == 0): + if frstXS == 0: frstXS = splineXi.shape[0] else: frstXS = splineXi.shape[0] - + ## Generate the mesh using the splines as surface guides bladeSurf = Surface() ## Outer shell sections secList = list() stPt = 0 for i in range(rws - 1): - if (stPt < frstXS): + if stPt < frstXS: stSec = 0 endSec = 11 stSp = 0 @@ -192,274 +198,407 @@ def shellMeshGeneral(blade, forSolid, includeAdhesive, elementSize): stSec = 1 endSec = 10 stSp = 3 - for j in range(stSec,endSec+1): - shellKp = np.array([ - [splineXi[stPt,stSp],splineYi[stPt,stSp],splineZi[stPt,stSp]], - [splineXi[stPt,stSp + 3],splineYi[stPt,stSp + 3],splineZi[stPt,stSp + 3]], - [splineXi[stPt + 3,stSp + 3],splineYi[stPt + 3,stSp + 3],splineZi[stPt + 3,stSp + 3]], - [splineXi[stPt + 3,stSp],splineYi[stPt + 3,stSp],splineZi[stPt + 3,stSp]], - [splineXi[stPt,stSp + 1],splineYi[stPt,stSp + 1],splineZi[stPt,stSp + 1]], - [splineXi[stPt,stSp + 2],splineYi[stPt,stSp + 
2],splineZi[stPt,stSp + 2]], - [splineXi[stPt + 1,stSp + 3],splineYi[stPt + 1,stSp + 3],splineZi[stPt + 1,stSp + 3]], - [splineXi[stPt + 2,stSp + 3],splineYi[stPt + 2,stSp + 3],splineZi[stPt + 2,stSp + 3]], - [splineXi[stPt + 3,stSp + 2],splineYi[stPt + 3,stSp + 2],splineZi[stPt + 3,stSp + 2]], - [splineXi[stPt + 3,stSp + 1],splineYi[stPt + 3,stSp + 1],splineZi[stPt + 3,stSp + 1]], - [splineXi[stPt + 2,stSp],splineYi[stPt + 2,stSp],splineZi[stPt + 2,stSp]], - [splineXi[stPt + 1,stSp],splineYi[stPt + 1,stSp],splineZi[stPt + 1,stSp]], - [splineXi[stPt + 1,stSp + 1],splineYi[stPt + 1,stSp + 1],splineZi[stPt + 1,stSp + 1]], - [splineXi[stPt + 1,stSp + 2],splineYi[stPt + 1,stSp + 2],splineZi[stPt + 1,stSp + 2]], - [splineXi[stPt + 2,stSp + 2],splineYi[stPt + 2,stSp + 2],splineZi[stPt + 2,stSp + 2]], - [splineXi[stPt + 2,stSp + 1],splineYi[stPt + 2,stSp + 1],splineZi[stPt + 2,stSp + 1]] - ]) - vec = shellKp[1,:] - shellKp[0,:] + for j in range(stSec, endSec + 1): + shellKp = np.array( + [ + [splineXi[stPt, stSp], splineYi[stPt, stSp], splineZi[stPt, stSp]], + [ + splineXi[stPt, stSp + 3], + splineYi[stPt, stSp + 3], + splineZi[stPt, stSp + 3], + ], + [ + splineXi[stPt + 3, stSp + 3], + splineYi[stPt + 3, stSp + 3], + splineZi[stPt + 3, stSp + 3], + ], + [ + splineXi[stPt + 3, stSp], + splineYi[stPt + 3, stSp], + splineZi[stPt + 3, stSp], + ], + [ + splineXi[stPt, stSp + 1], + splineYi[stPt, stSp + 1], + splineZi[stPt, stSp + 1], + ], + [ + splineXi[stPt, stSp + 2], + splineYi[stPt, stSp + 2], + splineZi[stPt, stSp + 2], + ], + [ + splineXi[stPt + 1, stSp + 3], + splineYi[stPt + 1, stSp + 3], + splineZi[stPt + 1, stSp + 3], + ], + [ + splineXi[stPt + 2, stSp + 3], + splineYi[stPt + 2, stSp + 3], + splineZi[stPt + 2, stSp + 3], + ], + [ + splineXi[stPt + 3, stSp + 2], + splineYi[stPt + 3, stSp + 2], + splineZi[stPt + 3, stSp + 2], + ], + [ + splineXi[stPt + 3, stSp + 1], + splineYi[stPt + 3, stSp + 1], + splineZi[stPt + 3, stSp + 1], + ], + [ + splineXi[stPt + 2, stSp], + splineYi[stPt + 2, stSp], + splineZi[stPt + 2, stSp], + ], + [ + splineXi[stPt + 1, stSp], + splineYi[stPt + 1, stSp], + splineZi[stPt + 1, stSp], + ], + [ + splineXi[stPt + 1, stSp + 1], + splineYi[stPt + 1, stSp + 1], + splineZi[stPt + 1, stSp + 1], + ], + [ + splineXi[stPt + 1, stSp + 2], + splineYi[stPt + 1, stSp + 2], + splineZi[stPt + 1, stSp + 2], + ], + [ + splineXi[stPt + 2, stSp + 2], + splineYi[stPt + 2, stSp + 2], + splineZi[stPt + 2, stSp + 2], + ], + [ + splineXi[stPt + 2, stSp + 1], + splineYi[stPt + 2, stSp + 1], + splineZi[stPt + 2, stSp + 1], + ], + ] + ) + vec = shellKp[1, :] - shellKp[0, :] mag = np.linalg.norm(vec) - nEl = np.array([],dtype=int) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - vec = shellKp[2,:] - shellKp[1,:] + nEl = np.array([], dtype=int) + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + vec = shellKp[2, :] - shellKp[1, :] mag = np.linalg.norm(vec) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - vec = shellKp[3,:] - shellKp[2,:] + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + vec = shellKp[3, :] - shellKp[2, :] mag = np.linalg.norm(vec) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - vec = shellKp[0,:] - shellKp[3,:] + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + vec = shellKp[0, :] - shellKp[3, :] mag = np.linalg.norm(vec) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - 
bladeSurf.addShellRegion('quad3',shellKp,nEl,name=blade.stacks[j,i].name,elType='quad',meshMethod='structured') + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + bladeSurf.addShellRegion( + "quad3", + shellKp, + nEl, + name=blade.stacks[j, i].name, + elType="quad", + meshMethod="structured", + ) newSec = dict() - newSec['type'] = 'shell' + newSec["type"] = "shell" layup = list() - for pg in blade.stacks[j,i].plygroups: - totThick = pg.thickness*pg.nPlies - ply = [pg.materialid,totThick,pg.angle] + for pg in blade.stacks[j, i].plygroups: + totThick = pg.thickness * pg.nPlies + ply = [pg.materialid, totThick, pg.angle] layup.append(ply) - newSec['layup'] = layup - newSec['elementSet'] = blade.stacks[j,i].name + newSec["layup"] = layup + newSec["elementSet"] = blade.stacks[j, i].name secList.append(newSec) stSp = stSp + 3 stPt = stPt + 3 - + ## Shift the appropriate splines if the mesh is for a solid model seed - if (forSolid == 1): - caseIndex = np.array([ - [9,27,3], - [12,24,3], - [24,12,8], - [27,9,8]]) + if forSolid == 1: + caseIndex = np.array([[9, 27, 3], [12, 24, 3], [24, 12, 8], [27, 9, 8]]) for i in range(caseIndex.shape[0]): - spl = caseIndex[i,0] - tgtSp = caseIndex[i,1] - sec = caseIndex[i,2] + spl = caseIndex[i, 0] + tgtSp = caseIndex[i, 1] + sec = caseIndex[i, 2] stPt = 0 - for j in range(rws-1): + for j in range(rws - 1): totalThick = 0 for k in range(3): - tpp = 0.001 * blade.stacks[sec,j].plygroups[k].thickness - npls = blade.stacks[sec,j].plygroups[k].nPlies + tpp = 0.001 * blade.stacks[sec, j].plygroups[k].thickness + npls = blade.stacks[sec, j].plygroups[k].nPlies totalThick = totalThick + tpp * npls for k in range(3): - vx = splineXi[stPt,tgtSp] - splineXi[stPt,spl] - vy = splineYi[stPt,tgtSp] - splineYi[stPt,spl] - vz = splineZi[stPt,tgtSp] - splineZi[stPt,spl] + vx = splineXi[stPt, tgtSp] - splineXi[stPt, spl] + vy = splineYi[stPt, tgtSp] - splineYi[stPt, spl] + vz = splineZi[stPt, tgtSp] - splineZi[stPt, spl] magInv = 1 / np.sqrt(vx * vx + vy * vy + vz * vz) ux = magInv * vx uy = magInv * vy uz = magInv * vz - splineXi[stPt,spl] = splineXi[stPt,spl] + totalThick * ux - splineYi[stPt,spl] = splineYi[stPt,spl] + totalThick * uy - splineZi[stPt,spl] = splineZi[stPt,spl] + totalThick * uz + splineXi[stPt, spl] = splineXi[stPt, spl] + totalThick * ux + splineYi[stPt, spl] = splineYi[stPt, spl] + totalThick * uy + splineZi[stPt, spl] = splineZi[stPt, spl] + totalThick * uz stPt = stPt + 1 - + ## Shear web sections stPt = 0 web1Sets = np.array([]) web2Sets = np.array([]) - for i in range(rws-1): + for i in range(rws - 1): if blade.swstacks[0][i].plygroups: - shellKp = np.zeros((16,3)) - shellKp[0,:] = np.array([splineXi[stPt,12],splineYi[stPt,12],splineZi[stPt,12]]) - shellKp[1,:] = np.array([splineXi[stPt,24],splineYi[stPt,24],splineZi[stPt,24]]) - shellKp[2,:] = np.array([splineXi[stPt + 3,24],splineYi[stPt + 3,24],splineZi[stPt + 3,24]]) - shellKp[3,:] = np.array([splineXi[stPt + 3,12],splineYi[stPt + 3,12],splineZi[stPt + 3,12]]) - shellKp[6,:] = np.array([splineXi[stPt + 1,24],splineYi[stPt + 1,24],splineZi[stPt + 1,24]]) - shellKp[7,:] = np.array([splineXi[stPt + 2,24],splineYi[stPt + 2,24],splineZi[stPt + 2,24]]) - shellKp[10,:] = np.array([splineXi[stPt + 2,12],splineYi[stPt + 2,12],splineZi[stPt + 2,12]]) - shellKp[11,:] = np.array([splineXi[stPt + 1,12],splineYi[stPt + 1,12],splineZi[stPt + 1,12]]) - shellKp[4,:] = 0.6666 * shellKp[0,:] + 0.3333 * shellKp[1,:] - shellKp[5,:] = 0.3333 * shellKp[0,:] + 0.6666 * shellKp[1,:] - shellKp[8,:] = 
0.6666 * shellKp[2,:] + 0.3333 * shellKp[3,:] - shellKp[9,:] = 0.3333 * shellKp[2,:] + 0.6666 * shellKp[3,:] - shellKp[12,:] = 0.6666 * shellKp[11,:] + 0.3333 * shellKp[6,:] - shellKp[13,:] = 0.3333 * shellKp[11,:] + 0.6666 * shellKp[6,:] - shellKp[14,:] = 0.6666 * shellKp[7,:] + 0.3333 * shellKp[10,:] - shellKp[15,:] = 0.3333 * shellKp[7,:] + 0.6666 * shellKp[10,:] - - vec = shellKp[1,:] - shellKp[0,:] + shellKp = np.zeros((16, 3)) + shellKp[0, :] = np.array( + [splineXi[stPt, 12], splineYi[stPt, 12], splineZi[stPt, 12]] + ) + shellKp[1, :] = np.array( + [splineXi[stPt, 24], splineYi[stPt, 24], splineZi[stPt, 24]] + ) + shellKp[2, :] = np.array( + [splineXi[stPt + 3, 24], splineYi[stPt + 3, 24], splineZi[stPt + 3, 24]] + ) + shellKp[3, :] = np.array( + [splineXi[stPt + 3, 12], splineYi[stPt + 3, 12], splineZi[stPt + 3, 12]] + ) + shellKp[6, :] = np.array( + [splineXi[stPt + 1, 24], splineYi[stPt + 1, 24], splineZi[stPt + 1, 24]] + ) + shellKp[7, :] = np.array( + [splineXi[stPt + 2, 24], splineYi[stPt + 2, 24], splineZi[stPt + 2, 24]] + ) + shellKp[10, :] = np.array( + [splineXi[stPt + 2, 12], splineYi[stPt + 2, 12], splineZi[stPt + 2, 12]] + ) + shellKp[11, :] = np.array( + [splineXi[stPt + 1, 12], splineYi[stPt + 1, 12], splineZi[stPt + 1, 12]] + ) + shellKp[4, :] = 0.6666 * shellKp[0, :] + 0.3333 * shellKp[1, :] + shellKp[5, :] = 0.3333 * shellKp[0, :] + 0.6666 * shellKp[1, :] + shellKp[8, :] = 0.6666 * shellKp[2, :] + 0.3333 * shellKp[3, :] + shellKp[9, :] = 0.3333 * shellKp[2, :] + 0.6666 * shellKp[3, :] + shellKp[12, :] = 0.6666 * shellKp[11, :] + 0.3333 * shellKp[6, :] + shellKp[13, :] = 0.3333 * shellKp[11, :] + 0.6666 * shellKp[6, :] + shellKp[14, :] = 0.6666 * shellKp[7, :] + 0.3333 * shellKp[10, :] + shellKp[15, :] = 0.3333 * shellKp[7, :] + 0.6666 * shellKp[10, :] + + vec = shellKp[1, :] - shellKp[0, :] mag = np.linalg.norm(vec) - nEl = np.array([],dtype=int) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - vec = shellKp[2,:] - shellKp[1,:] + nEl = np.array([], dtype=int) + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + vec = shellKp[2, :] - shellKp[1, :] mag = np.linalg.norm(vec) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - vec = shellKp[3,:] - shellKp[2,:] + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + vec = shellKp[3, :] - shellKp[2, :] mag = np.linalg.norm(vec) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - vec = shellKp[0,:] - shellKp[3,:] + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + vec = shellKp[0, :] - shellKp[3, :] mag = np.linalg.norm(vec) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) - bladeSurf.addShellRegion('quad3',shellKp,nEl,name=blade.swstacks[0][i].name,elType='quad',meshMethod='structured') + bladeSurf.addShellRegion( + "quad3", + shellKp, + nEl, + name=blade.swstacks[0][i].name, + elType="quad", + meshMethod="structured", + ) newSec = dict() - newSec['type'] = 'shell' + newSec["type"] = "shell" layup = list() for pg in blade.swstacks[0][i].plygroups: - totThick = pg.thickness*pg.nPlies - ply = [pg.materialid,totThick,pg.angle] + totThick = pg.thickness * pg.nPlies + ply = [pg.materialid, totThick, pg.angle] layup.append(ply) - newSec['layup'] = layup - newSec['elementSet'] = blade.swstacks[0][i].name + newSec["layup"] = layup + newSec["elementSet"] = blade.swstacks[0][i].name secList.append(newSec) if 
blade.swstacks[1][i].plygroups: - shellKp = np.zeros((16,3)) - shellKp[0,:] = np.array([splineXi[stPt,27],splineYi[stPt,27],splineZi[stPt,27]]) - shellKp[1,:] = np.array([splineXi[stPt,9],splineYi[stPt,9],splineZi[stPt,9]]) - shellKp[2,:] = np.array([splineXi[stPt + 3,9],splineYi[stPt + 3,9],splineZi[stPt + 3,9]]) - shellKp[3,:] = np.array([splineXi[stPt + 3,27],splineYi[stPt + 3,27],splineZi[stPt + 3,27]]) - shellKp[6,:] = np.array([splineXi[stPt + 1,9],splineYi[stPt + 1,9],splineZi[stPt + 1,9]]) - shellKp[7,:] = np.array([splineXi[stPt + 2,9],splineYi[stPt + 2,9],splineZi[stPt + 2,9]]) - shellKp[10,:] = np.array([splineXi[stPt + 2,27],splineYi[stPt + 2,27],splineZi[stPt + 2,27]]) - shellKp[11,:] = np.array([splineXi[stPt + 1,27],splineYi[stPt + 1,27],splineZi[stPt + 1,27]]) - shellKp[4,:] = 0.6666 * shellKp[0,:] + 0.3333 * shellKp[1,:] - shellKp[5,:] = 0.3333 * shellKp[0,:] + 0.6666 * shellKp[1,:] - shellKp[8,:] = 0.6666 * shellKp[2,:] + 0.3333 * shellKp[3,:] - shellKp[9,:] = 0.3333 * shellKp[2,:] + 0.6666 * shellKp[3,:] - shellKp[12,:] = 0.6666 * shellKp[11,:] + 0.3333 * shellKp[6,:] - shellKp[13,:] = 0.3333 * shellKp[11,:] + 0.6666 * shellKp[6,:] - shellKp[14,:] = 0.6666 * shellKp[7,:] + 0.3333 * shellKp[10,:] - shellKp[15,:] = 0.3333 * shellKp[7,:] + 0.6666 * shellKp[10,:] - - vec = shellKp[1,:] - shellKp[0,:] + shellKp = np.zeros((16, 3)) + shellKp[0, :] = np.array( + [splineXi[stPt, 27], splineYi[stPt, 27], splineZi[stPt, 27]] + ) + shellKp[1, :] = np.array( + [splineXi[stPt, 9], splineYi[stPt, 9], splineZi[stPt, 9]] + ) + shellKp[2, :] = np.array( + [splineXi[stPt + 3, 9], splineYi[stPt + 3, 9], splineZi[stPt + 3, 9]] + ) + shellKp[3, :] = np.array( + [splineXi[stPt + 3, 27], splineYi[stPt + 3, 27], splineZi[stPt + 3, 27]] + ) + shellKp[6, :] = np.array( + [splineXi[stPt + 1, 9], splineYi[stPt + 1, 9], splineZi[stPt + 1, 9]] + ) + shellKp[7, :] = np.array( + [splineXi[stPt + 2, 9], splineYi[stPt + 2, 9], splineZi[stPt + 2, 9]] + ) + shellKp[10, :] = np.array( + [splineXi[stPt + 2, 27], splineYi[stPt + 2, 27], splineZi[stPt + 2, 27]] + ) + shellKp[11, :] = np.array( + [splineXi[stPt + 1, 27], splineYi[stPt + 1, 27], splineZi[stPt + 1, 27]] + ) + shellKp[4, :] = 0.6666 * shellKp[0, :] + 0.3333 * shellKp[1, :] + shellKp[5, :] = 0.3333 * shellKp[0, :] + 0.6666 * shellKp[1, :] + shellKp[8, :] = 0.6666 * shellKp[2, :] + 0.3333 * shellKp[3, :] + shellKp[9, :] = 0.3333 * shellKp[2, :] + 0.6666 * shellKp[3, :] + shellKp[12, :] = 0.6666 * shellKp[11, :] + 0.3333 * shellKp[6, :] + shellKp[13, :] = 0.3333 * shellKp[11, :] + 0.6666 * shellKp[6, :] + shellKp[14, :] = 0.6666 * shellKp[7, :] + 0.3333 * shellKp[10, :] + shellKp[15, :] = 0.3333 * shellKp[7, :] + 0.6666 * shellKp[10, :] + + vec = shellKp[1, :] - shellKp[0, :] mag = np.linalg.norm(vec) - nEl = np.array([],dtype=int) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - vec = shellKp[2,:] - shellKp[1,:] + nEl = np.array([], dtype=int) + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + vec = shellKp[2, :] - shellKp[1, :] mag = np.linalg.norm(vec) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - vec = shellKp[3,:] - shellKp[2,:] + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + vec = shellKp[3, :] - shellKp[2, :] mag = np.linalg.norm(vec) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) - vec = shellKp[0,:] - shellKp[3,:] + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) + vec = shellKp[0, :] - shellKp[3, 
:] mag = np.linalg.norm(vec) - nEl = np.concatenate([nEl,[np.ceil(mag / elementSize).astype(int)]]) + nEl = np.concatenate([nEl, [np.ceil(mag / elementSize).astype(int)]]) - bladeSurf.addShellRegion('quad3',shellKp,nEl,name=blade.swstacks[1][i].name,elType='quad',meshMethod='structured') + bladeSurf.addShellRegion( + "quad3", + shellKp, + nEl, + name=blade.swstacks[1][i].name, + elType="quad", + meshMethod="structured", + ) newSec = dict() - newSec['type'] = 'shell' + newSec["type"] = "shell" layup = list() for pg in blade.swstacks[1][i].plygroups: - totThick = pg.thickness*pg.nPlies - ply = [pg.materialid,totThick,pg.angle] + totThick = pg.thickness * pg.nPlies + ply = [pg.materialid, totThick, pg.angle] layup.append(ply) - newSec['layup'] = layup - newSec['elementSet'] = blade.swstacks[1][i].name + newSec["layup"] = layup + newSec["elementSet"] = blade.swstacks[1][i].name secList.append(newSec) stPt = stPt + 3 - + ## Generate Shell mesh - + shellData = bladeSurf.getSurfaceMesh() - shellData['sections'] = secList - + shellData["sections"] = secList + ## Generate mesh for trailing edge adhesive if requested - if (includeAdhesive == 1): + if includeAdhesive == 1: stPt = frstXS - v1x = splineXi[stPt,6] - splineXi[stPt,4] - v1y = splineYi[stPt,6] - splineYi[stPt,4] - v1z = splineZi[stPt,6] - splineZi[stPt,4] + v1x = splineXi[stPt, 6] - splineXi[stPt, 4] + v1y = splineYi[stPt, 6] - splineYi[stPt, 4] + v1z = splineZi[stPt, 6] - splineZi[stPt, 4] mag1 = np.sqrt(v1x * v1x + v1y * v1y + v1z * v1z) - v2x = splineXi[stPt,30] - splineXi[stPt,32] - v2y = splineYi[stPt,30] - splineYi[stPt,32] - v2z = splineZi[stPt,30] - splineZi[stPt,32] + v2x = splineXi[stPt, 30] - splineXi[stPt, 32] + v2y = splineYi[stPt, 30] - splineYi[stPt, 32] + v2z = splineZi[stPt, 30] - splineZi[stPt, 32] mag2 = np.sqrt(v2x * v2x + v2y * v2y + v2z * v2z) - v3x = splineXi[stPt,6] - splineXi[stPt,30] - v3y = splineYi[stPt,6] - splineYi[stPt,30] - v3z = splineZi[stPt,6] - splineZi[stPt,30] + v3x = splineXi[stPt, 6] - splineXi[stPt, 30] + v3y = splineYi[stPt, 6] - splineYi[stPt, 30] + v3z = splineZi[stPt, 6] - splineZi[stPt, 30] mag3 = np.sqrt(v3x * v3x + v3y * v3y + v3z * v3z) - v4x = splineXi[stPt,4] - splineXi[stPt,32] - v4y = splineYi[stPt,4] - splineYi[stPt,32] - v4z = splineZi[stPt,4] - splineZi[stPt,32] + v4x = splineXi[stPt, 4] - splineXi[stPt, 32] + v4y = splineYi[stPt, 4] - splineYi[stPt, 32] + v4z = splineZi[stPt, 4] - splineZi[stPt, 32] mag4 = np.sqrt(v4x * v4x + v4y * v4y + v4z * v4z) nE1 = np.ceil(mag1 / elementSize).astype(int) nE2 = np.ceil(mag3 / elementSize).astype(int) nE3 = np.ceil(mag2 / elementSize).astype(int) nE4 = np.ceil(mag4 / elementSize).astype(int) - nEl = np.array([nE1,nE2,nE3,nE4]) + nEl = np.array([nE1, nE2, nE3, nE4]) gdLayer = 0 sweepElements = [] guideNds = [] - while (stPt < splineXi.shape[0]): - shellKp = np.zeros((9,3)) - shellKp[0,:] = np.array([splineXi[stPt,4],splineYi[stPt,4],splineZi[stPt,4]]) - shellKp[1,:] = np.array([splineXi[stPt,6],splineYi[stPt,6],splineZi[stPt,6]]) - shellKp[2,:] = np.array([splineXi[stPt,30],splineYi[stPt,30],splineZi[stPt,30]]) - shellKp[3,:] = np.array([splineXi[stPt,32],splineYi[stPt,32],splineZi[stPt,32]]) - shellKp[4,:] = np.array([splineXi[stPt,5],splineYi[stPt,5],splineZi[stPt,5]]) - shellKp[5,:] = 0.5 * shellKp[1,:] + 0.5 * shellKp[2,:] - shellKp[6,:] = np.array([splineXi[stPt,31],splineYi[stPt,31],splineZi[stPt,31]]) - shellKp[7,:] = 0.5 * shellKp[0,:] + 0.5 * shellKp[3,:] - shellKp[8,:] = 0.5 * shellKp[4,:] + 0.5 * shellKp[6,:] - sReg = 
ShellRegion('quad2',shellKp,nEl,elType='quad',meshMethod='free') + while stPt < splineXi.shape[0]: + shellKp = np.zeros((9, 3)) + shellKp[0, :] = np.array( + [splineXi[stPt, 4], splineYi[stPt, 4], splineZi[stPt, 4]] + ) + shellKp[1, :] = np.array( + [splineXi[stPt, 6], splineYi[stPt, 6], splineZi[stPt, 6]] + ) + shellKp[2, :] = np.array( + [splineXi[stPt, 30], splineYi[stPt, 30], splineZi[stPt, 30]] + ) + shellKp[3, :] = np.array( + [splineXi[stPt, 32], splineYi[stPt, 32], splineZi[stPt, 32]] + ) + shellKp[4, :] = np.array( + [splineXi[stPt, 5], splineYi[stPt, 5], splineZi[stPt, 5]] + ) + shellKp[5, :] = 0.5 * shellKp[1, :] + 0.5 * shellKp[2, :] + shellKp[6, :] = np.array( + [splineXi[stPt, 31], splineYi[stPt, 31], splineZi[stPt, 31]] + ) + shellKp[7, :] = 0.5 * shellKp[0, :] + 0.5 * shellKp[3, :] + shellKp[8, :] = 0.5 * shellKp[4, :] + 0.5 * shellKp[6, :] + sReg = ShellRegion("quad2", shellKp, nEl, elType="quad", meshMethod="free") regMesh = sReg.createShellMesh() - - if (stPt == frstXS): - adhesMesh = Mesh3D(regMesh['nodes'],regMesh['elements']) + + if stPt == frstXS: + adhesMesh = Mesh3D(regMesh["nodes"], regMesh["elements"]) else: - guideNds.append(regMesh['nodes']) - layerSwEl = np.ceil((splineZi[stPt,4] - splineZi[(stPt-3),4]) / elementSize).astype(int) + guideNds.append(regMesh["nodes"]) + layerSwEl = np.ceil( + (splineZi[stPt, 4] - splineZi[(stPt - 3), 4]) / elementSize + ).astype(int) sweepElements.append(layerSwEl) stPt = stPt + 3 - adMeshData = adhesMesh.createSweptMesh('toDestNodes',sweepElements,destNodes=guideNds,interpMethod='smooth') - shellData['adhesiveEls'] = adMeshData['elements'] - shellData['adhesiveNds'] = adMeshData['nodes'] - adEls = len(adMeshData['elements']) + adMeshData = adhesMesh.createSweptMesh( + "toDestNodes", sweepElements, destNodes=guideNds, interpMethod="smooth" + ) + shellData["adhesiveEls"] = adMeshData["elements"] + shellData["adhesiveNds"] = adMeshData["nodes"] + adEls = len(adMeshData["elements"]) adhesSet = dict() - adhesSet['name'] = 'ahesiveElements' - labList = list(range(0,adEls)) - adhesSet['labels'] = labList - shellData['adhesiveElSet'] = adhesSet - + adhesSet["name"] = "ahesiveElements" + labList = list(range(0, adEls)) + adhesSet["labels"] = labList + shellData["adhesiveElSet"] = adhesSet + return shellData - - -def generateShellModel(blade, feaCode, includeAdhesive, meshData=None): + + +def generateShellModel(blade, feaCode, includeAdhesive, meshData=None): # This method generates a shell FEA model in one of the supported FEA codes; w/ or w/o adhesieve - - if str(feaCode.lower()) == str('ansys'): - ansysPath = pynumad.path_data['ansysPath'] + + if str(feaCode.lower()) == str("ansys"): + ansysPath = pynumad.path_data["ansysPath"] # define ANSYS model settings (can be options in generateFEA) config = {} - config["BoundaryCondition"] = 'cantilevered' - config["elementType"] = '181' - config["MultipleLayerBehavior"] = 'multiply' + config["BoundaryCondition"] = "cantilevered" + config["elementType"] = "181" + config["MultipleLayerBehavior"] = "multiply" config["dbgen"] = 1 - config["dbname"] = 'master' + config["dbname"] = "master" # Generate a mesh using shell elements - APDLname = 'buildAnsysShell.src' - ansys_product = 'ANSYS' - blade.paths['job'] = getcwd() - filename = join(blade.paths['job'],APDLname) + APDLname = "buildAnsysShell.src" + ansys_product = "ANSYS" + blade.paths["job"] = getcwd() + filename = join(blade.paths["job"], APDLname) if not meshData: forSolid = 0 - meshData = shellMeshGeneral(blade, forSolid,includeAdhesive) + 
meshData = shellMeshGeneral(blade, forSolid, includeAdhesive) - writeAnsysShellModel(blade,filename,meshData,config) + writeAnsysShellModel(blade, filename, meshData, config) if config["dbgen"]: - assert not len(ansysPath)==0, 'Path to ANSYS not specified. Aborting. Operation Not Permitted' + assert ( + not len(ansysPath) == 0 + ), "Path to ANSYS not specified. Aborting. Operation Not Permitted" try: - #tcl: exec "$ANSYS_path" -b -p $AnsysProductVariable -I shell7.src -o output.txt - ansys_call = '"%s" -b -p %s -I %s -o output.txt' % (ansysPath,ansys_product,APDLname) + # tcl: exec "$ANSYS_path" -b -p $AnsysProductVariable -I shell7.src -o output.txt + ansys_call = '"%s" -b -p %s -I %s -o output.txt' % ( + ansysPath, + ansys_product, + APDLname, + ) process = subprocess.run(ansys_call, shell=True) # if status==0: # # dos command completed successfully; log written to output.txt @@ -476,117 +615,122 @@ def generateShellModel(blade, feaCode, includeAdhesive, meshData=None): finally: pass else: - raise Exception('FEA code "%s" not supported.',feaCode) - + raise Exception('FEA code "%s" not supported.', feaCode) + return meshData def solidMeshFromShell(blade, shellMesh, layerNumEls=[]): - shNodes = shellMesh['nodes'] - shElements = shellMesh['elements'] - elSets = shellMesh['sets']['element'] - sectns = shellMesh['sections'] - + shNodes = shellMesh["nodes"] + shElements = shellMesh["elements"] + elSets = shellMesh["sets"]["element"] + sectns = shellMesh["sections"] + ## Initialize 3D solid mesh from the shell mesh - bladeMesh = Mesh3D(shNodes,shElements) + bladeMesh = Mesh3D(shNodes, shElements) ## Calculate unit normal vectors for all nodes numNds = len(shNodes) numShEls = len(shElements) - nodeNorms = np.zeros((numNds,3)) - for i in range(0,len(shElements)): - n1 = shElements[i,0] - n2 = shElements[i,1] - n3 = shElements[i,2] - n4 = shElements[i,3] - if (n4 == -1): - v1 = shNodes[n3,:] - shNodes[n1,:] - v2 = shNodes[n2,:] - shNodes[n1,:] + nodeNorms = np.zeros((numNds, 3)) + for i in range(0, len(shElements)): + n1 = shElements[i, 0] + n2 = shElements[i, 1] + n3 = shElements[i, 2] + n4 = shElements[i, 3] + if n4 == -1: + v1 = shNodes[n3, :] - shNodes[n1, :] + v2 = shNodes[n2, :] - shNodes[n1, :] else: - v1 = shNodes[n4,:] - shNodes[n2,:] - v2 = shNodes[n3,:] - shNodes[n1,:] + v1 = shNodes[n4, :] - shNodes[n2, :] + v2 = shNodes[n3, :] - shNodes[n1, :] v3x = v1[1] * v2[2] - v1[2] * v2[1] v3y = v1[2] * v2[0] - v1[0] * v2[2] v3z = v1[0] * v2[1] - v1[1] * v2[0] - v3 = np.array([v3x,v3y,v3z]) + v3 = np.array([v3x, v3y, v3z]) mag = np.linalg.norm(v3) - uNorm = (1.0/mag)*v3 + uNorm = (1.0 / mag) * v3 for j in range(4): - nj = shElements[i,j] - if (nj != -1): - nodeNorms[nj,:] = nodeNorms[nj,:] + uNorm - + nj = shElements[i, j] + if nj != -1: + nodeNorms[nj, :] = nodeNorms[nj, :] + uNorm + for i in range(numNds): mag = np.linalg.norm(nodeNorms[i]) - nodeNorms[i] = (1.0/mag)*nodeNorms[i] - + nodeNorms[i] = (1.0 / mag) * nodeNorms[i] + ## Extrude shell mesh into solid mesh - if (len(layerNumEls)==0): - layerNumEls = np.array([1,1,1]) - + if len(layerNumEls) == 0: + layerNumEls = np.array([1, 1, 1]) + prevLayer = shNodes.copy() guideNds = list() - for i in range(0,len(layerNumEls)): + for i in range(0, len(layerNumEls)): nodeDist = np.zeros(numNds) - nodeHitCt = np.zeros(numNds,dtype=int) - numSec,numStat = blade.stacks.shape + nodeHitCt = np.zeros(numNds, dtype=int) + numSec, numStat = blade.stacks.shape j = 0 for es in elSets: - layerThick = 0.001*sectns[j]['layup'][i][1] - for el in es['labels']: + 
layerThick = 0.001 * sectns[j]["layup"][i][1] + for el in es["labels"]: for nd in shElements[el]: - if(nd != -1): + if nd != -1: nodeDist[nd] = nodeDist[nd] + layerThick nodeHitCt[nd] = nodeHitCt[nd] + 1 j = j + 1 - newLayer = np.zeros((numNds,3)) - for j in range(0,numNds): - if(nodeHitCt[j] != 0): - nodeDist[j] = nodeDist[j]/nodeHitCt[j] - newLayer[j] = prevLayer[j] + nodeDist[j]*nodeNorms[j] - + newLayer = np.zeros((numNds, 3)) + for j in range(0, numNds): + if nodeHitCt[j] != 0: + nodeDist[j] = nodeDist[j] / nodeHitCt[j] + newLayer[j] = prevLayer[j] + nodeDist[j] * nodeNorms[j] + ## - # print('layer ' + str(i)) + # print('layer ' + str(i)) # for deb in range(0,10): - # print(newLayer[deb]) + # print(newLayer[deb]) ## guideNds.append(newLayer) prevLayer = newLayer.copy() - - solidMesh = bladeMesh.createSweptMesh(sweepMethod='toDestNodes',sweepElements=layerNumEls,destNodes=guideNds,interpMethod='linear') - + + solidMesh = bladeMesh.createSweptMesh( + sweepMethod="toDestNodes", + sweepElements=layerNumEls, + destNodes=guideNds, + interpMethod="linear", + ) + ## Construct the element set list, extrapolated from the shell model newSetList = list() newSectList = list() esi = 0 for es in elSets: - elArray = np.array(es['labels']) + elArray = np.array(es["labels"]) elLayer = 0 li = 1 for lne in layerNumEls: newSet = dict() - newSet['name'] = es['name'] + 'layer_' + str(li) + newSet["name"] = es["name"] + "layer_" + str(li) newLabels = list() - for i in range(0,lne): - newLabels.extend(elArray + numShEls*elLayer) + for i in range(0, lne): + newLabels.extend(elArray + numShEls * elLayer) elLayer = elLayer + 1 - newSet['labels'] = newLabels + newSet["labels"] = newLabels newSetList.append(newSet) newSec = dict() - newSec['type'] = 'solid' - newSec['elementSet'] = newSet['name'] - newSec['material'] = sectns[esi]['layup'][li-1][0] + newSec["type"] = "solid" + newSec["elementSet"] = newSet["name"] + newSec["material"] = sectns[esi]["layup"][li - 1][0] newSectList.append(newSec) li = li + 1 esi = esi + 1 - - solidMesh['sets'] = dict() - solidMesh['sets']['element'] = newSetList - solidMesh['sections'] = newSectList - - solidMesh['adhesiveNds'] = shellMesh['adhesiveNds'] - solidMesh['adhesiveEls'] = shellMesh['adhesiveEls'] - solidMesh['adhesiveElSet'] = shellMesh['adhesiveElSet'] - + + solidMesh["sets"] = dict() + solidMesh["sets"]["element"] = newSetList + solidMesh["sections"] = newSectList + + solidMesh["adhesiveNds"] = shellMesh["adhesiveNds"] + solidMesh["adhesiveEls"] = shellMesh["adhesiveEls"] + solidMesh["adhesiveElSet"] = shellMesh["adhesiveElSet"] + return solidMesh @@ -595,11 +739,12 @@ def getSolidMesh(blade, layerNumEls, elementSize): blade.editStacksForSolidMesh() ## Create shell mesh as seed ## Note the new output structure of shellMeshGeneral, as a single python dictionary -E Anderson - shellMesh = shellMeshGeneral(blade,1,1,elementSize) - print('finished shell mesh') - solidMesh = solidMeshFromShell(blade,shellMesh,layerNumEls) + shellMesh = shellMeshGeneral(blade, 1, 1, elementSize) + print("finished shell mesh") + solidMesh = solidMeshFromShell(blade, shellMesh, layerNumEls) return solidMesh -def getShellMesh(blade, includeAdhesive, elementSize): - meshData = shellMeshGeneral(blade,0,includeAdhesive, elementSize) - return meshData \ No newline at end of file + +def getShellMesh(blade, includeAdhesive, elementSize): + meshData = shellMeshGeneral(blade, 0, includeAdhesive, elementSize) + return meshData diff --git a/src/pynumad/shell/shell_region.py 
b/src/pynumad/shell/shell_region.py new file mode 100644 index 0000000..d5e05e8 --- /dev/null +++ b/src/pynumad/shell/shell_region.py @@ -0,0 +1,623 @@ +from scipy import interpolate +from pynumad.shell.segment2d import * +from pynumad.shell.boundary2d import * +from pynumad.shell.mesh2d import * +import pynumad.shell.mesh_tools as mt +import numpy as np + +class ShellRegion: + """ + Attributes + ----------- + type : str + keyPts : list + edgeEls : list + """ + + def __init__( + self, + regType, + keyPoints, + numEdgeEls, + natSpaceCrd=[], + elType="quad", + meshMethod="free", + ): + self.regType = regType + self.keyPts = np.array(keyPoints) + self.edgeEls = numEdgeEls + if len(natSpaceCrd) == 0: + if regType == "quad1": + self.natSpaceCrd = np.array( + [[-1.0, -1.0], [1.0, -1.0], [1.0, 1.0], [-1.0, 1.0]] + ) + elif regType == "quad2": + self.natSpaceCrd = np.array( + [ + [-1.0, -1.0], + [1.0, -1.0], + [1.0, 1.0], + [-1.0, 1.0], + [0.0, -1.0], + [1.0, 0.0], + [0.0, 1.0], + [-1.0, 0.0], + [0.0, 0.0], + ] + ) + elif regType == "quad3": + r3 = 1.0 / 3.0 + self.natSpaceCrd = np.array( + [ + [-1.0, -1.0], + [1.0, -1.0], + [1.0, 1.0], + [-1.0, 1.0], + [-r3, -1.0], + [r3, -1.0], + [1.0, -r3], + [1.0, r3], + [r3, 1.0], + [-r3, 1.0], + [-1.0, r3], + [-1.0, -r3], + [-r3, -r3], + [r3, -r3], + [r3, r3], + [-r3, r3], + ] + ) + else: + self.natSpaceCrd = np.array(natSpaceCrd) + self.elType = elType + self.meshMethod = meshMethod + + def createShellMesh(self): + """Object data modified: none + Parameters + ---------- + elType + method : str + + Returns + ------- + nodes + elements + """ + if self.meshMethod == "structured": + if "quad" in self.regType: + ee = self.edgeEls + if ee[0] >= ee[2]: + xNodes = ee[0] + 1 + else: + xNodes = ee[2] + 1 + if ee[1] >= ee[3]: + yNodes = ee[1] + 1 + else: + yNodes = ee[3] + 1 + totNds = xNodes * yNodes + seg = Segment2D("line", [[-1.0, -1.0], [1.0, -1.0]], (xNodes - 1)) + bnd = seg.getNodesEdges() + mesh = Mesh2D(bnd["nodes"], bnd["edges"]) + mData = mesh.createSweptMesh( + "inDirection", (yNodes - 1), sweepDistance=2.0, axis=[0.0, 1.0] + ) + + moved = False + if self.edgeEls[0] < self.edgeEls[2]: + seg = Segment2D( + "line", [[-1.0, -1.0], [1.0, -1.0]], self.edgeEls[0] + ) + bnd = seg.getNodesEdges() + segNds = bnd["nodes"] + meshNds = mData["nodes"] + for ndi in range(0, xNodes): + minDist = 2.0 + for sN in segNds: + vec = meshNds[ndi] - sN + dist = np.linalg.norm(vec) + if dist < minDist: + minDist = dist + minPt = sN + meshNds[ndi] = minPt + mData["nodes"] = meshNds + moved = True + elif self.edgeEls[2] < self.edgeEls[0]: + seg = Segment2D("line", [[-1.0, 1.0], [1.0, 1.0]], self.edgeEls[2]) + bnd = seg.getNodesEdges() + segNds = bnd["nodes"] + meshNds = mData["nodes"] + for ndi in range((totNds - xNodes), totNds): + minDist = 2.0 + for sN in segNds: + vec = meshNds[ndi] - sN + dist = np.linalg.norm(vec) + if dist < minDist: + minDist = dist + minPt = sN + meshNds[ndi] = minPt + mData["nodes"] = meshNds + moved = True + if self.edgeEls[1] < self.edgeEls[3]: + seg = Segment2D("line", [[1.0, -1.0], [1.0, 1.0]], self.edgeEls[1]) + bnd = seg.getNodesEdges() + segNds = bnd["nodes"] + meshNds = mData["nodes"] + for ndi in range((xNodes - 1), totNds, xNodes): + minDist = 2.0 + for sN in segNds: + vec = meshNds[ndi] - sN + dist = np.linalg.norm(vec) + if dist < minDist: + minDist = dist + minPt = sN + meshNds[ndi] = minPt + mData["nodes"] = meshNds + moved = True + elif self.edgeEls[3] < self.edgeEls[1]: + seg = Segment2D( + "line", [[-1.0, 1.0], [-1.0, -1.0]], 
self.edgeEls[3]
+                    )
+                    bnd = seg.getNodesEdges()
+                    segNds = bnd["nodes"]
+                    meshNds = mData["nodes"]
+                    for ndi in range(0, totNds, xNodes):
+                        minDist = 2.0
+                        for sN in segNds:
+                            vec = meshNds[ndi] - sN
+                            dist = np.linalg.norm(vec)
+                            if dist < minDist:
+                                minDist = dist
+                                minPt = sN
+                        meshNds[ndi] = minPt
+                    mData["nodes"] = meshNds
+                    moved = True
+
+                if moved:
+                    mData = mt.mergeDuplicateNodes(mData)
+                    elLst = mData["elements"]
+                    ndLst = mData["nodes"]
+                    for eli in range(0, len(elLst)):
+                        srted = np.sort(elLst[eli])
+                        for i in range(0, 3):
+                            if srted[i + 1] == srted[i]:
+                                srted[i + 1] = srted[3]
+                                srted[3] = -1
+                        elLst[eli] = srted
+                        if elLst[eli, 3] == -1:
+                            n1 = elLst[eli, 0]
+                            n2 = elLst[eli, 1]
+                            n3 = elLst[eli, 2]
+                            v1 = ndLst[n2] - ndLst[n1]
+                            v2 = ndLst[n3] - ndLst[n1]
+                            k = v1[0] * v2[1] - v1[1] * v2[0]
+                            if k < 0.0:
+                                elLst[eli, 1] = n3
+                                elLst[eli, 2] = n2
+                else:
+                    elLst = mData["elements"]
+                    ndLst = mData["nodes"]
+
+                XYZ = self.XYZCoord(ndLst)
+
+                mData["nodes"] = XYZ
+                mData["elements"] = elLst
+                return mData
+
+            else:
+                raise Exception(
+                    "Only quadrilateral shell regions can use the structured meshing option"
+                )
+
+        else:
+            bndData = self.initialBoundary()
+            mesh = Mesh2D(bndData["nodes"], bndData["elements"])
+            mData = mesh.createUnstructuredMesh(self.elType)
+            XYZ = self.XYZCoord(mData["nodes"])
+            mData["nodes"] = XYZ
+            return mData
+
+    def initialBoundary(self):
+        """Object data modified: none
+        Parameters
+        ----------
+
+        Returns
+        -------
+        nodes
+        edges
+        """
+        if "quad" in self.regType:
+            bnd = Boundary2D()
+            bnd.addSegment("line", [[-1.0, -1.0], [1.0, -1.0]], self.edgeEls[0])
+            bnd.addSegment("line", [[1.0, -1.0], [1.0, 1.0]], self.edgeEls[1])
+            bnd.addSegment("line", [[1.0, 1.0], [-1.0, 1.0]], self.edgeEls[2])
+            bnd.addSegment("line", [[-1.0, 1.0], [-1.0, -1.0]], self.edgeEls[3])
+            bData = bnd.getBoundaryMesh()
+            return bData
+        elif "tri" in self.regType:
+            bnd = Boundary2D()
+            bnd.addSegment("line", [[0.0, 0.0], [1.0, 0.0]], self.edgeEls[0])
+            bnd.addSegment("line", [[1.0, 0.0], [0.0, 1.0]], self.edgeEls[1])
+            bnd.addSegment("line", [[0.0, 1.0], [0.0, 0.0]], self.edgeEls[2])
+            bData = bnd.getBoundaryMesh()
+            return bData
+        elif "sphere" in self.regType:
+            pi_2 = 0.5 * np.pi
+            bnd = Boundary2D()
+            bnd.addSegment(
+                "arc", [[pi_2, 0.0], [-pi_2, 0.0], [pi_2, 0.0]], self.edgeEls[0]
+            )
+            bData = bnd.getBoundaryMesh()
+            return bData
+
+    def XYZCoord(self, eta):
+        """
+        Parameters
+        ----------
+        eta
+
+        Returns
+        -------
+        XYZ
+        """
+        # if('1' in self.regType):
+        # xCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,0],eta,method='linear')
+        # yCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,1],eta,method='linear')
+        # zCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,2],eta,method='linear')
+        # return np.transpose(np.array([xCrd,yCrd,zCrd]))
+        # elif('2' in self.regType or '3' in self.regType):
+        # xCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,0],eta,method='cubic')
+        # yCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,1],eta,method='cubic')
+        # zCrd = interpolate.griddata(self.natSpaceCrd,self.keyPts[:,2],eta,method='cubic')
+        # return np.transpose(np.array([xCrd,yCrd,zCrd]))
+        numPts = len(eta)
+        if "quad1" == self.regType:
+            # Nvec = np.zeros((1,4))
+            Nmat = np.zeros((numPts, 4))
+            for i in range(0, numPts):
+                Nmat[i, 0] = 0.25 * (eta[i, 0] - 1.0) * (eta[i, 1] - 1.0)
+                Nmat[i, 1] = -0.25 * (eta[i, 0] + 1.0) * (eta[i, 1] - 1.0)
+                Nmat[i, 2] = 0.25 * (eta[i, 0] + 1.0) * (eta[i, 1] + 1.0)
+                Nmat[i, 3] = -0.25 * (eta[i, 0] - 1.0) *
(eta[i, 1] + 1.0) + XYZ = np.matmul(Nmat, self.keyPts) + elif "quad2" == self.regType: + r1 = -1 + r2 = 0 + r3 = 1 + Nmat = np.zeros((numPts, 9)) + for i in range(0, numPts): + Nmat[i, 0] = ( + 0.25 + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + ) + Nmat[i, 1] = ( + 0.25 + * (eta[i, 0] - r1) + * (eta[i, 0] - r2) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + ) + Nmat[i, 2] = ( + 0.25 + * (eta[i, 0] - r1) + * (eta[i, 0] - r2) + * (eta[i, 1] - r1) + * (eta[i, 1] - r2) + ) + Nmat[i, 3] = ( + 0.25 + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 1] - r1) + * (eta[i, 1] - r2) + ) + Nmat[i, 4] = ( + -0.5 + * (eta[i, 0] - r1) + * (eta[i, 0] - r3) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + ) + Nmat[i, 5] = ( + -0.5 + * (eta[i, 0] - r1) + * (eta[i, 0] - r2) + * (eta[i, 1] - r1) + * (eta[i, 1] - r3) + ) + Nmat[i, 6] = ( + -0.5 + * (eta[i, 0] - r1) + * (eta[i, 0] - r3) + * (eta[i, 1] - r1) + * (eta[i, 1] - r2) + ) + Nmat[i, 7] = ( + -0.5 + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 1] - r1) + * (eta[i, 1] - r3) + ) + Nmat[i, 8] = ( + (eta[i, 0] - r1) + * (eta[i, 0] - r3) + * (eta[i, 1] - r1) + * (eta[i, 1] - r3) + ) + XYZ = np.matmul(Nmat, self.keyPts) + elif "quad3" == self.regType: + r1 = -1 + r2 = -0.333333333333333 + r3 = 0.333333333333333 + r4 = 1 + coef = np.array( + [ + 0.31640625, + -0.31640625, + 0.31640625, + -0.31640625, + -0.94921875, + 0.94921875, + 0.94921875, + -0.94921875, + -0.94921875, + 0.94921875, + 0.94921875, + -0.94921875, + 2.84765625, + -2.84765625, + 2.84765625, + -2.84765625, + ] + ) + Nmat = np.zeros((numPts, 16)) + for i in range(0, numPts): + Nmat[i, 0] = ( + coef[0] + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 0] - r4) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + * (eta[i, 1] - r4) + ) + Nmat[i, 1] = ( + coef[1] + * (eta[i, 0] - r1) + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + * (eta[i, 1] - r4) + ) + Nmat[i, 2] = ( + coef[2] + * (eta[i, 0] - r1) + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 1] - r1) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + ) + Nmat[i, 3] = ( + coef[3] + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 0] - r4) + * (eta[i, 1] - r1) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + ) + Nmat[i, 4] = ( + coef[4] + * (eta[i, 0] - r1) + * (eta[i, 0] - r3) + * (eta[i, 0] - r4) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + * (eta[i, 1] - r4) + ) + Nmat[i, 5] = ( + coef[5] + * (eta[i, 0] - r1) + * (eta[i, 0] - r2) + * (eta[i, 0] - r4) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + * (eta[i, 1] - r4) + ) + Nmat[i, 6] = ( + coef[6] + * (eta[i, 0] - r1) + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 1] - r1) + * (eta[i, 1] - r3) + * (eta[i, 1] - r4) + ) + Nmat[i, 7] = ( + coef[7] + * (eta[i, 0] - r1) + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 1] - r1) + * (eta[i, 1] - r2) + * (eta[i, 1] - r4) + ) + Nmat[i, 8] = ( + coef[8] + * (eta[i, 0] - r1) + * (eta[i, 0] - r2) + * (eta[i, 0] - r4) + * (eta[i, 1] - r1) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + ) + Nmat[i, 9] = ( + coef[9] + * (eta[i, 0] - r1) + * (eta[i, 0] - r3) + * (eta[i, 0] - r4) + * (eta[i, 1] - r1) + * (eta[i, 1] - r2) + * (eta[i, 1] - r3) + ) + Nmat[i, 10] = ( + coef[10] + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 0] - r4) + * (eta[i, 1] - r1) + * (eta[i, 1] - r2) + * (eta[i, 1] - r4) + ) + Nmat[i, 11] = ( + coef[11] + * (eta[i, 0] - r2) + * (eta[i, 0] - r3) + * (eta[i, 0] - r4) + * (eta[i, 1] - r1) + * (eta[i, 1] - r3) + * (eta[i, 1] - r4) + ) + Nmat[i, 12] = ( + coef[12] 
+                    * (eta[i, 0] - r1)
+                    * (eta[i, 0] - r3)
+                    * (eta[i, 0] - r4)
+                    * (eta[i, 1] - r1)
+                    * (eta[i, 1] - r3)
+                    * (eta[i, 1] - r4)
+                )
+                Nmat[i, 13] = (
+                    coef[13]
+                    * (eta[i, 0] - r1)
+                    * (eta[i, 0] - r2)
+                    * (eta[i, 0] - r4)
+                    * (eta[i, 1] - r1)
+                    * (eta[i, 1] - r3)
+                    * (eta[i, 1] - r4)
+                )
+                Nmat[i, 14] = (
+                    coef[14]
+                    * (eta[i, 0] - r1)
+                    * (eta[i, 0] - r2)
+                    * (eta[i, 0] - r4)
+                    * (eta[i, 1] - r1)
+                    * (eta[i, 1] - r2)
+                    * (eta[i, 1] - r4)
+                )
+                Nmat[i, 15] = (
+                    coef[15]
+                    * (eta[i, 0] - r1)
+                    * (eta[i, 0] - r3)
+                    * (eta[i, 0] - r4)
+                    * (eta[i, 1] - r1)
+                    * (eta[i, 1] - r2)
+                    * (eta[i, 1] - r4)
+                )
+            XYZ = np.matmul(Nmat, self.keyPts)
+        elif "tri1" == self.regType:
+            Nmat = np.zeros((numPts, 3))
+            for i in range(0, numPts):
+                Nmat[i, 0] = 1 - eta[i, 0] - eta[i, 1]
+                Nmat[i, 1] = eta[i, 0]
+                Nmat[i, 2] = eta[i, 1]
+            XYZ = np.matmul(Nmat, self.keyPts)
+        elif "tri2" == self.regType:
+            Nmat = np.zeros((numPts, 6))
+            for i in range(0, numPts):
+                Nmat[i, 0] = (
+                    2 * (eta[i, 0] + eta[i, 1] - 1) * (eta[i, 0] + eta[i, 1] - 0.5)
+                )
+                Nmat[i, 1] = 2 * eta[i, 0] * (eta[i, 0] - 0.5)
+                Nmat[i, 2] = 2 * eta[i, 1] * (eta[i, 1] - 0.5)
+                Nmat[i, 3] = -4 * eta[i, 0] * (eta[i, 0] + eta[i, 1] - 1)
+                Nmat[i, 4] = 4 * eta[i, 0] * eta[i, 1]
+                Nmat[i, 5] = -4 * eta[i, 1] * (eta[i, 0] + eta[i, 1] - 1)
+            XYZ = np.matmul(Nmat, self.keyPts)
+        elif "tri3" == self.regType:
+            r2 = 1 / 3
+            r3 = 2 / 3
+            coef = np.array([-4.5, 4.5, 4.5, 13.5, -13.5, 13.5, 13.5, -13.5, 13.5, -27])
+            Nmat = np.zeros((numPts, 10))
+            for i in range(0, numPts):
+                Nmat[i, 0] = (
+                    coef[0]
+                    * (eta[i, 0] + eta[i, 1] - r2)
+                    * (eta[i, 0] + eta[i, 1] - r3)
+                    * (eta[i, 0] + eta[i, 1] - 1)
+                )
+                Nmat[i, 1] = coef[1] * eta[i, 0] * (eta[i, 0] - r2) * (eta[i, 0] - r3)
+                Nmat[i, 2] = coef[2] * eta[i, 1] * (eta[i, 1] - r2) * (eta[i, 1] - r3)
+                Nmat[i, 3] = (
+                    coef[3]
+                    * eta[i, 0]
+                    * (eta[i, 0] + eta[i, 1] - r3)
+                    * (eta[i, 0] + eta[i, 1] - 1)
+                )
+                Nmat[i, 4] = (
+                    coef[4] * eta[i, 0] * (eta[i, 0] - r2) * (eta[i, 0] + eta[i, 1] - 1)
+                )
+                Nmat[i, 5] = coef[5] * eta[i, 0] * eta[i, 1] * (eta[i, 0] - r2)
+                Nmat[i, 6] = coef[6] * eta[i, 0] * eta[i, 1] * (eta[i, 1] - r2)
+                Nmat[i, 7] = (
+                    coef[7] * eta[i, 1] * (eta[i, 1] - r2) * (eta[i, 0] + eta[i, 1] - 1)
+                )
+                Nmat[i, 8] = (
+                    coef[8]
+                    * eta[i, 1]
+                    * (eta[i, 0] + eta[i, 1] - r3)
+                    * (eta[i, 0] + eta[i, 1] - 1)
+                )
+                Nmat[i, 9] = (
+                    coef[9] * eta[i, 0] * eta[i, 1] * (eta[i, 0] + eta[i, 1] - 1)
+                )
+            XYZ = np.matmul(Nmat, self.keyPts)
+        elif self.regType == "sphere":
+            vec = self.keyPts[1, :] - self.keyPts[0, :]  ## local x-direction
+            outerRad = np.linalg.norm(vec)
+            XYZ = np.zeros((len(eta), 3))
+            ndi = 0
+            for nd in eta:
+                phiComp = np.linalg.norm(nd)
+                if nd[0] > 1.0e-12:
+                    theta = np.arctan(nd[1] / nd[0])
+                else:
+                    if nd[0] < 1.0e-12:
+                        theta = np.pi + np.arctan(nd[1] / nd[0])
+                    else:
+                        theta = np.arctan(nd[1] / 1.0e-12)
+                phi = 0.5 * np.pi - phiComp
+                xloc = outerRad * np.cos(theta) * np.cos(phi)
+                yloc = outerRad * np.sin(theta) * np.cos(phi)
+                zloc = outerRad * np.sin(phi)
+                XYZLoc = np.array([xloc, yloc, zloc])
+                a1 = (1 / outerRad) * vec
+                vec2 = self.keyPts[2, :] - self.keyPts[1, :]
+                vec3 = np.array(
+                    [
+                        (vec[1] * vec2[2] - vec[2] * vec2[1]),
+                        (vec[2] * vec2[0] - vec[0] * vec2[2]),
+                        (vec[0] * vec2[1] - vec[1] * vec2[0]),
+                    ]
+                )
+                mag = np.sqrt(vec3 * vec3.T)
+                a3 = (1 / mag) * vec3
+                a2 = np.array(
+                    [
+                        (a3[1] * a1[2] - a3[2] * a1[1]),
+                        (a3[2] * a1[0] - a3[0] * a1[2]),
+                        (a3[0] * a1[1] - a3[1] * a1[0]),
+                    ]
+                )
+                alpha = np.array([[a1], [a2], [a3]])
+                XYZ[ndi] = np.matmul(XYZLoc, alpha)
+ self.keyPts[0, :] + ndi = ndi + 1 + + return XYZ diff --git a/src/pynumad/shell/SpatialGridList2DClass.py b/src/pynumad/shell/spatial_grid_list2d.py similarity index 55% rename from src/pynumad/shell/SpatialGridList2DClass.py rename to src/pynumad/shell/spatial_grid_list2d.py index 6bb05a2..7fab53b 100644 --- a/src/pynumad/shell/SpatialGridList2DClass.py +++ b/src/pynumad/shell/spatial_grid_list2d.py @@ -1,7 +1,7 @@ import numpy as np -class SpatialGridList2D(): +class SpatialGridList2D: def __init__(self, minimumX, maximumX, minimumY, maximumY, xGridSize, yGridSize): self.xMin = minimumX self.yMin = minimumY @@ -9,52 +9,52 @@ def __init__(self, minimumX, maximumX, minimumY, maximumY, xGridSize, yGridSize) self.yGSz = yGridSize xLen = maximumX - minimumX yLen = maximumY - minimumY - self.xRows = int(np.ceil(xLen/xGridSize)) - self.yRows = int(np.ceil(yLen/yGridSize)) + self.xRows = int(np.ceil(xLen / xGridSize)) + self.yRows = int(np.ceil(yLen / yGridSize)) self.fullList = list() - for i in range(0,self.xRows): + for i in range(0, self.xRows): xList = list() - for j in range(0,self.yRows): + for j in range(0, self.yRows): xList.append(list()) self.fullList.append(xList) - + def addEntry(self, val, coord): - xRow = int(np.floor((coord[0] - self.xMin)/self.xGSz)) - yRow = int(np.floor((coord[1] - self.yMin)/self.yGSz)) + xRow = int(np.floor((coord[0] - self.xMin) / self.xGSz)) + yRow = int(np.floor((coord[1] - self.yMin) / self.yGSz)) self.fullList[xRow][yRow].append(val) - - def findInXYMargin(self,point,Xmargin,Ymargin): - if(Xmargin == -1): + + def findInXYMargin(self, point, Xmargin, Ymargin): + if Xmargin == -1: iMax = self.xRows iMin = 0 else: # outStr = 'point[0] ' + str(point[0]) + ' Xmargin ' + str(Xmargin) + ' xMin ' + str(self.xMin) + ' xGSz ' + str(self.xGSz) # print(outStr) - iMax = int(np.ceil((point[0] + Xmargin - self.xMin)/self.xGSz)) - if(iMax > self.xRows): + iMax = int(np.ceil((point[0] + Xmargin - self.xMin) / self.xGSz)) + if iMax > self.xRows: iMax = self.xRows - iMin = int(np.floor((point[0] - Xmargin - self.xMin)/self.xGSz)) - if(iMin < 0): + iMin = int(np.floor((point[0] - Xmargin - self.xMin) / self.xGSz)) + if iMin < 0: iMin = 0 - - if(Ymargin == -1): + + if Ymargin == -1: jMax = self.yRows jMin = 0 else: - jMax = int(np.ceil((point[1] + Ymargin - self.yMin)/self.yGSz)) - if(jMax > self.yRows): + jMax = int(np.ceil((point[1] + Ymargin - self.yMin) / self.yGSz)) + if jMax > self.yRows: jMax = self.yRows - jMin = int(np.floor((point[1] - Ymargin - self.yMin)/self.yGSz)) - if(jMin < 0): + jMin = int(np.floor((point[1] - Ymargin - self.yMin) / self.yGSz)) + if jMin < 0: jMin = 0 - + labelList = list() - for i in range(iMin,iMax): - for j in range(jMin,jMax): + for i in range(iMin, iMax): + for j in range(jMin, jMax): labelList.extend(self.fullList[i][j]) - + return labelList - + def findInRadius(self, point, radius): - labelList = self.findInXYMargin(point,radius,radius) - return labelList \ No newline at end of file + labelList = self.findInXYMargin(point, radius, radius) + return labelList diff --git a/src/pynumad/shell/spatial_grid_list3d.py b/src/pynumad/shell/spatial_grid_list3d.py new file mode 100644 index 0000000..5ea3e34 --- /dev/null +++ b/src/pynumad/shell/spatial_grid_list3d.py @@ -0,0 +1,89 @@ +import numpy as np + + +class SpatialGridList3D: + def __init__( + self, + minimumX, + maximumX, + minimumY, + maximumY, + minimumZ, + maximumZ, + xGridSize, + yGridSize, + zGridSize, + ): + self.xMin = minimumX + self.yMin = minimumY + self.zMin = minimumZ + 
self.xGSz = xGridSize + self.yGSz = yGridSize + self.zGSz = zGridSize + xLen = maximumX - minimumX + yLen = maximumY - minimumY + zLen = maximumZ - minimumZ + self.xRows = int(np.ceil(xLen / xGridSize)) + self.yRows = int(np.ceil(yLen / yGridSize)) + self.zRows = int(np.ceil(zLen / zGridSize)) + self.fullList = list() + for i in range(0, self.xRows): + xList = list() + for j in range(0, self.yRows): + yList = list() + for k in range(0, self.zRows): + yList.append(list()) + xList.append(yList) + self.fullList.append(xList) + + def addEntry(self, val, coord): + xRow = int(np.floor((coord[0] - self.xMin) / self.xGSz)) + yRow = int(np.floor((coord[1] - self.yMin) / self.yGSz)) + zRow = int(np.floor((coord[2] - self.zMin) / self.zGSz)) + self.fullList[xRow][yRow][zRow].append(val) + + def findInXYZMargin(self, point, Xmargin, Ymargin, Zmargin): + if Xmargin == -1: + iMax = self.xRows + iMin = 0 + else: + iMax = int(np.ceil((point[0] + Xmargin - self.xMin) / self.xGSz)) + if iMax > self.xRows: + iMax = self.xRows + iMin = int(np.floor((point[0] - Xmargin - self.xMin) / self.xGSz)) + if iMin < 0: + iMin = 0 + + if Ymargin == -1: + jMax = self.yRows + jMin = 0 + else: + jMax = int(np.ceil((point[1] + Ymargin - self.yMin) / self.yGSz)) + if jMax > self.yRows: + jMax = self.yRows + jMin = int(np.floor((point[1] - Ymargin - self.yMin) / self.yGSz)) + if jMin < 0: + jMin = 0 + + if Zmargin == -1: + kMax = self.zRows + kMin = 0 + else: + kMax = int(np.ceil((point[2] + Zmargin - self.zMin) / self.zGSz)) + if kMax > self.zRows: + kMax = self.zRows + kMin = int(np.floor((point[2] - Zmargin - self.zMin) / self.zGSz)) + if kMin < 0: + kMin = 0 + + labelList = list() + for i in range(iMin, iMax): + for j in range(jMin, jMax): + for k in range(kMin, kMax): + labelList.extend(self.fullList[i][j][k]) + + return labelList + + def findInRadius(self, point, radius): + labelList = self.findInXYZMargin(point, radius, radius, radius) + return labelList diff --git a/src/pynumad/shell/SurfaceClass.py b/src/pynumad/shell/surface.py similarity index 53% rename from src/pynumad/shell/SurfaceClass.py rename to src/pynumad/shell/surface.py index 43feaed..1c9fc3e 100644 --- a/src/pynumad/shell/SurfaceClass.py +++ b/src/pynumad/shell/surface.py @@ -1,37 +1,48 @@ import numpy as np -import pynumad.shell.MeshTools as mt -from pynumad.shell.ShellRegionClass import * +import pynumad.shell.mesh_tools as mt +from pynumad.shell.shell_region import * -class Surface(): - def __init__(self,regionList=[],regionNames=[],meshList=[],meshNames=[]): +class Surface: + def __init__(self, regionList=[], regionNames=[], meshList=[], meshNames=[]): self.shellRegions = list() self.shellRegions.extend(regionList) self.regionNames = list() self.regionNames.extend(regionNames) self.meshes = list() - self.meshes.extend(meshList) + self.meshes.extend(meshList) self.meshNames = list() self.meshNames.extend(meshNames) - - def addShellRegion(self,regType,keyPts,numEls,name=None,natSpaceCrds=[],elType='quad',meshMethod='free'): - self.shellRegions.append(ShellRegion(regType,keyPts,numEls,natSpaceCrds,elType,meshMethod)) - if(name == None): + + def addShellRegion( + self, + regType, + keyPts, + numEls, + name=None, + natSpaceCrds=[], + elType="quad", + meshMethod="free", + ): + self.shellRegions.append( + ShellRegion(regType, keyPts, numEls, natSpaceCrds, elType, meshMethod) + ) + if name == None: numReg = len(self.shellRegions) - regName = 'Sub-Region_' + str(numReg) + regName = "Sub-Region_" + str(numReg) self.regionNames.append(regName) else: 
self.regionNames.append(name) - - def addMesh(self,meshData,name=None): + + def addMesh(self, meshData, name=None): self.meshes.append(meshData) - if(name == None): + if name == None: numMsh = len(self.meshes) - meshName = 'Sub-Mesh_' + str(numMsh) + meshName = "Sub-Mesh_" + str(numMsh) self.meshNames.append(meshName) else: self.meshNames.append(name) - + def getSurfaceMesh(self): allNds = list() allEls = list() @@ -41,46 +52,46 @@ def getSurfaceMesh(self): regi = 0 for reg in self.shellRegions: regMesh = reg.createShellMesh() - allNds.extend(regMesh['nodes']) + allNds.extend(regMesh["nodes"]) setList = list() eli = 0 - for el in regMesh['elements']: - for i in range(0,4): - if(el[i] != -1): + for el in regMesh["elements"]: + for i in range(0, 4): + if el[i] != -1: el[i] = el[i] + numNds allEls.append(el) setList.append((eli + numEls)) eli = eli + 1 thisSet = dict() - thisSet['name'] = self.regionNames[regi] - thisSet['labels'] = setList + thisSet["name"] = self.regionNames[regi] + thisSet["labels"] = setList elSetList.append(thisSet) numNds = len(allNds) numEls = len(allEls) regi = regi + 1 mshi = 0 for msh in self.meshes: - allNds.extend(msh['nodes']) + allNds.extend(msh["nodes"]) setList = list() eli = 0 - for el in msh['elements']: - for i in range(0,4): - if(el[i] != -1): + for el in msh["elements"]: + for i in range(0, 4): + if el[i] != -1: el[i] = el[i] + numNds allEls.append(el) setList.append((eli + numEls)) eli = eli + 1 thisSet = dict() - thisSet['name'] = self.meshNames[mshi] - thisSet['labels'] = setList + thisSet["name"] = self.meshNames[mshi] + thisSet["labels"] = setList elSetList.append(thisSet) numNds = len(allNds) numEls = len(allEls) mshi = mshi + 1 mData = dict() - mData['nodes'] = np.array(allNds) - mData['elements'] = np.array(allEls) + mData["nodes"] = np.array(allNds) + mData["elements"] = np.array(allEls) mData = mt.mergeDuplicateNodes(mData) - mData['sets'] = dict() - mData['sets']['element'] = elSetList - return mData \ No newline at end of file + mData["sets"] = dict() + mData["sets"]["element"] = elSetList + return mData diff --git a/src/pynumad/tests/test_affinetrans.py b/src/pynumad/tests/test_affinetrans.py index 50d35da..8a048f4 100644 --- a/src/pynumad/tests/test_affinetrans.py +++ b/src/pynumad/tests/test_affinetrans.py @@ -2,10 +2,10 @@ from pynumad.utils.affinetrans import rotation, translation -class TestAffinetrans(unittest.TestCase): +class TestAffinetrans(unittest.TestCase): def test_rotation(self): pass def test_translation(self): - pass \ No newline at end of file + pass diff --git a/src/pynumad/tests/test_airfoil.py b/src/pynumad/tests/test_airfoil.py index d6bbb12..ff11b2e 100644 --- a/src/pynumad/tests/test_airfoil.py +++ b/src/pynumad/tests/test_airfoil.py @@ -6,18 +6,20 @@ test_data_dir = DATA_PATH + class TestAirfoil(unittest.TestCase): @classmethod def setUpClass(self): - self.xmlfile = join(test_data_dir,"airfoils", "DU91-W-250.txt") + self.xmlfile = join(test_data_dir, "airfoils", "DU91-W-250.txt") def test_load_xml(self): - x = Airfoil(filename = self.xmlfile) + x = Airfoil(filename=self.xmlfile) - #check reference + # check reference self.assertEqual(x.reference, "Points generated by BRR for NuMAD, 6/2/2011") - #TODO: check coords + # TODO: check coords + if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/src/pynumad/tests/test_blade_io.py b/src/pynumad/tests/test_blade_io.py index 04e2b72..60798e0 100644 --- a/src/pynumad/tests/test_blade_io.py +++ 
b/src/pynumad/tests/test_blade_io.py @@ -6,17 +6,19 @@ test_data_dir = DATA_PATH + class TestBladeIO(unittest.TestCase): @classmethod def setUpClass(self): - self.xlsxfile = join(test_data_dir,"blade_excels","Excel2ObjectExample.xlsx") - self.yamlfile = join(test_data_dir,"blade_yamls","myBlade_modified.yaml") - + self.xlsxfile = join(test_data_dir, "blade_excels", "Excel2ObjectExample.xlsx") + self.yamlfile = join(test_data_dir, "blade_yamls", "myBlade_modified.yaml") + def test_xlsx_blade(self): xlsxblade = Blade(self.xlsxfile) def test_yaml_blade(self): yamlblade = Blade(self.yamlfile) + if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/src/pynumad/tests/test_interpolator.py b/src/pynumad/tests/test_interpolator.py index 37706fb..e40220a 100644 --- a/src/pynumad/tests/test_interpolator.py +++ b/src/pynumad/tests/test_interpolator.py @@ -4,65 +4,72 @@ from pynumad.utils.interpolation import interpolator_wrap + class TestInterpolator(unittest.TestCase): @classmethod def setUpClass(self): - self.x = np.linspace(0,10,5) + self.x = np.linspace(0, 10, 5) v1 = np.sin(self.x) v2 = self.x**2 - self.v = np.array((v1,v2)).transpose() - self.xq = np.linspace(0,10,10) - + self.v = np.array((v1, v2)).transpose() + self.xq = np.linspace(0, 10, 10) + def test_linear(self): - vq = interpolator_wrap(self.x,self.v,self.xq,method='linear') - correct_vq = np.array([ - [ 0.00000000e+00, 0.00000000e+00], - [ 2.65987620e-01, 2.77777778e+00], - [ 5.31975239e-01, 5.55555556e+00], - [ 7.93400045e-02, 1.25000000e+01], - [-6.12836182e-01, 2.08333333e+01], - [-5.37385552e-01, 3.19444444e+01], - [ 3.05691893e-01, 4.58333333e+01], - [ 7.73330967e-01, 6.11111111e+01], - [ 1.14654928e-01, 8.05555556e+01], - [-5.44021111e-01, 1.00000000e+02] - ]) - array_equal = np.isclose(vq,correct_vq).all() + vq = interpolator_wrap(self.x, self.v, self.xq, method="linear") + correct_vq = np.array( + [ + [0.00000000e00, 0.00000000e00], + [2.65987620e-01, 2.77777778e00], + [5.31975239e-01, 5.55555556e00], + [7.93400045e-02, 1.25000000e01], + [-6.12836182e-01, 2.08333333e01], + [-5.37385552e-01, 3.19444444e01], + [3.05691893e-01, 4.58333333e01], + [7.73330967e-01, 6.11111111e01], + [1.14654928e-01, 8.05555556e01], + [-5.44021111e-01, 1.00000000e02], + ] + ) + array_equal = np.isclose(vq, correct_vq).all() self.assertTrue(array_equal) - def test_pchip(self): - vq = interpolator_wrap(self.x,self.v,self.xq,method='pchip') - correct_vq = np.array([ - [ 0. , 0. ], - [ 0.47952836, 1.57750343], - [ 0.59634519, 5.21262003], - [ 0.1947027 , 10.76388889], - [ -0.76238042, 19.843107 ], - [ -0.71953191, 30.69415866], - [ 0.4462048 , 44.48302469], - [ 0.92197998, 60.40237769], - [ 0.50904307, 78.89803384], - [ -0.54402111, 100. ] - ]) - array_equal = np.isclose(vq,correct_vq).all() + vq = interpolator_wrap(self.x, self.v, self.xq, method="pchip") + correct_vq = np.array( + [ + [0.0, 0.0], + [0.47952836, 1.57750343], + [0.59634519, 5.21262003], + [0.1947027, 10.76388889], + [-0.76238042, 19.843107], + [-0.71953191, 30.69415866], + [0.4462048, 44.48302469], + [0.92197998, 60.40237769], + [0.50904307, 78.89803384], + [-0.54402111, 100.0], + ] + ) + array_equal = np.isclose(vq, correct_vq).all() self.assertTrue(array_equal) pass def test_spline(self): - vq = interpolator_wrap(self.x,self.v,self.xq,method='spline') - correct_vq = np.array([ - [ 0. , 0. 
], - [ 1.09041923, 1.2345679 ], - [ 0.79794503, 4.9382716 ], - [ -0.11178833, 11.11111111], - [ -0.87314655, 19.75308642], - [ -0.75463378, 30.86419753], - [ 0.19006212, 44.44444444], - [ 1.12206955, 60.49382716], - [ 1.16837848, 79.01234568], - [ -0.54402111, 100. ]]) - array_equal = np.isclose(vq,correct_vq).all() + vq = interpolator_wrap(self.x, self.v, self.xq, method="spline") + correct_vq = np.array( + [ + [0.0, 0.0], + [1.09041923, 1.2345679], + [0.79794503, 4.9382716], + [-0.11178833, 11.11111111], + [-0.87314655, 19.75308642], + [-0.75463378, 30.86419753], + [0.19006212, 44.44444444], + [1.12206955, 60.49382716], + [1.16837848, 79.01234568], + [-0.54402111, 100.0], + ] + ) + array_equal = np.isclose(vq, correct_vq).all() self.assertTrue(array_equal) pass diff --git a/src/pynumad/tests/test_mesh.py b/src/pynumad/tests/test_mesh.py index 52e0895..6f7e871 100644 --- a/src/pynumad/tests/test_mesh.py +++ b/src/pynumad/tests/test_mesh.py @@ -7,16 +7,14 @@ test_data_dir = DATA_PATH + class TestMesh(unittest.TestCase): @classmethod def setUpClass(self): - self.yamlfile = join(test_data_dir,"blade_yamls","myBlade_modified.yaml") - + self.yamlfile = join(test_data_dir, "blade_yamls", "myBlade_modified.yaml") + def test_mesh(self): blade = Blade(self.yamlfile) elementSize = 0.2 adhes = 1 meshData = getShellMesh(blade, includeAdhesive=adhes, elementSize=elementSize) - - - diff --git a/src/pynumad/tests/test_misc.py b/src/pynumad/tests/test_misc.py index 3cb3913..3d6c062 100644 --- a/src/pynumad/tests/test_misc.py +++ b/src/pynumad/tests/test_misc.py @@ -3,8 +3,8 @@ from pynumad.utils.misc_utils import LARCetaL, LARCetaT, _parse_data -class TestMisc(unittest.TestCase): +class TestMisc(unittest.TestCase): def test_larcetat(self): alp0 = 53.0 etat = LARCetaT(alp0) @@ -15,11 +15,13 @@ def test_larcetal(self): YC = -10000000000.0 alp0 = 53.0 etal = LARCetaL(SL, YC, alp0) - self.assertAlmostEqual(etal,-0.7610479585895458) + self.assertAlmostEqual(etal, -0.7610479585895458) def test_parse(self): - cases = [50.0, '1e10', '6.4023E8'] - self.assertEqual(_parse_data(cases[0]),50.0) - self.assertEqual(_parse_data(cases[1]),10000000000.0) - truths = np.isclose(_parse_data(cases), np.array([50.0, 10000000000.0, 640230000.0])) - self.assertTrue(truths.all()) \ No newline at end of file + cases = [50.0, "1e10", "6.4023E8"] + self.assertEqual(_parse_data(cases[0]), 50.0) + self.assertEqual(_parse_data(cases[1]), 10000000000.0) + truths = np.isclose( + _parse_data(cases), np.array([50.0, 10000000000.0, 640230000.0]) + ) + self.assertTrue(truths.all()) diff --git a/src/pynumad/utils/__init__.py b/src/pynumad/utils/__init__.py index 5a4829a..e69de29 100644 --- a/src/pynumad/utils/__init__.py +++ b/src/pynumad/utils/__init__.py @@ -1,2 +0,0 @@ -""" -""" \ No newline at end of file diff --git a/src/pynumad/utils/affinetrans.py b/src/pynumad/utils/affinetrans.py index b0a2850..ec384fc 100644 --- a/src/pynumad/utils/affinetrans.py +++ b/src/pynumad/utils/affinetrans.py @@ -1,21 +1,23 @@ from scipy.spatial.transform import Rotation import numpy as np + def rotation(axis, angle): """ Designed to replace matlab's makehgtform """ r = Rotation.from_euler(axis, angle) rmatrix = np.eye(4) - rmatrix[0:3,0:3] = r.as_matrix() + rmatrix[0:3, 0:3] = r.as_matrix() return rmatrix -def translation(xtrans,ytrans,ztrans): + +def translation(xtrans, ytrans, ztrans): tmatrix = np.eye(4) - tmatrix[0:3,3] = [xtrans,ytrans,ztrans] + tmatrix[0:3, 3] = [xtrans, ytrans, ztrans] return tmatrix if __name__ == "__main__": - 
print(translation(2,3,4)) - print(rotation('z', 1)) \ No newline at end of file + print(translation(2, 3, 4)) + print(rotation("z", 1)) diff --git a/src/pynumad/utils/fatigue.py b/src/pynumad/utils/fatigue.py index 2a9e327..baec739 100644 --- a/src/pynumad/utils/fatigue.py +++ b/src/pynumad/utils/fatigue.py @@ -1,85 +1,137 @@ import numpy as np import warnings - -def calcFatigue(blade = None, - meshData = None, - IEC = None, - Ltheta = None, - LthetaPlus90 = None, - Mtheta = None, - MthetaPlus90 = None, - binnedElements = None, - plateStrainsTheta = None, - plateStrainsThetaPlus90 = None, - iSegment = None): - #blade.materials read in to obtain fatigue exponents - #Initialize damage variables, get section data, and get element strains - + + +def calcFatigue( + blade=None, + meshData=None, + IEC=None, + Ltheta=None, + LthetaPlus90=None, + Mtheta=None, + MthetaPlus90=None, + binnedElements=None, + plateStrainsTheta=None, + plateStrainsThetaPlus90=None, + iSegment=None, +): + # blade.materials read in to obtain fatigue exponents + # Initialize damage variables, get section data, and get element strains + numElem = np.asarray(binnedElements).size if numElem == 0: if iSegment != 13: - raise Exception('"zwidth" is smaller than element size. Decrease element size or increase zwdith') + raise Exception( + '"zwidth" is smaller than element size. Decrease element size or increase zwdith' + ) else: - fatigueDamage = np.zeros((1,10)) + fatigueDamage = np.zeros((1, 10)) plotFatigue = [] - return fatigueDamage,plotFatigue - - analysis = 'LU' - env = 'noEnv' - temp = 'noTemp' - mfg = 'basicFlaw' - calca = 'test' - loada = '12_directions' - SFs = np.round(setIEC5(analysis,env,temp,mfg,calca,loada),2) + return fatigueDamage, plotFatigue + + analysis = "LU" + env = "noEnv" + temp = "noTemp" + mfg = "basicFlaw" + calca = "test" + loada = "12_directions" + SFs = np.round(setIEC5(analysis, env, temp, mfg, calca, loada), 2) # analysis='LF'; -# loada='2_directions'; -# calcb ='measuredSlope'; -# loadb = 'Markov'; -# SFf=setIEC5(analysis,env,temp,mfg,calca,loada,calcb,loadb); - + # loada='2_directions'; + # calcb ='measuredSlope'; + # loadb = 'Markov'; + # SFf=setIEC5(analysis,env,temp,mfg,calca,loada,calcb,loadb); + SFf = SFs - warnings.warn('SFf = SFs for BAR project since we are doing more than 2 load directions but less than 12') - fatigueDamage = np.zeros((numElem,10)) + warnings.warn( + "SFf = SFs for BAR project since we are doing more than 2 load directions but less than 12" + ) + fatigueDamage = np.zeros((numElem, 10)) plotFatigue = [] - for i in np.arange(1,numElem+1).reshape(-1): + for i in np.arange(1, numElem + 1).reshape(-1): elemFD = 0 elemFDlayer = 0 elemFDmat = 0 elemFDflap = 0 elemFDedge = 0 - elNo = plateStrainsTheta(binnedElements(i),1) - coordSys = 'local' - localFieldsTheta = extractFieldsThruThickness(plateStrainsTheta,meshData,blade.materials,blade.stacks,blade.swstacks,elNo,coordSys) - localFieldsThetaPlus90 = extractFieldsThruThickness(plateStrainsThetaPlus90,meshData,blade.materials,blade.stacks,blade.swstacks,elNo,coordSys) - npts = np.asarray(getattr(localFieldsTheta,(np.array(['element',num2str(elNo)]))).x3).size + elNo = plateStrainsTheta(binnedElements(i), 1) + coordSys = "local" + localFieldsTheta = extractFieldsThruThickness( + plateStrainsTheta, + meshData, + blade.materials, + blade.stacks, + blade.swstacks, + elNo, + coordSys, + ) + localFieldsThetaPlus90 = extractFieldsThruThickness( + plateStrainsThetaPlus90, + meshData, + blade.materials, + blade.stacks, + blade.swstacks, + 
elNo, + coordSys, + ) + npts = np.asarray( + getattr(localFieldsTheta, (np.array(["element", num2str(elNo)]))).x3 + ).size nptsPerLayer = 2 - layer = __builtint__.sorted(np.array([np.arange(1,npts / nptsPerLayer+1),np.arange(1,npts / nptsPerLayer+1)])) - for ix3 in np.arange(1,npts+1).reshape(-1): - matNumber = getattr(localFieldsTheta,(np.array(['element',num2str(elNo)]))).matNumber(ix3) - if not len(blade.materials(matNumber).m)==0 : - #Determine mean and amplitude stress - MthetaFactor = getattr(localFieldsTheta,(np.array(['element',num2str(elNo)]))).sig11(ix3) / Mtheta - MthetaPlus90Factor = getattr(localFieldsThetaPlus90,(np.array(['element',num2str(elNo)]))).sig11(ix3) / MthetaPlus90 + layer = __builtint__.sorted( + np.array( + [ + np.arange(1, npts / nptsPerLayer + 1), + np.arange(1, npts / nptsPerLayer + 1), + ] + ) + ) + for ix3 in np.arange(1, npts + 1).reshape(-1): + matNumber = getattr( + localFieldsTheta, (np.array(["element", num2str(elNo)])) + ).matNumber(ix3) + if not len(blade.materials(matNumber).m) == 0: + # Determine mean and amplitude stress + MthetaFactor = ( + getattr( + localFieldsTheta, (np.array(["element", num2str(elNo)])) + ).sig11(ix3) + / Mtheta + ) + MthetaPlus90Factor = ( + getattr( + localFieldsThetaPlus90, (np.array(["element", num2str(elNo)])) + ).sig11(ix3) + / MthetaPlus90 + ) LthetaWithFactor = Ltheta LthetaPlus90WithFactor = LthetaPlus90 - LthetaWithFactor[1,:] = LthetaWithFactor[0,:] * MthetaFactor - LthetaWithFactor[:,1] = np.abs(LthetaWithFactor[:,0] * MthetaFactor) - LthetaPlus90WithFactor[1,:] = LthetaPlus90WithFactor[0,:] * MthetaPlus90Factor - LthetaPlus90WithFactor[:,1] = np.abs(LthetaPlus90WithFactor[:,0] * MthetaPlus90Factor) + LthetaWithFactor[1, :] = LthetaWithFactor[0, :] * MthetaFactor + LthetaWithFactor[:, 1] = np.abs(LthetaWithFactor[:, 0] * MthetaFactor) + LthetaPlus90WithFactor[1, :] = ( + LthetaPlus90WithFactor[0, :] * MthetaPlus90Factor + ) + LthetaPlus90WithFactor[:, 1] = np.abs( + LthetaPlus90WithFactor[:, 0] * MthetaPlus90Factor + ) m = blade.materials(matNumber).m XTEN = blade.materials(matNumber).uts(1) * SFs XCMP = blade.materials(matNumber).ucs(1) * SFs # Determine the maximum number of cycles for failure based # on available fatigue failure criterion or from data - #Calculate fatigue damage value, layer, and material for flap and edge cycles + # Calculate fatigue damage value, layer, and material for flap and edge cycles if isfinite(MthetaFactor): - if 'Shifted Goodman' == IEC.fatigueCriterion: - layerFDtheta = shiftedGoodman(LthetaWithFactor,XTEN,XCMP,m,SFs,SFf) + if "Shifted Goodman" == IEC.fatigueCriterion: + layerFDtheta = shiftedGoodman( + LthetaWithFactor, XTEN, XCMP, m, SFs, SFf + ) else: layerFDtheta = 0 if isfinite(MthetaPlus90Factor): - if 'Shifted Goodman' == IEC.fatigueCriterion: - layerFDthetaPlus90 = shiftedGoodman(LthetaPlus90WithFactor,XTEN,XCMP,m,SFs,SFf) + if "Shifted Goodman" == IEC.fatigueCriterion: + layerFDthetaPlus90 = shiftedGoodman( + LthetaPlus90WithFactor, XTEN, XCMP, m, SFs, SFf + ) else: layerFDthetaPlus90 = 0 layerFD = layerFDtheta + layerFDthetaPlus90 @@ -95,46 +147,44 @@ def calcFatigue(blade = None, elemFDedge = layerFDthetaPlus90 elemFDedgeLayer = layer(ix3) elemFDedgeMat = matNumber - FDvalue = np.array([elemFD,elemFDflap,elemFDedge]) - FDlayer = np.array([elemFDlayer,elemFDflapLayer,elemFDedgeLayer]) - FDmat = np.array([elemFDmat,elemFDflapMat,elemFDedgeMat]) - fatigueDamage[i,:] = np.array([elNo,FDvalue,FDlayer,FDmat]) + FDvalue = np.array([elemFD, elemFDflap, elemFDedge]) + FDlayer = 
np.array([elemFDlayer, elemFDflapLayer, elemFDedgeLayer]) + FDmat = np.array([elemFDmat, elemFDflapMat, elemFDedgeMat]) + fatigueDamage[i, :] = np.array([elNo, FDvalue, FDlayer, FDmat]) # plotFatigue=[plotFatigue;elements(binnedElements(i),1) FDvalue(1)]; - - #Output results in comma-delimited format - #table(fatigue_damage(:,1),fatigue_damage(:,2),fatigue_damage(:,5),fatigue_damage(:,8)) - - __,imax = np.amax(fatigueDamage[:,1]) - fatigueDamage = fatigueDamage[imax,:] - - + + # Output results in comma-delimited format + # table(fatigue_damage(:,1),fatigue_damage(:,2),fatigue_damage(:,5),fatigue_damage(:,8)) + + __, imax = np.amax(fatigueDamage[:, 1]) + fatigueDamage = fatigueDamage[imax, :] + # ######################### # for j=1:nLayers #Loop through layers # #Get material values # matNumber = sections.layers{sections.secID==sec_num}(j,3); - + # if ~isempty(blade.materials(matNumber).m) # #Determine mean and amplitude stress # MthetaFactor=stressesTheta(binnedElements(i),j+2)/Mtheta; # MthetaPlus90Factor=stressesThetaPlus90(binnedElements(i),j+2)/MthetaPlus90; - + # LthetaWithFactor=Ltheta; #Initialize for every layer # LthetaPlus90WithFactor=LthetaPlus90; - + # LthetaWithFactor(1,:)=LthetaWithFactor(1,:)*MthetaFactor; #Means # LthetaWithFactor(:,1)=abs(LthetaWithFactor(:,1)*MthetaFactor); #Amplitudes - + # LthetaPlus90WithFactor(1,:)=LthetaPlus90WithFactor(1,:)*MthetaPlus90Factor; #Means # LthetaPlus90WithFactor(:,1)=abs(LthetaPlus90WithFactor(:,1)*MthetaPlus90Factor); #Amplitudes - + # m=blade.materials(matNumber).m; # XTEN=materials(matNumber).XTEN*SFs; #Unfactor the factored resistance # XCMP=materials(matNumber).XCMP*SFs; #Unfactor the factored resistance - - + # # Determine the maximum number of cycles for failure based # # on available fatigue failure criterion or from data - + # #Calculate fatigue damage value, layer, and material for flap and edge cycles # if isfinite(MthetaFactor) # switch IEC.fatigueCriterion @@ -144,7 +194,7 @@ def calcFatigue(blade = None, # else # layerFDtheta=0; # end - + # if isfinite(MthetaPlus90Factor) # switch IEC.fatigueCriterion # case 'Shifted Goodman' #one case for now @@ -153,14 +203,12 @@ def calcFatigue(blade = None, # else # layerFDthetaPlus90=0; # end - - - + # layerFD=layerFDtheta+layerFDthetaPlus90; # # if elNo==1950 # # keyboard # # end - + # if layerFD>elemFD # elemFD=layerFD; # elemFDlayer=j; @@ -176,7 +224,7 @@ def calcFatigue(blade = None, # elemFDedgeLayer=j; # elemFDedgeMat=matNumber; # end - + # FDvalue=[elemFD elemFDflap elemFDedge]; # FDlayer=[elemFDlayer elemFDflapLayer elemFDedgeLayer]; # FDmat=[elemFDmat elemFDflapMat elemFDedgeMat]; @@ -184,90 +232,112 @@ def calcFatigue(blade = None, # end # end # # plotFatigue=[plotFatigue;elements(binnedElements(i),1) FDvalue(1)]; - + # end # #Output results in comma-delimited format # #table(fatigue_damage(:,1),fatigue_damage(:,2),fatigue_damage(:,5),fatigue_damage(:,8)) - + # [~,imax]=max(fatigueDamage(:,2)); # fatigueDamage = fatigueDamage(imax,:); - return fatigueDamage,plotFatigue - - -def getMomentMarkov(rccdata = None, - wt = None, - Yr = None, - simtime = None, - markovSize = None, - chSpan = None, - direction = None): + return fatigueDamage, plotFatigue + + +def getMomentMarkov( + rccdata=None, + wt=None, + Yr=None, + simtime=None, + markovSize=None, + chSpan=None, + direction=None, +): if chSpan == 1: - baseStr = 'Root' + baseStr = "Root" else: - baseStr = np.array(['Spn',int2str(chSpan - 1)]) - - #Search through first windspeed column of rccdata for the first appearance of - 
#baseStr + M + direction e.g. RootMyb1 + baseStr = np.array(["Spn", int2str(chSpan - 1)]) + + # Search through first windspeed column of rccdata for the first appearance of + # baseStr + M + direction e.g. RootMyb1 ct = 1 - - # of baseStr + M + direction e.g. RootMyb1. - while not (contains(rccdata[ct,1].label,baseStr) and contains(rccdata[ct,1].label,'M') and contains(rccdata[ct,1].label,direction)) : + # of baseStr + M + direction e.g. RootMyb1. + while not ( + contains(rccdata[ct, 1].label, baseStr) + and contains(rccdata[ct, 1].label, "M") + and contains(rccdata[ct, 1].label, direction) + ): ct = ct + 1 - means = [] - + ampl = [] - + cycles = [] - for w in np.arange(1,rccdata.shape[2-1]+1).reshape(-1): + for w in np.arange(1, rccdata.shape[2 - 1] + 1).reshape(-1): # put data from rccdata structure for this channel and wind speed into a temporary variable, data - data = rccdata[ct,w] + data = rccdata[ct, w] # Make sure that fatigue data are only summed accross # windspeeds for the same channel. - if w == 1 or str(data.label) == str(rccdata[ct,w - 1].label): - means = np.array([[means],[data.means]]) - ampl = np.array([[ampl],[data.amplitudes]]) - cycles = np.array([[cycles],[data.cycles / simtime * (60 * 60 * 24 * 365.24 * Yr) * wt(w)]]) + if w == 1 or str(data.label) == str(rccdata[ct, w - 1].label): + means = np.array([[means], [data.means]]) + ampl = np.array([[ampl], [data.amplitudes]]) + cycles = np.array( + [ + [cycles], + [data.cycles / simtime * (60 * 60 * 24 * 365.24 * Yr) * wt(w)], + ] + ) else: - raise Exception('Data channel name from current wind speed does not match the previous wind speed. Fatigue cycles cannot be summed.') - - Ni,EDGESi,BINi = histcounts(ampl,markovSize) - - Nj,EDGESj,BINj = histcounts(means,markovSize) - - #BIN, bin number assignment for each element in ampl or means - - markov = np.zeros((markovSize + 1,markovSize + 1)) - markov[np.arange[2,end()+1],1] = 0.5 * np.transpose((EDGESi(np.arange(1,end() - 1+1)) + EDGESi(np.arange(2,end()+1)))) - - markov[1,np.arange[2,end()+1]] = 0.5 * (EDGESj(np.arange(1,end() - 1+1)) + EDGESj(np.arange(2,end()+1))) - #indChecki=zeros(1,markovSize); + raise Exception( + "Data channel name from current wind speed does not match the previous wind speed. Fatigue cycles cannot be summed." 
+ ) + + Ni, EDGESi, BINi = histcounts(ampl, markovSize) + + Nj, EDGESj, BINj = histcounts(means, markovSize) + + # BIN, bin number assignment for each element in ampl or means + + markov = np.zeros((markovSize + 1, markovSize + 1)) + markov[np.arange[2, end() + 1], 1] = 0.5 * np.transpose( + (EDGESi(np.arange(1, end() - 1 + 1)) + EDGESi(np.arange(2, end() + 1))) + ) + + markov[1, np.arange[2, end() + 1]] = 0.5 * ( + EDGESj(np.arange(1, end() - 1 + 1)) + EDGESj(np.arange(2, end() + 1)) + ) + # indChecki=zeros(1,markovSize); for j in range(markovSize): ampIndecies = np.find(BINj == j) - #indCheck=0; + # indCheck=0; for i in range(markovSize): meanIndecies = np.find(BINi == i) - Indecies = np.intersect(ampIndecies,meanIndecies) - markov[i + 1,j + 1] = sum(cycles(Indecies)) - - #surf(markov(2:end,2:end)) + Indecies = np.intersect(ampIndecies, meanIndecies) + markov[i + 1, j + 1] = sum(cycles(Indecies)) + + # surf(markov(2:end,2:end)) return markov - - -def shiftedGoodman(markov, XTEN, XCMP, m, SFs, SFf): + + +def shiftedGoodman(markov, XTEN, XCMP, m, SFs, SFf): num_range = markov.shape[0] - 1 num_mean = markov.shape[1] - 1 damage = 0 FSloads = 1.0 - + for i in range(num_range): for j in range(num_mean): - sa = markov[i + 1,0] - sm = markov[0,j + 1] - n = markov(i + 1,j + 1) - N = ((XTEN + np.abs(XCMP) - np.abs(2 * sm * SFs * FSloads - XTEN + np.abs(XCMP))) / (2 * sa * (SFf) * FSloads)) ** m + sa = markov[i + 1, 0] + sm = markov[0, j + 1] + n = markov(i + 1, j + 1) + N = ( + ( + XTEN + + np.abs(XCMP) + - np.abs(2 * sm * SFs * FSloads - XTEN + np.abs(XCMP)) + ) + / (2 * sa * (SFf) * FSloads) + ) ** m damage = damage + n / N - - return damage \ No newline at end of file + + return damage diff --git a/src/pynumad/utils/interpolation.py b/src/pynumad/utils/interpolation.py index 1e9d027..b9aa679 100644 --- a/src/pynumad/utils/interpolation.py +++ b/src/pynumad/utils/interpolation.py @@ -1,36 +1,39 @@ import numpy as np -from scipy.interpolate import interp1d, PchipInterpolator,\ - PPoly, CubicSpline +from scipy.interpolate import interp1d, PchipInterpolator, PPoly, CubicSpline -def interpolator_wrap(x, v, xq, method = 'linear', axis = 0, extrapolation = None): +def interpolator_wrap(x, v, xq, method="linear", axis=0, extrapolation=None): """This function is designed to emulate the arg structure and output of matlabs interp1d function. Parameters ---------- x : array - v : array - xq : array - method : str + v : array + xq : array + method : str Defaults to 'linear'. - axis : int + axis : int Defaults to 0. - extrapolation :bool + extrapolation :bool Defaults to None. 
Returns: array : """ - if method == 'linear': - interpolator = interp1d(x,v,'linear', axis,bounds_error=False, fill_value= 'extrapolate') + if method == "linear": + interpolator = interp1d( + x, v, "linear", axis, bounds_error=False, fill_value="extrapolate" + ) vq = interpolator(xq) - elif method == 'pchip': - interpolator = PchipInterpolator(x,v,axis,extrapolate=True) + elif method == "pchip": + interpolator = PchipInterpolator(x, v, axis, extrapolate=True) vq = interpolator(xq) - elif method == 'spline': - interpolator = interp1d(x,v,'cubic', axis,bounds_error=False,fill_value='extrapolate') + elif method == "spline": + interpolator = interp1d( + x, v, "cubic", axis, bounds_error=False, fill_value="extrapolate" + ) vq = interpolator(xq) return vq # if method == 'pp': @@ -46,14 +49,14 @@ def interpolator_wrap(x, v, xq, method = 'linear', axis = 0, extrapolation = Non # if method == 'previous': # raise Exception("Method error for interpolator_wrap. 'previous' not implemented") - + def calcGenLinePP(blade_struct: dict): # TODO: docstring # Calculate blade reference line piecewise polynomials # blade_struct = calcGenLinePP(blade_struct) updates the piecewise # polynomial representation of the blade's Presweep and Precurve # reference lines. This function is called by NuMAD_genline. - + # The fields PresweepRef and PrecurveRef are required in blade_struct. # Each of these fields has the following data structure: # method: 'normal' | 'shear' @@ -73,35 +76,35 @@ def calcGenLinePP(blade_struct: dict): # disabled = returns straight line # pp: piecewise polynomial data created by this function # dpp: piecewise polynomial data of reference line's derivative - + # See also NuMAD_genline, PPoly, interp1. - + # PresweepRef spline_type = blade_struct["PresweepRef"]["pptype"] PresweepRef = blade_struct["PresweepRef"]["table"] - if spline_type in ['linear','spline','pchip']: + if spline_type in ["linear", "spline", "pchip"]: if PresweepRef.shape[0] > 1: - pp = CubicSpline(PresweepRef[:,0],PresweepRef[:,1]) + pp = CubicSpline(PresweepRef[:, 0], PresweepRef[:, 1]) else: - pp = CubicSpline([0,1], [0,0]) - + pp = CubicSpline([0, 1], [0, 0]) + blade_struct["PresweepRef"]["pp"] = pp # dc = np.diag(np.arange(pp.order - 1,1+- 1,- 1),1) - + # blade_struct["PresweepRef"]["dpp"] = PPoly(pp.breaks,pp.coefs * dc) - + # PrecurveRef spline_type = blade_struct["PrecurveRef"]["pptype"] PrecurveRef = blade_struct["PrecurveRef"]["table"] - if spline_type in ['linear','spline','pchip']: + if spline_type in ["linear", "spline", "pchip"]: if PrecurveRef.shape[0] > 1: - pp = CubicSpline(PrecurveRef[0,:],PrecurveRef[1,:]) + pp = CubicSpline(PrecurveRef[0, :], PrecurveRef[1, :]) else: - pp = CubicSpline([0,1], [0,0]) - + pp = CubicSpline([0, 1], [0, 0]) + blade_struct["PrecurveRef"]["pp"] = pp # dc = np.diag(np.arange(pp.order - 1,1+- 1,- 1),1) - + # blade_struct["PrecurveRef"]["dpp"] = PPoly(pp.breaks,pp.coefs * dc) - - return blade_struct \ No newline at end of file + + return blade_struct diff --git a/src/pynumad/utils/misc_utils.py b/src/pynumad/utils/misc_utils.py index 8b2e864..192780d 100644 --- a/src/pynumad/utils/misc_utils.py +++ b/src/pynumad/utils/misc_utils.py @@ -6,24 +6,25 @@ import numpy as np -def fullKeysFromSubStrings(key_list,subtring_list): - ''' - Example Usage: + +def fullKeysFromSubStrings(key_list, subtring_list): + """ + Example Usage: subString = ['B1N3TDx'] res=getFullKeyFromSubString(df.keys(),subString) - + output: ['B1N3TDxr_[m]'] - - Example Usage: + + Example Usage: subString = ['B2','TDx'] 
res=getFullKeyFromSubString(df.keys(),subString) - + output: ['B2N1TDxr_[m]', 'B2N2TDxr_[m]', 'B2N3TDxr_[m]', 'B2N4TDxr_[m]', 'B2N5TDxr_[m]', 'B2N6TDxr_[m]', 'B2N7TDxr_[m]', 'B2N8TDxr_[m]', 'B2N9TDxr_[m]', 'B2TipTDxr_[m]'] - - ''' + + """ matched_keys = [] for key in key_list: if all(substring in key.lower() for substring in subtring_list): @@ -37,8 +38,8 @@ def fullKeysFromSubStrings(key_list,subtring_list): # SED-like substitution def copy_and_replace(fin, fout, replacements): - inf = open(fin, 'r') - outf = open(fout, 'w') + inf = open(fin, "r") + outf = open(fout, "w") for line in inf: for src, target in replacements.items(): line = line.replace(src, target) @@ -46,8 +47,9 @@ def copy_and_replace(fin, fout, replacements): inf.close() outf.close() + def LARCetaT(alp0): - #TODO complete docstring + # TODO complete docstring """Compute the coefficient of transverse influence required for Larc failure criteria. "In the absence of biaxial test data, ?L can be estimated from the longitudinal @@ -58,21 +60,20 @@ def LARCetaT(alp0): ---------- alp0 Material fracture angle, degrees - + Returns ------- - etaT + etaT """ num = -1 - denom = np.tan(np.deg2rad(2*alp0)) - with np.errstate(divide='ignore', invalid='ignore'): + denom = np.tan(np.deg2rad(2 * alp0)) + with np.errstate(divide="ignore", invalid="ignore"): etaT = num / denom return etaT - -def LARCetaL(SL,YC,alp0): - #TODO complete docstring +def LARCetaL(SL, YC, alp0): + # TODO complete docstring """Compute the coefficient of longitudinal influence required for Larc failure criteria. "In the absence of biaxial test data, ?L can be estimated from the longitudinal @@ -82,9 +83,9 @@ def LARCetaL(SL,YC,alp0): Parameters ---------- SL - Lateral shear strength + Lateral shear strength YC - Transverse compressive strength + Transverse compressive strength alp0 Material fracture angle, degrees Returns @@ -92,23 +93,24 @@ def LARCetaL(SL,YC,alp0): etaL """ if alp0: - num = -SL * np.cos(np.deg2rad(2*alp0)) - denom = (YC*np.cos(np.deg2rad(alp0))**2) - with np.errstate(divide='ignore', invalid='ignore'): + num = -SL * np.cos(np.deg2rad(2 * alp0)) + denom = YC * np.cos(np.deg2rad(alp0)) ** 2 + with np.errstate(divide="ignore", invalid="ignore"): etaL = num / denom else: etaL = None return etaL + def _parse_data(data): """Helper function for parsing data from blade yaml files. - + Parameters ---------- - data + data a number or list of numbers where numbers can be floats or strings e.g. 3.0 or '1.2e2' - + Returns ------- parsed_data @@ -116,10 +118,12 @@ def _parse_data(data): """ try: # detect whether data is list - data+[] - except TypeError: # case for single data point + data + [] + except TypeError: # case for single data point parsed_data = float(data) else: - parsed_data = np.array([float(val) for val in data]) # case for list of data points + parsed_data = np.array( + [float(val) for val in data] + ) # case for list of data points finally: - return parsed_data \ No newline at end of file + return parsed_data
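
The natural-coordinate mapping in shell_region.py evaluates element shape functions at (xi, eta) pairs and multiplies by the region key points; for the linear triangle branch ("tri1") the shape functions are simply N = [1 - xi - eta, xi, eta] and XYZ = Nmat @ keyPts. A hand-evaluated sketch of just that branch (the key-point coordinates below are arbitrary illustration, not pynumad data):

import numpy as np

keyPts = np.array([[0.0, 0.0, 0.0],
                   [2.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])                  # three corner key points of a "tri1" region
eta = np.array([[0.5, 0.5], [1.0 / 3.0, 1.0 / 3.0]])  # natural (xi, eta) pairs to map
Nmat = np.column_stack((1.0 - eta[:, 0] - eta[:, 1], eta[:, 0], eta[:, 1]))
XYZ = Nmat @ keyPts                                   # row 0: midpoint of edge 2-3 -> (1.0, 0.5, 0.0)
                                                      # row 1: triangle centroid   -> (2/3, 1/3, 0.0)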
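
spatial_grid_list2d.py and spatial_grid_list3d.py implement the same bucket-grid lookup: addEntry floors a coordinate into its grid cell, and findInRadius returns every stored label from the cells covering the query box, a cheap superset to filter afterwards. A minimal usage sketch of the 3D class on made-up point data:

import numpy as np
from pynumad.shell.spatial_grid_list3d import SpatialGridList3D

pts = np.random.rand(200, 3)                      # points in the unit cube
grid = SpatialGridList3D(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.25, 0.25, 0.25)
for label, p in enumerate(pts):
    grid.addEntry(label, p)                       # bucket each point label by its cell

query = np.array([0.4, 0.4, 0.4])
candidates = grid.findInRadius(query, 0.1)        # labels from all cells overlapping the query box
neighbors = [i for i in candidates if np.linalg.norm(pts[i] - query) <= 0.1]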
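
interpolation.py's interpolator_wrap keeps its MATLAB-interp1-like signature and dispatches to scipy (interp1d for "linear" and "spline", PchipInterpolator for "pchip"), always extrapolating. A short sketch with the same inputs test_interpolator.py builds:

import numpy as np
from pynumad.utils.interpolation import interpolator_wrap

x = np.linspace(0, 10, 5)
v = np.column_stack((np.sin(x), x**2))            # two channels, interpolated along axis 0
xq = np.linspace(0, 10, 10)

vq_linear = interpolator_wrap(x, v, xq, method="linear")
vq_pchip = interpolator_wrap(x, v, xq, method="pchip")
vq_spline = interpolator_wrap(x, v, xq, method="spline")
assert vq_linear.shape == (10, 2)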
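
affinetrans.py builds 4x4 homogeneous matrices: rotation wraps scipy's Rotation.from_euler (so the angle is in radians) and translation fills the last column. Chained the usual way:

import numpy as np
from pynumad.utils.affinetrans import rotation, translation

R = rotation("z", np.pi / 2)                      # 4x4 rotation about z, angle in radians
T = translation(2.0, 3.0, 4.0)                    # 4x4 translation
p = np.array([1.0, 0.0, 0.0, 1.0])                # homogeneous point
p_moved = T @ R @ p                               # rotate first, then translate -> [2, 4, 4, 1]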
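
In fatigue.py, shiftedGoodman treats the Markov matrix as mean-stress bin centers across the first row, amplitude bin centers down the first column, and cycle counts in the interior, and accumulates Miner damage bin by bin (note the count is still read with MATLAB call syntax, markov(i + 1, j + 1), which would need square brackets before the function runs). The per-bin relation it applies, evaluated directly here with placeholder material numbers:

import numpy as np

XTEN, XCMP = 900.0e6, 700.0e6        # factored tensile / compressive strengths (Pa), placeholders
m, SFs, SFf = 10.0, 1.2, 1.2         # fatigue exponent and safety factors, placeholders
sa, sm, n = 40.0e6, 10.0e6, 2.0e5    # bin amplitude, bin mean, and cycle count, placeholders

# allowable cycles for this bin (FSloads = 1.0, as in shiftedGoodman)
N = ((XTEN + np.abs(XCMP) - np.abs(2 * sm * SFs - XTEN + np.abs(XCMP))) / (2 * sa * SFf)) ** m
damage = n / N                       # Miner contribution; the routine sums this over all bins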
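
The LaRC helpers in misc_utils.py compute the transverse and longitudinal coefficients of influence from the fracture angle: etaT = -1 / tan(2 * alp0) and etaL = -SL * cos(2 * alp0) / (YC * cos(alp0)**2), with alp0 in degrees. For example (the SL and YC values here are placeholders, signed the same way test_misc.py signs them):

from pynumad.utils.misc_utils import LARCetaT, LARCetaL

alp0 = 53.0                          # material fracture angle in degrees, as in test_misc.py
etaT = LARCetaT(alp0)                # about 0.29 for alp0 = 53 degrees
etaL = LARCetaL(6.0e7, -2.0e8, alp0) # SL = shear strength, YC = compressive strength (negative)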
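
_parse_data at the bottom of misc_utils.py normalizes values read from the blade yaml files: a scalar (float or numeric string) becomes a float, and a list of them becomes a float ndarray; the data + [] probe is what distinguishes the two cases. For example:

from pynumad.utils.misc_utils import _parse_data

_parse_data("6.4023E8")              # -> 640230000.0
_parse_data([50.0, "1e10"])          # -> array([5.0000e+01, 1.0000e+10])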