
Commit

Merge pull request #5 from PML-UCF/develop
Release version 0.0.2
felipeacviana authored Jul 29, 2019
2 parents ec103d7 + 0488a82 commit a5e0b5f
Showing 25 changed files with 469,448 additions and 139 deletions.
1 change: 1 addition & 0 deletions pinn/__init__.py
@@ -46,3 +46,4 @@
"""

from . import layers
__version__ = '0.0.2'
9 changes: 9 additions & 0 deletions pinn/layers/__init__.py
@@ -45,13 +45,22 @@
""" PINN layers
"""

from .util import interpolate

from .core import getScalingDenseLayer
from .core import inputsSelection

from .core import TableInterpolation

from .physics import StressIntensityRange
from .physics import ParisLaw
from .physics import SNCurve

from .physics import WalkerModel

from .rnn import CumulativeDamageCell

del physics
del rnn
del core
del util
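
The re-exports above, together with the `del` statements, define the public surface of `pinn.layers`: user code imports the layer classes directly from the package rather than from the submodules. A minimal sketch of the resulting import pattern (illustrative only, not part of this commit):

# Everything below is re-exported by pinn/layers/__init__.py
from pinn.layers import (interpolate, getScalingDenseLayer, inputsSelection,
                         TableInterpolation, StressIntensityRange, ParisLaw,
                         SNCurve, WalkerModel, CumulativeDamageCell)
# The submodules themselves (physics, rnn, core, util) are deleted from the
# namespace, so only the re-exported names remain importable from pinn.layers.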
105 changes: 95 additions & 10 deletions pinn/layers/core.py
@@ -41,27 +41,112 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================

""" Core PINN layers
"""
from tensorflow.python.keras.layers import Dense

from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import constraints

from tensorflow.keras.layers import Dense
from tensorflow.python.keras.engine.base_layer import Layer

from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops

from tensorflow.linalg import diag as tfDiag
from tensorflow.math import reciprocal
from tensorflow.python.ops import array_ops

from pinn.layers import interpolate

from tensorflow import shape, expand_dims, constant, cast

def getScalingDenseLayer(input_location, input_scale, dtype):
input_location = ops.convert_to_tensor(input_location, dtype=dtype)
input_scale = ops.convert_to_tensor(input_scale, dtype=dtype)
recip_input_scale = reciprocal(input_scale)
import numpy as np


def getScalingDenseLayer(input_location, input_scale):
recip_input_scale = np.reciprocal(input_scale)

waux = tfDiag(recip_input_scale)
waux = np.diag(recip_input_scale)
baux = -input_location*recip_input_scale

dL = Dense(input_location.get_shape()[0], activation = None, input_shape = input_location.shape)
dL = Dense(input_location.shape[0], activation = None, input_shape = input_location.shape)
dL.build(input_shape = input_location.shape)
dL.set_weights([waux, baux])
dL.trainable = False
return dL


def inputsSelection(inputs_shape, ndex):
if not hasattr(ndex,'index'):
ndex = list(ndex)
input_mask = np.zeros([inputs_shape[-1], len(ndex)])
for i in range(inputs_shape[-1]):
for v in ndex:
if i == v:
input_mask[i,ndex.index(v)] = 1

dL = Dense(len(ndex), activation = None, input_shape = inputs_shape,
use_bias = False)
dL.build(input_shape = inputs_shape)
dL.set_weights([input_mask])
dL.trainable = False
return dL
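
For reference, a minimal sketch of how the two NumPy-based helpers above might be called (values invented; not part of this commit):

import numpy as np
from pinn.layers import getScalingDenseLayer, inputsSelection

# Frozen normalization layer: output = (x - location)/scale for a 2-feature input
scaling = getScalingDenseLayer(input_location=np.asarray([0.0, 10.0]),
                               input_scale=np.asarray([1.0, 5.0]))

# Frozen selection layer that keeps only columns 0 and 2 of a 3-wide input
select = inputsSelection(inputs_shape=(None, 3), ndex=[0, 2])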


class TableInterpolation(Layer):
""" Table lookup and interpolation implementation.
Evaluates the provided query points against the provided table and outputs the interpolated result.
Remarks on this class:
- Only supports 2-D tables (f(x1,x2) = y)
- If a 1-D table is to be used, it needs to be converted to a 2-D table (see file /samples/core/table_lookup/run01_table_lookup_sample.py)
- Extrapolation is not supported (provide a table grid large enough for your case)
- Class returns limit values in case of extrapolation attempt.
- Provided tables should be equally spaced.
"""
def __init__(self,
kernel_initializer = 'glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
table_shape=(1,4,4,1),
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(TableInterpolation, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)

self.table_shape = table_shape

def build(self, input_shape, **kwargs):
self.grid = self.add_weight("grid",
shape = self.table_shape,
initializer = self.kernel_initializer,
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.bounds = self.add_weight("bounds",
shape = [2,2],
initializer = self.kernel_initializer,
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.built = True

def call(self, inputs):
self.grid = ops.convert_to_tensor(self.grid, dtype=self.dtype)
self.bounds = ops.convert_to_tensor(self.bounds,dtype=self.dtype)
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
queryPoints_ind = ((cast(shape(self.grid)[1:3], dtype=self.dtype))-constant(1.0))*(inputs-self.bounds[0])/(self.bounds[1]-self.bounds[0])
if common_shapes.rank(inputs) == 2:
queryPoints_ind = expand_dims(queryPoints_ind,0)
output = interpolate(self.grid, queryPoints_ind)
if common_shapes.rank(inputs) == 2:
output = array_ops.reshape(output,(array_ops.shape(output)[1],) + (array_ops.shape(output)[2],))
return output

def compute_output_shape(self, input_shape):
aux_shape = tensor_shape.TensorShape((None,1))

return aux_shape[:-1].concatenate(1)
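
A hedged sketch of how `TableInterpolation` might be configured, following the `add_weight` order above (grid first, then bounds); the table data and bounds are invented:

import numpy as np
from pinn.layers import TableInterpolation

data = np.arange(16.0, dtype=np.float32).reshape(1, 4, 4, 1)     # f(x1,x2) on an equally spaced 4x4 grid
bounds = np.asarray([[0.0, 0.0], [1.0, 1.0]], dtype=np.float32)  # [[x1_min, x2_min], [x1_max, x2_max]]

table = TableInterpolation(table_shape=data.shape)
table.build(input_shape=(None, 2))
table.set_weights([data, bounds])        # weight order matches add_weight: grid, then bounds
# y = table(np.asarray([[0.25, 0.5]], dtype=np.float32))  # interpolated value at the query point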
125 changes: 116 additions & 9 deletions pinn/layers/physics.py
@@ -49,12 +49,18 @@

from tensorflow.python.keras.engine.base_layer import Layer

# TODO: adapt to tf2
from tensorflow.compat.v1 import placeholder

from tensorflow.python.ops import gen_math_ops, array_ops

from tensorflow import reshape

from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import constraints

from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import common_shapes

@@ -83,9 +89,7 @@ def __init__(self,

def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape[-1].value is None:
raise ValueError('The last dimension of the inputs to `StressIntensityRange` '
'should be defined. Found `None`.')

self.kernel = self.add_weight("kernel",
shape = [1],
initializer = self.kernel_initializer,
@@ -96,17 +100,21 @@ def build(self, input_shape):
self.built = True

def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
if common_shapes.rank(inputs) != 2:
raise ValueError('`StressIntensityRange` only takes "rank 2" inputs.')
output = self.kernel*inputs[:,1]*gen_math_ops.sqrt(np.pi*inputs[:,0])

output = gen_math_ops.mul(self.kernel*inputs[:,1], gen_math_ops.sqrt(np.pi*inputs[:, 0]))
output = array_ops.reshape(output, (array_ops.shape(output)[0], 1))

# outputs should be (None, 1), so it is still rank = 2
return output

def compute_output_shape(self, input_shape):
aux_shape = tensor_shape.TensorShape((None,1))
return aux_shape[:-1].concatenate(1)
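
As a hedged numeric illustration of the operation above (assuming `inputs[:,0]` is the crack length and `inputs[:,1]` the nominal stress range; values invented):

import numpy as np
F, a, dS = 1.0, 0.005, 100.0        # kernel (geometry factor), crack length [m], stress range [MPa]
dK = F * dS * np.sqrt(np.pi * a)    # mirrors kernel*inputs[:,1]*sqrt(pi*inputs[:,0])
print(dK)                           # ~12.53 MPa*sqrt(m)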


class ParisLaw(Layer):
"""Just your regular Paris law implementation.
`ParisLaw` implements the operation:
@@ -145,4 +153,103 @@ def call(self, inputs):
def compute_output_shape(self, input_shape):
aux_shape = tensor_shape.TensorShape((None,1))
return aux_shape[:-1].concatenate(1)



class SNCurve(Layer):
""" SN-Curve implementation (REF: https://en.wikipedia.org/wiki/Fatigue_(material)#Stress-cycle_(S-N)_curve)
`output = 1/10**(a*inputs+b)`
where:
* `a`,`b` parametric constants for linear curve,
* input is cyclic stress, load, or temperature (depends on the application) in log10 space,
* output is delta damage
Notes:
* This layer represents an SN-Curve linearized in log10-log10 space
* The expression (a*inputs+b) gives the number of cycles, in log10 space, corresponding to the stress level
Linearization:
* For an SN-Curve of the form N = C1*(S**C2), take log10 of both sides:
* log10(N) = log10(C1) + C2*log10(S), which yields:
C2 = a
log10(C1) = b
log10(S) = inputs
"""
def __init__(self,
kernel_initializer = 'glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(SNCurve, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)

def build(self, input_shape, **kwargs):
self.kernel = self.add_weight("kernel",
shape = [2],
initializer = self.kernel_initializer,
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.built = True

def call(self, inputs):
output = 1/10**(self.kernel[0]*inputs+self.kernel[1])
return output

def compute_output_shape(self, input_shape):
aux_shape = tensor_shape.TensorShape((None,1))
return aux_shape[:-1].concatenate(1)
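
A hedged numeric reading of the docstring above (coefficients invented):

a, b = -3.0, 12.0            # stand-ins for kernel[0] and kernel[1] of the linearized curve
log10_S = 2.5                # input: stress level in log10 space (S ~ 316)
N = 10**(a*log10_S + b)      # cycles to failure in this example: 10**4.5 ~ 31623
delta_damage = 1.0/N         # the layer output, i.e. 1/10**(a*inputs+b)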


class WalkerModel(Layer):
"""A modified version of Paris law to take into account the stress ratio effect.
`WalkerModel` implements the operation:
`output = C*(inputs[:,0]**m)`
where `C` and `m` are constants, and `C` is obtained from the following
relation:
`C = Co/((1-inputs[:,1])**(m*(1-gamma))))`
* input[:,0] is the nominal stress range
* input[:,1] is the stress ratio, and
* sig is a custumized sigmoid function to calibrate Walker's coefficient (gamma)
with respect to the stress ratio value.
"""
def __init__(self,
kernel_initializer = 'glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(WalkerModel, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)

def build(self, input_shape, **kwargs):
self.kernel = self.add_weight("kernel",
shape = [4],
initializer = self.kernel_initializer,
dtype = self.dtype,
trainable = True,
**kwargs)
self.built = True

def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
if common_shapes.rank(inputs) != 2:
raise ValueError('`WalkerModel` only takes "rank 2" inputs.')

sig = 1/(1+gen_math_ops.exp(self.kernel[0]*inputs[:,1]))
gamma = sig*self.kernel[1]
C = self.kernel[2]/((1-inputs[:,1])**(self.kernel[3]*(1-gamma)))
output = C*(inputs[:,0]**self.kernel[3])
output = array_ops.reshape(output,(array_ops.shape(output)[0],1))
return output

def compute_output_shape(self, input_shape):
aux_shape = tensor_shape.TensorShape((None,1))
return aux_shape[:-1].concatenate(1)
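
A hedged numeric sketch that follows the same sequence of operations as `call` above (all coefficient values invented):

import numpy as np
k0, k1, Co, m = 1.0, 0.5, 1e-10, 3.0      # stand-ins for kernel[0..3]
dS, R = 100.0, 0.1                        # nominal stress range and stress ratio
sig = 1.0/(1.0 + np.exp(k0*R))            # customized sigmoid on the stress ratio
gamma = sig*k1                            # Walker's coefficient
C = Co/((1.0 - R)**(m*(1.0 - gamma)))     # stress-ratio-corrected Paris constant
da = C*dS**m                              # crack growth increment per cycle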

2 changes: 1 addition & 1 deletion pinn/layers/rnn.py
@@ -86,7 +86,7 @@ def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
if self.initial_damage is None:
initial_state = _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
else:
initial_state = self.initial_damage
initial_state = ops.convert_to_tensor(self.initial_damage, dtype=self.dtype)

return initial_state
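
With this change a plain NumPy array can be supplied as the initial damage and is converted to a tensor of the cell's dtype when the initial state is generated. A hedged sketch (the cell's constructor arguments are not shown in this diff, hence the placeholders):

import numpy as np
batch_size = 32                                            # assumed number of fatigue histories
a0 = np.full((batch_size, 1), 0.005, dtype=np.float32)     # assumed per-history initial damage
# cell = CumulativeDamageCell(..., initial_damage=a0)      # remaining arguments omitted here
# rnn = tf.keras.layers.RNN(cell, return_sequences=True)   # typical Keras wrapping of an RNN cell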

