
Commit e6cb95f
Merge pull request #1088 from liuchangshiye/model-selection-psql-msmlp
Add the SumErrorLayer implementation
nudles authored Sep 7, 2023
2 parents ec2d81c + 2cf2a5b commit e6cb95f
Showing 1 changed file with 16 additions and 9 deletions.
examples/model_selection_psql/msmlp/model.py (25 changes: 16 additions & 9 deletions)
@@ -32,9 +32,7 @@

singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}

-#### self-defined loss begin
-
-### reference from autograd.py
+### refer to autograd.py
class SumError(Operator):

    def __init__(self):
@@ -52,7 +50,7 @@ def forward(self, x):
        # self.n *= s
        # loss /= self.n
        return loss

    def backward(self, dy=1.0):
        # dx = self.err
        dev = device.get_default_device()
@@ -62,12 +60,21 @@ def backward(self, dy=1.0):
        dx *= dy
        return dx

+### called in the MSMLP class for sum error loss gradients
def se_loss(x):
    # assert x.shape == t.shape, "input and target shape different: %s, %s" % (
    # x.shape, t.shape)
    return SumError()(x)[0]

+### refer to layer.py
+class SumErrorLayer(Layer):
+    """
+    Generate a SumError Layer
+    """
+
+    def __init__(self):
+        super(SumErrorLayer, self).__init__()
+
+    def forward(self, x):
+        return se_loss(x)
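
Aside: SumErrorLayer is a thin wrapper over se_loss, so the new loss can be exercised on its own. A minimal sketch, not part of the commit, assuming this module's imports and the standard SINGA tensor API:

from singa import device, tensor

dev = device.get_default_device()
x = tensor.Tensor((4, 10), dev)   # a dummy batch of model outputs
x.set_value(1.0)

sum_err = SumErrorLayer()
loss = sum_err(x)                 # equivalent to se_loss(x), i.e. SumError()(x)[0]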

class MSMLP(model.Model):

    def __init__(self, data_size=10, perceptron_size=100, num_classes=10):
@@ -79,8 +86,8 @@ def __init__(self, data_size=10, perceptron_size=100, num_classes=10):
        self.linear1 = layer.Linear(perceptron_size)
        self.linear2 = layer.Linear(num_classes)
        self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
-        self.sum_error = SumErrorLayer()
-
+        self.sum_error = SumErrorLayer() # for synflow backward
    def forward(self, inputs):
        y = self.linear1(inputs)
        y = self.relu(y)
…
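
The "# for synflow backward" comment suggests this loss exists to drive a SynFlow-style backward pass, in which a constant gradient flows from the summed outputs back to every parameter. Below is a hypothetical sketch of such a scoring pass, not taken from this commit: the compile/forward/backward calls follow SINGA's usual example pattern, and the all-ones probe input and the |weight * grad| saliency are assumptions borrowed from the SynFlow pruning method.

from singa import autograd, device, tensor

dev = device.get_default_device()
x = tensor.Tensor((1, 10), dev)
x.set_value(1.0)                      # all-ones probe input, per SynFlow

model = MSMLP(data_size=10, num_classes=10)
model.compile([x], is_train=True, use_graph=False)
model.train()

out = model(x)                        # forward through the MLP
loss = model.sum_error(out)           # scalar sum-error over the outputs
for p, g in autograd.backward(loss):  # parameters and their gradients
    score = tensor.abs(p * g)         # SynFlow saliency |theta * dL/dtheta|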
