Skip to content

Commit

Permalink
Add div layer.
Browse files Browse the repository at this point in the history
  • Loading branch information
liuliu committed Dec 15, 2023
1 parent 8e4a5a0 commit a916bbf
Show file tree
Hide file tree
Showing 3 changed files with 104 additions and 0 deletions.
59 changes: 59 additions & 0 deletions lib/nnc/ccv_cnnp_model_addons.c
Original file line number Diff line number Diff line change
Expand Up @@ -1684,6 +1684,65 @@ static ccv_cnnp_model_t* _ccv_cnnp_scalar_mul_copy(const ccv_cnnp_model_t* const
return ccv_cnnp_scalar_mul(self->a, self->super.name);
}

// MARK - Div Layer

// State for the element-wise division layer. Supports two modes selected at
// construction time: plain division (two inputs, a / b) and reciprocal
// (one input, 1 / a).
typedef struct {
ccv_cnnp_model_t super; // Base model; must be first so this struct can be cast to ccv_cnnp_model_t.
ccv_nnc_tensor_symbol_t output; // Backing storage for the model's single output symbol.
int reciprocal; // Non-zero: take one input and compute 1 / input; zero: compute inputs[0] / inputs[1].
} ccv_cnnp_model_div_t;

// Build callback: emits an EWDIV exec node into the symbolic graph.
// In reciprocal mode the numerator slot is filled with NO_TENSOR_SYMBOL so the
// command divides into the single input (the reciprocal test below expects
// 1 / input); otherwise both inputs are wired through as numerator / denominator.
static void _ccv_cnnp_div_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
{
const ccv_cnnp_model_div_t* const self = (const ccv_cnnp_model_div_t*)super;
assert(output_size == 1);
ccv_nnc_tensor_param_t input_params[2];
int i;
ccv_nnc_tensor_param_t output_params;
const ccv_nnc_cmd_t div = CMD_EWDIV_FORWARD();
if (self->reciprocal)
{
assert(input_size == 1);
// Both parameter slots mirror the lone input so tensor-auto can infer the
// output shape; the actual numerator is NO_TENSOR_SYMBOL below.
input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
input_params[1] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
ccv_nnc_hint_tensor_auto(div, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
ccv_nnc_graph_exec_symbol_new(graph, div, TENSOR_SYMBOL_LIST(NO_TENSOR_SYMBOL, inputs[0]), outputs, output_size, "div");
} else {
assert(input_size == 2);
for (i = 0; i < 2; i++)
input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
// Infer the output tensor parameters from both operands (handles broadcasting).
ccv_nnc_hint_tensor_auto(div, input_params, input_size, ccv_nnc_no_hint, &output_params, 1);
outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
ccv_nnc_graph_exec_symbol_new(graph, div, inputs, input_size, outputs, output_size, "div");
}
}

static ccv_cnnp_model_t* _ccv_cnnp_div_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the div layer: graph construction and deep-copy hooks.
static const ccv_cnnp_model_vtab_t ccv_cnnp_div_isa = {
.build = _ccv_cnnp_div_build,
.copy = _ccv_cnnp_div_copy,
};

// Constructs a div layer model. When reciprocal is non-zero the model takes a
// single input and computes 1 / input; otherwise it takes two inputs and
// computes inputs[0] / inputs[1]. The caller owns the returned model and
// releases it with ccv_cnnp_model_free.
ccv_cnnp_model_t* ccv_cnnp_div(const int reciprocal, const char* const name)
{
	ccv_cnnp_model_div_t* const model_div = (ccv_cnnp_model_div_t*)cccalloc(1, sizeof(ccv_cnnp_model_div_t));
	model_div->reciprocal = reciprocal;
	model_div->super.isa = &ccv_cnnp_div_isa;
	model_div->super.outputs = &model_div->output;
	model_div->super.output_size = 1;
	// Reciprocal mode consumes one tensor; plain division consumes two.
	model_div->super.input_size = reciprocal ? 1 : 2;
	ccv_cnnp_model_copy_name(&model_div->super, name);
	return (ccv_cnnp_model_t*)model_div;
}

// Copy callback: builds a fresh div model with the same mode and name.
static ccv_cnnp_model_t* _ccv_cnnp_div_copy(const ccv_cnnp_model_t* const super, void* const context)
{
	const ccv_cnnp_model_div_t* const model_div = (const ccv_cnnp_model_div_t*)super;
	return ccv_cnnp_div(model_div->reciprocal, model_div->super.name);
}

// MARK - Transpose Layer

typedef struct {
Expand Down
7 changes: 7 additions & 0 deletions lib/nnc/ccv_nnc.h
Original file line number Diff line number Diff line change
Expand Up @@ -4272,6 +4272,13 @@ CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_mul(const float p, const char* const
* @return A scalar multiplication model.
*/
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scalar_mul(const float a, const char* const name);
/**
 * Divide one input tensor by another, element-wise.
 * @param reciprocal Only take one tensor input, effectively compute 1 / input.
 * @param name The unique name of the model.
 * @return A model that can be applied with two inputs (or one input when reciprocal is set), and generate output that is the quotient of the inputs.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_div(const int reciprocal, const char* const name);
/**
* A matrix transpose model.
* @param axis_a The axis to be exchanged with axis_b
Expand Down
38 changes: 38 additions & 0 deletions test/unit/nnc/dynamic.graph.tests.c
Original file line number Diff line number Diff line change
Expand Up @@ -461,6 +461,44 @@ TEST_CASE("dynamic graph to evaluate cnnp model without any parameters")
ccv_nnc_dynamic_graph_free(graph);
}

// Exercises the two-input div layer on a dynamic graph:
// forward c = a / b, and gradient da = d(a/b)/da = 1 / b.
TEST_CASE("dynamic graph to evaluate cnnp model without any parameters with div")
{
ccv_nnc_dynamic_graph_t* const graph = ccv_nnc_dynamic_graph_new();
ccv_nnc_tensor_variable_t a = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 1));
ccv_nnc_tensor_from_variable(graph, a)->data.f32[0] = 1.23;
ccv_nnc_tensor_variable_t b = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 1));
ccv_nnc_tensor_from_variable(graph, b)->data.f32[0] = 2;
ccv_nnc_tensor_variable_t c = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 1));
ccv_cnnp_model_t* const div = ccv_cnnp_div(0, "div");
ccv_nnc_dynamic_graph_evaluate(graph, div, 1, TENSOR_VARIABLE_LIST(a, b), TENSOR_VARIABLE_LIST(c), 0, 0);
ccv_nnc_tensor_variable_t da = ccv_nnc_tensor_variable_new(graph);
ccv_nnc_dynamic_graph_backward(graph, TENSOR_VARIABLE_LIST(c), 0, TENSOR_VARIABLE_LIST(a), TENSOR_VARIABLE_LIST(da), 0);
// The model has no trainable parameters; apply_gradients with empty lists
// just flushes the pending backward state.
ccv_cnnp_model_set_minimizer(div, CMD_SGD_FORWARD(0, 0.01, 1, 0.01, 0, 0), 0, 0, 0);
ccv_nnc_dynamic_graph_apply_gradients(graph, CMD_SGD_FORWARD(0, 0.01, 1, 0.01, 0, 0), TENSOR_VARIABLE_LIST(), TENSOR_VARIABLE_LIST(), 0, 0, 0);
ccv_cnnp_model_free(div);
REQUIRE_EQ_WITH_TOLERANCE(ccv_nnc_tensor_from_variable(graph, c)->data.f32[0], 1.23 / 2, 1e-5, "should be equal");
REQUIRE_EQ_WITH_TOLERANCE(ccv_nnc_tensor_from_variable(graph, da)->data.f32[0], 1.0 / 2, 1e-5, "should be equal");
ccv_nnc_dynamic_graph_free(graph);
}

// Exercises the one-input reciprocal mode of the div layer:
// forward c = 1 / a, and gradient da = d(1/a)/da = -1 / a^2.
TEST_CASE("dynamic graph to evaluate cnnp model without any parameters with reciprocal")
{
ccv_nnc_dynamic_graph_t* const graph = ccv_nnc_dynamic_graph_new();
ccv_nnc_tensor_variable_t a = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 1));
ccv_nnc_tensor_from_variable(graph, a)->data.f32[0] = 1.23;
ccv_nnc_tensor_variable_t c = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 1));
ccv_cnnp_model_t* const div = ccv_cnnp_div(1, "div");
ccv_nnc_dynamic_graph_evaluate(graph, div, 1, TENSOR_VARIABLE_LIST(a), TENSOR_VARIABLE_LIST(c), 0, 0);
ccv_nnc_tensor_variable_t da = ccv_nnc_tensor_variable_new(graph);
ccv_nnc_dynamic_graph_backward(graph, TENSOR_VARIABLE_LIST(c), 0, TENSOR_VARIABLE_LIST(a), TENSOR_VARIABLE_LIST(da), 0);
// No trainable parameters; empty apply_gradients flushes backward state.
ccv_cnnp_model_set_minimizer(div, CMD_SGD_FORWARD(0, 0.01, 1, 0.01, 0, 0), 0, 0, 0);
ccv_nnc_dynamic_graph_apply_gradients(graph, CMD_SGD_FORWARD(0, 0.01, 1, 0.01, 0, 0), TENSOR_VARIABLE_LIST(), TENSOR_VARIABLE_LIST(), 0, 0, 0);
ccv_cnnp_model_free(div);
REQUIRE_EQ_WITH_TOLERANCE(ccv_nnc_tensor_from_variable(graph, c)->data.f32[0], 1 / 1.23, 1e-5, "should be equal");
REQUIRE_EQ_WITH_TOLERANCE(ccv_nnc_tensor_from_variable(graph, da)->data.f32[0], -1 / (1.23 * 1.23), 1e-5, "should be equal");
ccv_nnc_dynamic_graph_free(graph);
}

TEST_CASE("dynamic graph to evaluate cnnp model and simply accumulate gradients")
{
ccv_nnc_dynamic_graph_t* const graph = ccv_nnc_dynamic_graph_new();
Expand Down

0 comments on commit a916bbf

Please sign in to comment.