Fix quadratic complexity when building an FFNN model.
bluescarni committed Nov 10, 2023
1 parent e63aae3 commit 9016ef8
Showing 1 changed file with 12 additions and 4 deletions.
src/model/ffnn.cpp (16 changes: 12 additions & 4 deletions)
@@ -19,6 +19,7 @@
 
 #include <heyoka/config.hpp>
 #include <heyoka/expression.hpp>
+#include <heyoka/math/sum.hpp>
 #include <heyoka/model/ffnn.hpp>
 
 HEYOKA_BEGIN_NAMESPACE
@@ -43,21 +44,28 @@ std::vector<expression> compute_layer(su32 layer_id, const std::vector<expressio
     auto n_neurons_prev_layer = su32(inputs.size());
     auto n_neurons_curr_layer = n_neurons[layer_id];
 
-    std::vector<expression> retval(static_cast<std::vector<expression>::size_type>(n_neurons_curr_layer), 0_dbl);
+    std::vector<expression> retval, tmp_sum;
+    retval.reserve(n_neurons_curr_layer);
 
     for (su32 i = 0; i < n_neurons_curr_layer; ++i) {
+        // Clear the summation terms.
+        tmp_sum.clear();
+
         for (su32 j = 0; j < n_neurons_prev_layer; ++j) {
 
             // Add the weight and update the weight counter.
-            retval[i] += nn_wb[wcounter] * inputs[j];
+            tmp_sum.push_back(nn_wb[wcounter] * inputs[j]);
             ++wcounter;
         }
 
         // Add the bias and update the counter.
-        retval[i] += nn_wb[bcounter + n_net_w];
+        tmp_sum.push_back(nn_wb[bcounter + n_net_w]);
         ++bcounter;
 
         // Activation function.
-        retval[i] = activation(retval[i]);
+        retval.push_back(activation(sum(tmp_sum)));
     }
 
     return retval;
 }
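
A note on the fix: the quadratic complexity named in the commit title comes from the repeated binary +=. Each retval[i] += term builds a new binary sum from the expression accumulated so far, plausibly re-examining all of its terms for simplification, so the i-th update costs work proportional to i and n inputs cost O(n^2) per neuron. Collecting the terms in tmp_sum and calling sum() once instead builds a single n-ary sum, linear in the number of terms. Below is a minimal standalone sketch contrasting the two patterns; it is an illustrative example, not part of the commit, and assumes only the heyoka API already visible in the diff (the _var/_dbl literals, expression arithmetic, and sum()):

    #include <vector>

    #include <heyoka/expression.hpp>
    #include <heyoka/math/sum.hpp>

    int main()
    {
        using namespace heyoka;

        // Illustrative sketch (not from the commit): term count and
        // variable names are arbitrary.
        const auto n = 1000u;
        auto x = "x"_var;

        // Quadratic pattern: every += builds a new sum from the
        // accumulated expression, so step i pays a cost that grows with i.
        auto acc = 0_dbl;
        for (auto i = 0u; i < n; ++i) {
            acc += expression{static_cast<double>(i + 1u)} * x;
        }

        // Linear pattern: gather the terms first, then build one
        // n-ary sum in a single pass.
        std::vector<expression> terms;
        terms.reserve(n);
        for (auto i = 0u; i < n; ++i) {
            terms.push_back(expression{static_cast<double>(i + 1u)} * x);
        }
        auto s = sum(terms);
    }

This mirrors the change in compute_layer(): tmp_sum plays the role of terms, and the activation is applied once per neuron to sum(tmp_sum).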
