[TEST] Use builddir/android_build_result to build tests
This PR changes Android.mk to use builddir/android_build_result. To use it, a symbolic link to the android_build_result directory must be created in the parent directory (../):

ln -s ../../builddir/android_build_result ../nntrainer
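
For context, here is a minimal sketch of the assumed end-to-end flow. The build script name (tools/package_android.sh) and the exact ndk-build invocation are assumptions for illustration, not part of this commit:

``` bash
# 1. Build nntrainer for Android; this is assumed to produce
#    builddir/android_build_result with include/ and lib/<abi>/ inside.
./tools/package_android.sh

# 2. Expose the build result where Android.mk expects it (../nntrainer).
cd test/unittest/jni
ln -s ../../builddir/android_build_result ../nntrainer

# 3. Build the tests against the prebuilt libraries (Android.mk sits in
#    this directory, hence the explicit project path and build script).
ndk-build NDK_PROJECT_PATH=. APP_BUILD_SCRIPT=Android.mk
```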

Resolves:

**Self evaluation:**
1. Build test:	 [X]Passed [ ]Failed [ ]Skipped
2. Run test:	 [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: jijoong.moon <[email protected]>
jijoongmoon committed Nov 9, 2024
1 parent 9fb25a2 commit 659c73c
Showing 11 changed files with 54 additions and 40 deletions.
1 change: 1 addition & 0 deletions debian/nntrainer-dev.install
@@ -26,6 +26,7 @@
 /usr/include/nntrainer/layer_devel.h
 /usr/include/nntrainer/layer_impl.h
 /usr/include/nntrainer/acti_func.h
+/usr/include/nntrainer/loss_layer.h
 # custom layer kits
 /usr/include/nntrainer/app_context.h
 # logger
12 changes: 6 additions & 6 deletions nntrainer/graph/network_graph.h
@@ -327,19 +327,19 @@ class NetworkGraph {
    * @param lnode layer node to finalize and set run context
    * @param prev_inputs previous input information
    */
-  std::vector<Var_Grad *> finalizeContext(
-    const std::shared_ptr<LayerNode> &lnode,
-    const std::vector<Var_Grad *> &prev_inputs);
+  std::vector<Var_Grad *>
+  finalizeContext(const std::shared_ptr<LayerNode> &lnode,
+                  const std::vector<Var_Grad *> &prev_inputs);
 
   /**
    * @brief Recreate run layer context from the given init layer context
    *
    * @param lnode layer node to finalize and set run context
    * @param prev_inputs previous input information
    */
-  std::vector<Var_Grad *> refinalizeContext(
-    const std::shared_ptr<LayerNode> &lnode,
-    const std::vector<Var_Grad *> &prev_inputs);
+  std::vector<Var_Grad *>
+  refinalizeContext(const std::shared_ptr<LayerNode> &lnode,
+                    const std::vector<Var_Grad *> &prev_inputs);
 
   /** Interface for manager */
 
4 changes: 2 additions & 2 deletions nntrainer/layers/layer_node.cpp
@@ -817,8 +817,8 @@ void LayerNode::forwarding(bool training) {
   if (executeInPlace() == InPlace::NONE) {
     for (unsigned int i = 0; i < run_context->getNumOutputs(); ++i) {
       run_context->getOutput(i).setValue(0);
-      if(!run_context->getOutputGradUnsafe(i).isValid())
-        run_context->getOutputGradUnsafe(i).setValue(0);
+      if (!run_context->getOutputGradUnsafe(i).isValid())
+        run_context->getOutputGradUnsafe(i).setValue(0);
     }
     for (unsigned int i = 0; i < run_context->getNumWeights(); ++i) {
       if (run_context->weightHasGradient(i)) {
4 changes: 2 additions & 2 deletions nntrainer/layers/loss/loss_layer.h
@@ -47,7 +47,7 @@ class LossLayer : public Layer {
    */
   virtual bool supportBackwarding() const override { return true; }
 
-  bool supportInPlace() const override {return is_inplace;}
+  bool supportInPlace() const override { return is_inplace; }
 
   /**
    * @copydoc Layer::requireLabel()
@@ -72,7 +72,7 @@
   Tensor
     l; /**< loss tensor to store intermediate value to calculate loss value */
 
-  bool is_inplace;
+  bool is_inplace;
 };
 
 } // namespace nntrainer
1 change: 0 additions & 1 deletion nntrainer/layers/loss/mse_loss_layer.h
@@ -51,7 +51,6 @@ class MSELossLayer : public LossLayer {
   const std::string getType() const override { return MSELossLayer::type; };
 
   inline static const std::string type = "mse";
-
 };
 } // namespace nntrainer
 
2 changes: 1 addition & 1 deletion nntrainer/layers/lstm.cpp
@@ -58,7 +58,7 @@ void LSTMLayer::forwardingBatchFirstLSTM(
   TensorDim input_tensor_dim({feature_size}, tensor_type);
   TensorDim unit_tensor_dim({unit}, tensor_type);
   TensorDim num_gate_unit_tensor_dim({NUM_GATE * unit}, tensor_type);
-
+
   for (unsigned int batch = 0; batch < batch_size; ++batch) {
     const Tensor input_sample = input_.getBatchSlice(batch, 1);
     Tensor hidden_state_sample = hidden_state_.getBatchSlice(batch, 1);
8 changes: 5 additions & 3 deletions nntrainer/tensor/weight.h
@@ -288,9 +288,11 @@ class Weight : public Var_Grad {
   /**
    * @brief Apply the gradient to the weight
   */
-  void applyGradient(double lr) { var->add_i(*grad.get(), -lr);
-    std::cout << var->getName() << " --------------------------" <<std::endl;
-    var->print(std::cout);}
+  void applyGradient(double lr) {
+    var->add_i(*grad.get(), -lr);
+    std::cout << var->getName() << " --------------------------" << std::endl;
+    var->print(std::cout);
+  }
 
   /**
    * @brief Apply the gradient to the weight with updated gradient
5 changes: 3 additions & 2 deletions nntrainer/utils/base_properties.h
@@ -720,7 +720,8 @@ class TensorFormat final : public EnumProperty<TensorFormatInfo> {
 };
 
 // /**
-// * @brief trainable property, use this to set and check how if certain layer is
+// * @brief trainable property, use this to set and check how if certain layer
+// is
 // * trainable
 // *
 // */
@@ -734,7 +735,7 @@ class TensorFormat final : public EnumProperty<TensorFormatInfo> {
 // static constexpr const char *key = "trainable";
 // using prop_tag = bool_prop_tag;
 // };
-
+
 } // namespace props
 
 } // namespace nntrainer
4 changes: 1 addition & 3 deletions packaging/nntrainer.spec
@@ -573,9 +573,7 @@ cp -r result %{buildroot}%{_datadir}/nntrainer/unittest/
 %{_includedir}/nntrainer/util_func.h
 %{_includedir}/nntrainer/fp16.h
 %{_includedir}/nntrainer/util_simd.h
-# In the current version, Neon SIMD is enabled only when FP16 is enabled with AArch64.
-# This may be subject to change in future versions.
-%ifarch aarch64
+%{_includedir}/nntrainer/loss_layer.h
 %if 0%{?enable_fp16}
 %{_includedir}/nntrainer/util_simd_neon.h
 %{_includedir}/nntrainer/blas_neon.h
8 changes: 8 additions & 0 deletions test/jni/README.md
@@ -13,3 +13,11 @@ please do
 #cp ${ANDROIND_SDK_HOME}/Sdk/ndk/${NDK_VERSION}/sources/third_party/googletest .
 ```
+
+and to use the Android builddir/android_build_result, do
+``` bash
+#ln -s ../../builddir/android_build_result ../nntrainer
+```
+
+
+
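After creating the link, a quick sanity check; the expected include/lib layout is an assumption inferred from the Android.mk changes below, not something this commit documents:

``` bash
# The link should resolve to the Android build result.
ls -l ../nntrainer                         # -> ../../builddir/android_build_result
ls ../nntrainer/include ../nntrainer/lib   # headers and per-ABI libraries
```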
45 changes: 25 additions & 20 deletions test/unittest/jni/Android.mk
@@ -7,33 +7,19 @@ ifndef ANDROID_NDK
 $(error ANDROID_NDK is not defined!)
 endif
 
-ifndef NNTRAINER_ROOT
-NNTRAINER_ROOT := $(LOCAL_PATH)/../../..
-endif
+NNTRAINER_ROOT := ../nntrainer
 
-ML_API_COMMON_INCLUDES := ${NNTRAINER_ROOT}/ml_api_common/include
-NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer \
-	$(NNTRAINER_ROOT)/nntrainer/dataset \
-	$(NNTRAINER_ROOT)/nntrainer/models \
-	$(NNTRAINER_ROOT)/nntrainer/layers \
-	$(NNTRAINER_ROOT)/nntrainer/compiler \
-	$(NNTRAINER_ROOT)/nntrainer/graph \
-	$(NNTRAINER_ROOT)/nntrainer/optimizers \
-	$(NNTRAINER_ROOT)/nntrainer/tensor \
-	$(NNTRAINER_ROOT)/nntrainer/utils \
-	$(NNTRAINER_ROOT)/api \
-	$(NNTRAINER_ROOT)/api/ccapi/include \
-	${ML_API_COMMON_INCLUDES}
+NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/include/
 
 LOCAL_MODULE := nntrainer
-LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libnntrainer.so
+LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/lib/$(TARGET_ARCH_ABI)/libnntrainer.so
 LOCAL_EXPORT_C_INCLUDES := $(NNTRAINER_INCLUDES)
 
 include $(PREBUILT_SHARED_LIBRARY)
 
 include $(CLEAR_VARS)
 
 LOCAL_MODULE := ccapi-nntrainer
-LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libccapi-nntrainer.so
+LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/lib/$(TARGET_ARCH_ABI)/libccapi-nntrainer.so
 LOCAL_EXPORT_C_INCLUDES := $(NNTRAINER_INCLUDES) $(NNTRAINER_INCLUDES)/nntrainer
 
 include $(PREBUILT_SHARED_LIBRARY)
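The relocated paths assume android_build_result exports a flat SDK-style tree rather than the source layout. A quick way to verify the assumption from the test/unittest/jni directory:

``` bash
ls ../nntrainer/include/nntrainer   # exported public headers
ls ../nntrainer/lib/arm64-v8a       # libnntrainer.so, libccapi-nntrainer.so per ABI
```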

@@ -67,3 +53,22 @@ LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES)
 LOCAL_SHARED_LIBRARIES := nntrainer ccapi-nntrainer
 LOCAL_STATIC_LIBRARIES := googletest_main
 include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := tensor_unittest
+LOCAL_CFLAGS := -Itests/googletest/include -Itests -pthread -fexceptions -fopenmp -static-openmp -DMIN_CPP_VERSION=201703L -DNNTR_NUM_THREADS=1 -D__LOGGING__=1 -DENABLE_TEST=1 -DREDUCE_TOLERANCE=1 -march=armv8.2-a+fp16 -mfpu=neon-fp16 -mfloat-abi=softfp -O3 -frtti -DENABLE_FP16=1
+LOCAL_CXXFLAGS += -std=c++17 -frtti -fexceptions
+LOCAL_LDLIBS := -llog -landroid -fopenmp -static-openmp
+
+LOCAL_SRC_FILES := \
+	tests/unittest_nntrainer_tensor_neon_fp16.cpp \
+	tests/nntrainer_test_util.cpp
+# disabled: tests/unittest_nntrainer_tensor_fp16.cpp (a commented-out entry must
+# not keep the trailing backslash, or the continuation folds the next source
+# file into the comment and drops it from the build)
+
+LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES)
+
+LOCAL_SHARED_LIBRARIES := nntrainer ccapi-nntrainer
+LOCAL_STATIC_LIBRARIES := googletest_main
+include $(BUILD_EXECUTABLE)
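
Once built, the new tensor_unittest binary can be pushed to a device and run. A sketch of the assumed steps; the libs/ output directory and device path are assumptions, not part of this commit:

``` bash
# ndk-build places executables under libs/<abi>/ by default.
adb push libs/arm64-v8a/tensor_unittest /data/local/tmp/
adb shell chmod +x /data/local/tmp/tensor_unittest
adb shell /data/local/tmp/tensor_unittest
```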
