Skip to content

Commit

Permalink
[Test] strided tensor multiplication unit test
Browse files Browse the repository at this point in the history
This PR includes unit tests verifying multiply_strided() in the TensorV2 class.
Note that all unit tests mirror the existing tests in the unittest_nntrainer_tensor.cpp file.

Self-evaluation:
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test:   [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Donghyeon Jeong <[email protected]>
  • Loading branch information
djeong20 authored and jijoongmoon committed Feb 14, 2024
1 parent ebcfbb1 commit 77ade7c
Show file tree
Hide file tree
Showing 2 changed files with 305 additions and 0 deletions.
145 changes: 145 additions & 0 deletions test/unittest/unittest_nntrainer_tensor_v2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -748,6 +748,151 @@ TEST(nntrainer_Tensor, multiply_float_01_p) {
EXPECT_EQ(result, expected);
}

/**
 * @brief Positive case: out-of-place multiply_strided(self) must equal the
 *        element-wise square of the input.
 */
TEST(nntrainer_Tensor, multiply_strided_01_p) {
  int status = ML_ERROR_NONE;
  int batch = 3;
  int channel = 1;
  int height = 3;
  int width = 10;

  nntrainer::TensorV2 input(batch, channel, height, width);
  // GEN_TEST_INPUT fills the tensor from the loop indices it declares
  // internally (presumably i/j/k over batch/height/width — macro defined
  // elsewhere in this test suite).
  GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);

  nntrainer::TensorV2 result = input.multiply_strided(input);

  float *data = result.getData<float>();
  ASSERT_NE(nullptr, data);
  float *indata = input.getData<float>();
  ASSERT_NE(nullptr, indata);

  float *outdata = new float[(input.size())];

  // Reference result: element-wise square of the input buffer.
  std::transform(indata, indata + batch * channel * height * width, indata,
                 outdata, std::multiplies<float>());

  // Compare every element, including the channel dimension. The original
  // bound (batch * height * width) silently omitted `channel`; that is
  // harmless only while channel == 1 and would under-check otherwise.
  for (int i = 0; i < batch * channel * height * width; ++i) {
    if (data[i] != outdata[i]) {
      status = ML_ERROR_RESULT_OUT_OF_RANGE;
      break;
    }
  }

  delete[] outdata;

  EXPECT_EQ(status, ML_ERROR_NONE);
}

// Negative case: multiply_strided() with an operand of mismatched
// dimensions must throw std::invalid_argument.
TEST(nntrainer_Tensor, multiply_strided_02_n) {
int batch = 3;
int channel = 1;
int height = 3;
int width = 10;

nntrainer::TensorV2 input(batch, channel, height, width);
GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);

// Deliberately smaller tensor (3-arg ctor; presumably batch/height/width
// with a defaulted channel — verify against the TensorV2 constructor).
nntrainer::TensorV2 test(batch - 1, height - 1, width - 1);

EXPECT_THROW({ input.multiply_strided(test); }, std::invalid_argument);
}

// Negative case: calling multiply_strided() on a tensor whose memory has
// not been allocated must throw std::invalid_argument.
TEST(nntrainer_Tensor, multiply_strided_03_n) {
int batch = 3;
int channel = 1;
int height = 3;
int width = 10;

nntrainer::TensorDim dim(batch, channel, height, width);
// input is not allocated now : alloc_now == false
nntrainer::TensorV2 input(dim, false);
nntrainer::TensorV2 test(dim);
GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1);

EXPECT_THROW(input.multiply_strided(test), std::invalid_argument);
}

// Negative case: passing an unallocated tensor as the right-hand operand
// of multiply_strided() must throw std::invalid_argument.
TEST(nntrainer_Tensor, multiply_strided_04_n) {
int batch = 3;
int channel = 1;
int height = 3;
int width = 10;

nntrainer::TensorDim dim(batch, channel, height, width);

nntrainer::TensorV2 input(dim);
GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
// test is not allocated.
nntrainer::TensorV2 test(dim, false);

EXPECT_THROW(input.multiply_strided(test), std::invalid_argument);
}

// Negative case: passing an unallocated tensor as the output argument of
// multiply_strided() must throw std::invalid_argument.
TEST(nntrainer_Tensor, multiply_strided_05_n) {
int batch = 3;
int channel = 1;
int height = 3;
int width = 10;

nntrainer::TensorDim dim(batch, channel, height, width);

nntrainer::TensorV2 input(dim);
GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
nntrainer::TensorV2 test(dim);
GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1);
// output is not allocated
nntrainer::TensorV2 output(dim, false);

EXPECT_THROW(input.multiply_strided(test, output), std::invalid_argument);
}

/**
 * @brief Positive case: multiply_strided(input, output, beta) with beta = 10
 *        should accumulate into output: output = input * input + beta * output
 *        (output is initialized identically to input, so the expected value
 *        is input^2 + 10 * input per element).
 */
TEST(nntrainer_Tensor, multiply_strided_06_p) {
  int status = ML_ERROR_NONE;
  int batch = 3;
  int channel = 1;
  int height = 3;
  int width = 10;

  nntrainer::TensorV2 input(batch, channel, height, width);
  GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);

  nntrainer::TensorV2 output(batch, channel, height, width);
  GEN_TEST_INPUT(output, i * (batch * height) + j * (width) + k + 1);

  float *indata = input.getData<float>();
  ASSERT_NE(nullptr, indata);

  // Single authoritative element count: includes `channel`, which the
  // original comparison loop omitted (harmless only while channel == 1).
  const int len = batch * channel * height * width;

  float *outdata_beta = new float[(input.size())];
  float *indata_mul = new float[(input.size())];
  float *outdata = new float[(input.size())];

  // outdata_beta = beta * output (output starts identical to input);
  // lambda replaces the original std::bind for readability.
  std::transform(indata, indata + len, outdata_beta,
                 [](float x) { return x * 10.0f; });

  // indata_mul = input * input
  std::transform(indata, indata + len, indata, indata_mul,
                 std::multiplies<float>());
  // outdata = input * input + beta * output  (expected result)
  std::transform(indata_mul, indata_mul + len, outdata_beta, outdata,
                 std::plus<float>());

  input.multiply_strided(input, output, 10.0);

  float *data = output.getData<float>();
  ASSERT_NE(nullptr, data);

  for (int i = 0; i < len; ++i) {
    if (data[i] != outdata[i]) {
      status = ML_ERROR_RESULT_OUT_OF_RANGE;
      break;
    }
  }

  delete[] outdata_beta;
  delete[] indata_mul;
  delete[] outdata;

  EXPECT_EQ(status, ML_ERROR_NONE);
}

int main(int argc, char **argv) {
int result = -1;

Expand Down
160 changes: 160 additions & 0 deletions test/unittest/unittest_nntrainer_tensor_v2_fp16.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -907,6 +907,166 @@ TEST(nntrainer_Tensor, multiply_float_01_p) {
EXPECT_EQ(result, expected);
}

/**
 * @brief Positive case (FP16): out-of-place multiply_strided(self) must
 *        equal the element-wise square of the input.
 */
TEST(nntrainer_Tensor, multiply_strided_01_p) {
  int status = ML_ERROR_NONE;
  int batch = 3;
  int channel = 1;
  int height = 3;
  int width = 10;

  nntrainer::TensorV2 input(
    batch, channel, height, width,
    {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16});
  GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);

  nntrainer::TensorV2 result = input.multiply_strided(input);

  _FP16 *data = result.getData<_FP16>();
  ASSERT_NE(nullptr, data);
  _FP16 *indata = input.getData<_FP16>();
  ASSERT_NE(nullptr, indata);

  _FP16 *outdata = new _FP16[(input.size())];

  // Reference result: element-wise square of the input buffer.
  std::transform(indata, indata + batch * channel * height * width, indata,
                 outdata, std::multiplies<_FP16>());

  // Compare every element, including the channel dimension. The original
  // bound (batch * height * width) silently omitted `channel`; that is
  // harmless only while channel == 1 and would under-check otherwise.
  for (int i = 0; i < batch * channel * height * width; ++i) {
    if (data[i] != outdata[i]) {
      status = ML_ERROR_RESULT_OUT_OF_RANGE;
      break;
    }
  }

  delete[] outdata;

  EXPECT_EQ(status, ML_ERROR_NONE);
}

// Negative case (FP16): multiply_strided() with an operand of mismatched
// dimensions must throw std::invalid_argument. Note `test` is also built
// with the default data type (presumably FP32), so both shape and dtype
// differ from `input` — either mismatch should trigger the throw.
TEST(nntrainer_Tensor, multiply_strided_02_n) {
int batch = 3;
int channel = 1;
int height = 3;
int width = 10;

nntrainer::TensorV2 input(
batch, channel, height, width,
{nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16});
GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);

nntrainer::TensorV2 test(batch - 1, height - 1, width - 1);

EXPECT_THROW({ input.multiply_strided(test); }, std::invalid_argument);
}

// Negative case (FP16): calling multiply_strided() on a tensor whose
// memory has not been allocated must throw std::invalid_argument.
TEST(nntrainer_Tensor, multiply_strided_03_n) {
int batch = 3;
int channel = 1;
int height = 3;
int width = 10;

nntrainer::TensorDim dim(
batch, channel, height, width,
{nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16});
// input is not allocated now : alloc_now == false
nntrainer::TensorV2 input(dim, false);
nntrainer::TensorV2 test(dim);
GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1);

EXPECT_THROW(input.multiply_strided(test), std::invalid_argument);
}

// Negative case (FP16): passing an unallocated tensor as the right-hand
// operand of multiply_strided() must throw std::invalid_argument.
TEST(nntrainer_Tensor, multiply_strided_04_n) {
int batch = 3;
int channel = 1;
int height = 3;
int width = 10;

nntrainer::TensorDim dim(
batch, channel, height, width,
{nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16});

nntrainer::TensorV2 input(dim);
GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
// test is not allocated.
nntrainer::TensorV2 test(dim, false);

EXPECT_THROW(input.multiply_strided(test), std::invalid_argument);
}

// Negative case (FP16): passing an unallocated tensor as the output
// argument of multiply_strided() must throw std::invalid_argument.
TEST(nntrainer_Tensor, multiply_strided_05_n) {
int batch = 3;
int channel = 1;
int height = 3;
int width = 10;

nntrainer::TensorDim dim(
batch, channel, height, width,
{nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16});

nntrainer::TensorV2 input(dim);
GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
nntrainer::TensorV2 test(dim);
GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1);
// output is not allocated
nntrainer::TensorV2 output(dim, false);

EXPECT_THROW(input.multiply_strided(test, output), std::invalid_argument);
}

/**
 * @brief Positive case (FP16): multiply_strided(input, output, beta) with
 *        beta = 10 should accumulate into output:
 *        output = input * input + beta * output. Since output is initialized
 *        identically to input, the expected value is input^2 + 10 * input.
 */
TEST(nntrainer_Tensor, multiply_strided_06_p) {
  int status = ML_ERROR_NONE;
  int batch = 3;
  int channel = 1;
  int height = 3;
  int width = 10;

  nntrainer::TensorV2 input(
    batch, channel, height, width,
    {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16});
  GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);

  nntrainer::TensorV2 output(
    batch, channel, height, width,
    {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16});
  GEN_TEST_INPUT(output, i * (batch * height) + j * (width) + k + 1);

  _FP16 *indata = input.getData<_FP16>();
  ASSERT_NE(nullptr, indata);

  // Single authoritative element count: includes `channel`, which the
  // original comparison loop omitted (harmless only while channel == 1).
  const int len = batch * channel * height * width;

  _FP16 *outdata_beta = new _FP16[(input.size())];
  _FP16 *indata_mul = new _FP16[(input.size())];
  _FP16 *outdata = new _FP16[(input.size())];

  // outdata_beta = beta * output (output starts identical to input);
  // lambda replaces the original std::bind for readability.
  std::transform(indata, indata + len, outdata_beta, [](_FP16 x) {
    return static_cast<_FP16>(x * static_cast<_FP16>(10.0));
  });

  // indata_mul = input * input
  std::transform(indata, indata + len, indata, indata_mul,
                 std::multiplies<_FP16>());
  // outdata = input * input + beta * output  (expected result)
  std::transform(indata_mul, indata_mul + len, outdata_beta, outdata,
                 std::plus<_FP16>());

  input.multiply_strided(input, output, 10.0);

  _FP16 *data = output.getData<_FP16>();
  ASSERT_NE(nullptr, data);

  for (int i = 0; i < len; ++i) {
    if (data[i] != outdata[i]) {
      status = ML_ERROR_RESULT_OUT_OF_RANGE;
      break;
    }
  }

  delete[] outdata_beta;
  delete[] indata_mul;
  delete[] outdata;

  EXPECT_EQ(status, ML_ERROR_NONE);
}

int main(int argc, char **argv) {
int result = -1;

Expand Down

0 comments on commit 77ade7c

Please sign in to comment.