
Commit

rename test_convlution to test_convolution
Corea authored and nihui committed Apr 24, 2018
1 parent 354f515 commit b2794ba
Showing 3 changed files with 86 additions and 86 deletions.
4 changes: 2 additions & 2 deletions autotest/autotest.cpp
@@ -7,11 +7,11 @@
#include "opencv.h"
#include "platform.h"

#include "test_convlution.h"
#include "test_convolution.h"
#include "test_innerproduct.h"
#include "gtest/gtest.h"

int main(int argc, char **argv){
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
-}
+}
92 changes: 46 additions & 46 deletions autotest/test_convlution.h → autotest/test_convolution.h
@@ -1,30 +1,30 @@
-#pragma once
-#include "gtest/gtest.h"
-#include "layer/convolution.h"
-using namespace ncnn;
-
-/*
-forward - pass:
-[0,1,2,3,4,
- 1,2,3,4,5,           [1,1,1,       [ 9.5, 18.5,
- 2,3,4,5,6,  * 0.5*    1,1,1,  + 0.5 =
- 3,4,5,6,7,            1,1,1]         18.5, 27.5]
- 4,5,6,7,8]
-*/
-
-TEST(convolution, forward)
-{
-// layer params
-Convolution convolution_layer;
-convolution_layer.num_output = 1;
-convolution_layer.kernel_size = 3;
-convolution_layer.dilation = 1;
-convolution_layer.stride = 2;
-convolution_layer.pad = 0;
-convolution_layer.bias_term = 1;
-convolution_layer.weight_data_size = 9;
-
-// input & output
+#pragma once
+#include "gtest/gtest.h"
+#include "layer/convolution.h"
+using namespace ncnn;
+
+/*
+forward - pass:
+[0,1,2,3,4,
+ 1,2,3,4,5,           [1,1,1,       [ 9.5, 18.5,
+ 2,3,4,5,6,  * 0.5*    1,1,1,  + 0.5 =
+ 3,4,5,6,7,            1,1,1]         18.5, 27.5]
+ 4,5,6,7,8]
+*/
+
+TEST(convolution, forward)
+{
+// layer params
+Convolution convolution_layer;
+convolution_layer.num_output = 1;
+convolution_layer.kernel_size = 3;
+convolution_layer.dilation = 1;
+convolution_layer.stride = 2;
+convolution_layer.pad = 0;
+convolution_layer.bias_term = 1;
+convolution_layer.weight_data_size = 9;
+
+// input & output
float_t in[] = {
0.0f, 1.0f, 2.0f, 3.0f, 4.0f,
1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
@@ -49,22 +49,22 @@ TEST(convolution, forward)
float_t b[] = {
0.5f
};
-// forward
-Mat mat_in(5, 5, 1, in);
-Mat mat_out;
-
-convolution_layer.bias_data.data = b;
-convolution_layer.weight_data.data = w;
-convolution_layer.forward(mat_in, mat_out);
-
-// check expect
-EXPECT_EQ(mat_out.w, 2);
-EXPECT_EQ(mat_out.h, 2);
-EXPECT_EQ(mat_out.c, 1);
-for (int i = 0; i < _countof(expected_out); ++i)
-{
-EXPECT_NEAR(mat_out[i], expected_out[i], 1E-5);
-}
-
-}

+// forward
+Mat mat_in(5, 5, 1, in);
+Mat mat_out;
+
+convolution_layer.bias_data.data = b;
+convolution_layer.weight_data.data = w;
+convolution_layer.forward(mat_in, mat_out);
+
+// check expect
+EXPECT_EQ(mat_out.w, 2);
+EXPECT_EQ(mat_out.h, 2);
+EXPECT_EQ(mat_out.c, 1);
+for (int i = 0; i < _countof(expected_out); ++i)
+{
+EXPECT_NEAR(mat_out[i], expected_out[i], 1E-5);
+}
+
+}
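
The expected values in the comment above (9.5, 18.5, 18.5, 27.5) can be checked by hand. Below is a minimal standalone sketch, not part of this commit and using no ncnn types, that applies the same 3x3 kernel of constant weight 0.5 with stride 2, no padding, and a bias of 0.5 to the 5x5 ramp input:

// Standalone sanity check for the expected outputs in test_convolution.h.
// Plain C++ only; not part of the ncnn test suite.
#include <cstdio>

int main()
{
    const int in_w = 5, in_h = 5, kernel = 3, stride = 2;
    const float weight = 0.5f, bias = 0.5f;

    // same ramp input as the test: in(x, y) = x + y
    float in[5][5];
    for (int y = 0; y < in_h; y++)
        for (int x = 0; x < in_w; x++)
            in[y][x] = (float)(x + y);

    // valid convolution (pad = 0) with a constant 3x3 kernel of 0.5
    for (int oy = 0; oy + kernel <= in_h; oy += stride)
    {
        for (int ox = 0; ox + kernel <= in_w; ox += stride)
        {
            float sum = 0.f;
            for (int ky = 0; ky < kernel; ky++)
                for (int kx = 0; kx < kernel; kx++)
                    sum += weight * in[oy + ky][ox + kx];
            printf("%.1f ", sum + bias); // prints 9.5 18.5 / 18.5 27.5
        }
        printf("\n");
    }
    return 0;
}

The top-left 3x3 window sums to 18, so 18 * 0.5 + 0.5 = 9.5; the bottom-right window sums to 54, giving 27.5, which matches expected_out in the test.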
76 changes: 38 additions & 38 deletions autotest/test_innerproduct.h
@@ -1,23 +1,23 @@
-#pragma once
-#include "gtest/gtest.h"
-#include "layer/innerproduct.h"
-
-/*
-forward - pass:
-[0,1,2,3] * [1,1,1,1   + [0.5,  = [6.5,
-             1,1,1,1]     0.5]     6.5]
-*/
-
-TEST(innerproduct, forward)
-{
-// layer params
-InnerProduct inner_product_layer;
-inner_product_layer.num_output = 2; // W
-inner_product_layer.bias_term = 1; // bias
-inner_product_layer.weight_data_size = 3; // W + bias
-
-
-// input & output
+#pragma once
+#include "gtest/gtest.h"
+#include "layer/innerproduct.h"
+
+/*
+forward - pass:
+[0,1,2,3] * [1,1,1,1   + [0.5,  = [6.5,
+             1,1,1,1]     0.5]     6.5]
+*/
+
+TEST(innerproduct, forward)
+{
+// layer params
+InnerProduct inner_product_layer;
+inner_product_layer.num_output = 2; // W
+inner_product_layer.bias_term = 1; // bias
+inner_product_layer.weight_data_size = 3; // W + bias
+
+
+// input & output
float_t in[] = {
0.0f, 1.0f, 2.0f, 3.0f
};
@@ -29,27 +29,27 @@ TEST(innerproduct, forward)

// weights & bias
float_t w[] = {
1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f
};

float_t b[] = {
0.5f, 0.5f
};

-// forward
-Mat mat_in(4, in);
-Mat mat_out;
-
-inner_product_layer.bias_data.data = b;
-inner_product_layer.weight_data.data = w;
-inner_product_layer.forward(mat_in, mat_out);
-
-// check expect
-EXPECT_EQ(mat_out.c, 2);
-for (int i = 0; i < _countof(expected_out); ++i)
-{
-float output_value = *(mat_out.data + mat_out.cstep * i);
-EXPECT_NEAR(output_value, expected_out[i], 1E-5);
-}
-}

+// forward
+Mat mat_in(4, in);
+Mat mat_out;
+
+inner_product_layer.bias_data.data = b;
+inner_product_layer.weight_data.data = w;
+inner_product_layer.forward(mat_in, mat_out);
+
+// check expect
+EXPECT_EQ(mat_out.c, 2);
+for (int i = 0; i < _countof(expected_out); ++i)
+{
+float output_value = *(mat_out.data + mat_out.cstep * i);
+EXPECT_NEAR(output_value, expected_out[i], 1E-5);
+}
+}
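
The expected inner-product outputs (6.5 for both neurons) follow directly from the comment in this test: the dot product of [0,1,2,3] with a row of ones is 6, plus a bias of 0.5. Below is a minimal standalone sketch, not part of this commit and independent of ncnn, that reproduces the values using the 2x4 weight layout shown in the diagram:

// Standalone sanity check for the expected outputs in test_innerproduct.h.
// Plain C++ only; not part of the ncnn test suite.
#include <cstdio>

int main()
{
    const float in[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
    const float w[2][4] = { { 1.0f, 1.0f, 1.0f, 1.0f },
                            { 1.0f, 1.0f, 1.0f, 1.0f } };
    const float b[2] = { 0.5f, 0.5f };

    // one dot product per output neuron, plus its bias
    for (int o = 0; o < 2; o++)
    {
        float sum = b[o];
        for (int i = 0; i < 4; i++)
            sum += w[o][i] * in[i];
        printf("%.1f\n", sum); // prints 6.5 twice
    }
    return 0;
}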
