diff --git a/test/single_layer/fixture.hpp b/test/single_layer/fixture.hpp new file mode 100644 index 00000000..8413cddc --- /dev/null +++ b/test/single_layer/fixture.hpp @@ -0,0 +1,160 @@ +#pragma once +#include + +#include +#include +#include +#include + +#include "layers/Layer.hpp" + +using namespace it_lab_ai; + +class BaseTestFixture : public ::testing::Test { + public: + void SetUp() override { + defaultOptions.backend = Backend::kNaive; + defaultOptions.parallel = false; + defaultOptions.par_backend = ParBackend::kSeq; + } + + static RuntimeOptions setTBBOptions() { + RuntimeOptions options; + options.backend = Backend::kNaive; + options.parallel = true; + options.par_backend = ParBackend::kTbb; + return options; + } + + static RuntimeOptions setSeqOptions() { + RuntimeOptions options; + options.backend = Backend::kNaive; + options.parallel = true; + options.par_backend = ParBackend::kSeq; + return options; + } + + static RuntimeOptions setSTLOptions() { + RuntimeOptions options; + options.backend = Backend::kNaive; + options.parallel = true; + options.par_backend = ParBackend::kThreads; + return options; + } + + static RuntimeOptions setKokkosOptions() { + RuntimeOptions options; + options.backend = Backend::kNaive; + options.parallel = true; + options.par_backend = ParBackend::kKokkos; + return options; + } + + static RuntimeOptions setOmpOptions() { + RuntimeOptions options; + options.backend = Backend::kNaive; + options.parallel = true; + options.par_backend = ParBackend::kOmp; + return options; + } + + static RuntimeOptions createOptionsWithBackend(ParBackend backend) { + RuntimeOptions options; + options.backend = Backend::kNaive; + options.parallel = (backend != ParBackend::kSeq); + options.par_backend = backend; + return options; + } + + static std::vector basic1DData() { + return {9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f}; + } + + static Shape basic1DShape() { return {8}; } + + static std::vector basic2DData4x4() { + return {9.0f, 8.0f, 7.0f, 
6.0f, 5.0f, 4.0f, 3.0f, 2.0f, + 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}; + } + + static Shape basic2DShape4x4() { return {4, 4}; } + + static std::vector<float> basic2DData3x3() { + return {9.0f, 8.0f, 7.0f, 5.0f, 4.0f, 3.0f, 2.0f, 3.0f, 4.0f}; + } + + static Shape basic2DShape3x3() { return {3, 3}; } + + static std::vector<float> activationTestData() { + return {-3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f}; + } + + static std::vector<float> reluExpected() { + return {0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 2.0f, 3.0f}; + } + + static std::vector<float> sigmoidExpected() { + return {0.0474f, 0.1192f, 0.2689f, 0.5f, 0.7311f, 0.8808f, 0.9526f}; + } + + static std::vector<float> get1DAverageExpected() { + return {8.0f, 6.0f, 4.0f}; + } + + static std::vector<float> get2DAverageStride1Expected() { + return {6.5f, 5.5f, 4.5f, 3.5f, 3.5f, 3.5f, 4.5f, 5.5f, 6.5f}; + } + + static std::vector<float> ascending1DData() { + return {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}; + } + + static Shape ascending1DShape() { return {10}; } + + static std::vector<float> descending1DData() { + return {10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}; + } + + static std::vector<float> mixed1DData() { + return {-5.0f, -3.0f, 0.0f, 2.0f, 4.0f, -1.0f, 3.0f, 1.0f, -2.0f, 5.0f}; + } + + static std::vector<float> small2DData2x2() { + return {1.0f, 2.0f, 3.0f, 4.0f}; + } + + static Shape small2DShape2x2() { return {2, 2}; } + + static std::vector<float> medium2DData5x5() { + return {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, + 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, + 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f}; + } + + static Shape medium2DShape5x5() { return {5, 5}; } + + static std::vector<float> zero2DData3x3() { + return {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}; + } + + static Shape zero2DShape3x3() { return {3, 3}; } + + static std::vector<float> constant2DData4x4(float value = 5.0f) { + return std::vector<float>(16, value); + } + + static Shape constant2DShape4x4() { return {4, 4}; } + + template <typename T> + static 
void expectVectorsNear(const std::vector& actual, + const std::vector& expected, + T tolerance = static_cast(1e-5)) { + ASSERT_EQ(actual.size(), expected.size()); + for (size_t i = 0; i < actual.size(); ++i) { + EXPECT_NEAR(actual[i], expected[i], tolerance) << "at index " << i; + } + } + + protected: + RuntimeOptions defaultOptions; +}; diff --git a/test/single_layer/test_convlayer.cpp b/test/single_layer/test_convlayer.cpp index 0ffbd6fa..717d3fea 100644 --- a/test/single_layer/test_convlayer.cpp +++ b/test/single_layer/test_convlayer.cpp @@ -1,9 +1,12 @@ #include +#include "fixture.hpp" #include "layers/ConvLayer.hpp" using namespace it_lab_ai; +class ConvTestFixture : public BaseTestFixture {}; + TEST(ConvolutionalLayerTest, IncompatibleInput) { int step = 2; std::vector kernelvec = {1, 0, 1, 0, 1, 0, 1, 0, 1}; @@ -585,11 +588,8 @@ TEST(ConvolutionalLayerTest, DepthwiseViaConvolutionalLayer) { } } -TEST(ConvolutionalLayerTest, Conv4DSTLViaConvolutionalLayer) { - RuntimeOptions options; - options.backend = Backend::kNaive; - options.parallel = true; - options.par_backend = ParBackend::kThreads; +TEST_F(ConvTestFixture, Conv4DSTLViaConvolutionalLayer) { + auto options = setSTLOptions(); std::vector image(48, 1.0f); Shape input_shape({1, 3, 4, 4}); @@ -1043,12 +1043,7 @@ TEST(ConvolutionalLayerTest, Float4DKernelWorking) { ASSERT_EQ(result.size(), 4); } -TEST(ConvolutionalLayerTest, Conv4DWithParallelNoneBackend) { - RuntimeOptions options; - options.backend = Backend::kNaive; - options.parallel = true; - options.par_backend = ParBackend::kSeq; - +TEST_F(ConvTestFixture, Conv4DWithParallelNoneBackend) { std::vector image(48, 1.0f); Shape input_shape({1, 3, 4, 4}); Tensor input = make_tensor(image, input_shape); @@ -1064,7 +1059,7 @@ TEST(ConvolutionalLayerTest, Conv4DWithParallelNoneBackend) { ConvolutionalLayer layer(1, 0, 1, kernel, Tensor()); std::vector in{input}; std::vector out{output}; - layer.run(in, out, options); + layer.run(in, out, defaultOptions); 
std::vector result = *out[0].as(); @@ -1104,11 +1099,8 @@ TEST(ConvolutionalLayerTest, Conv4DWithParallelDefaultFallback) { } } -TEST(ConvolutionalLayerTest, Conv4DWithoutParallelFlag) { - RuntimeOptions options; - options.backend = Backend::kNaive; - options.parallel = false; - options.par_backend = ParBackend::kThreads; +TEST_F(ConvTestFixture, Conv4DWithoutParallelFlag) { + auto options = setSTLOptions(); std::vector image(48, 1.0f); Shape input_shape({1, 3, 4, 4}); @@ -1135,12 +1127,7 @@ TEST(ConvolutionalLayerTest, Conv4DWithoutParallelFlag) { } } -TEST(ConvolutionalLayerTest, Conv4DLegacyFloatWithParallelNone) { - RuntimeOptions options; - options.backend = Backend::kNaive; - options.parallel = true; - options.par_backend = ParBackend::kSeq; - +TEST_F(ConvTestFixture, Conv4DLegacyFloatWithParallelNone) { std::vector image(48, 1.0f); Shape input_shape({1, 3, 4, 4}); Tensor input = make_tensor(image, input_shape); @@ -1162,7 +1149,7 @@ TEST(ConvolutionalLayerTest, Conv4DLegacyFloatWithParallelNone) { Tensor output = make_tensor(output_vec, output_shape); std::vector out{output}; - layer.run(in, out, options); + layer.run(in, out, defaultOptions); std::vector result = *out[0].as(); @@ -1175,3 +1162,130 @@ TEST(ConvolutionalLayerTest, Conv4DLegacyFloatWithParallelNone) { ASSERT_NEAR(result[4], expected_value_ch2, 1e-5f); ASSERT_NEAR(result[5], expected_value_ch2, 1e-5f); } + +struct ConvTestParams { + std::vector input_data; + Shape input_shape; + std::vector kernel_data; + Shape kernel_shape; + std::vector bias_data; + int stride; + int pad; + int dilation; + bool use_bias; + Shape output_shape; + std::vector expected_output; + std::string description; +}; + +class ConvParametrizedTest : public ConvTestFixture, + public ::testing::WithParamInterface< + std::tuple> {}; + +TEST_P(ConvParametrizedTest, test_convolution_with_different_backends) { + auto [params, runtime_options] = GetParam(); + + Tensor input = make_tensor(params.input_data, params.input_shape); + 
Tensor kernel = make_tensor(params.kernel_data, params.kernel_shape); + Tensor bias; + + if (params.use_bias) { + bias = make_tensor( + params.bias_data, Shape({static_cast(params.kernel_shape[0])})); + } + + ConvolutionalLayer layer(params.stride, params.pad, params.dilation, kernel, + bias, 1, true); + + std::vector output_vec(params.output_shape.count(), 0.0f); + Tensor output = make_tensor(output_vec, params.output_shape); + + std::vector inputs{input}; + std::vector outputs{output}; + + layer.run(inputs, outputs, runtime_options); + + auto output_data = *outputs[0].as(); + expectVectorsNear(output_data, params.expected_output, 1e-4f); +} + +static std::vector createSimpleKernel3x3() { + return {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}; +} + +INSTANTIATE_TEST_SUITE_P( + ConvLayerTests, ConvParametrizedTest, + ::testing::Combine( + ::testing::Values( + ConvTestParams{.input_data = std::vector(48, 1.0f), + .input_shape = {1, 3, 4, 4}, + .kernel_data = createSimpleKernel3x3(), + .kernel_shape = {3, 3}, + .bias_data = {}, + .stride = 1, + .pad = 0, + .dilation = 1, + .use_bias = false, + .output_shape = {1, 3, 2, 2}, + .expected_output = std::vector(12, 9.0f), + .description = "2D_Kernel_3_Channels_4x4"}, + + ConvTestParams{.input_data = std::vector(75, 1.0f), + .input_shape = {1, 3, 5, 5}, + .kernel_data = {1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, + 1.0f, 0.0f, 1.0f}, + .kernel_shape = {3, 3}, + .bias_data = {1.0f, 1.0f, 1.0f}, + .stride = 1, + .pad = 0, + .dilation = 1, + .use_bias = true, + .output_shape = {1, 3, 3, 3}, + .expected_output = std::vector(27, 6.0f), + .description = "2D_Kernel_With_Bias_3_Channels"}, + + ConvTestParams{.input_data = std::vector(75, 1.0f), + .input_shape = {1, 3, 5, 5}, + .kernel_data = {1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, + 1.0f, 0.0f, 1.0f}, + .kernel_shape = {3, 3}, + .bias_data = {}, + .stride = 2, + .pad = 0, + .dilation = 1, + .use_bias = false, + .output_shape = {1, 3, 2, 2}, + .expected_output = std::vector(12, 
5.0f), + .description = "2D_Kernel_Stride_2"}), + ::testing::Values(ConvTestFixture::setTBBOptions(), + ConvTestFixture::setOmpOptions(), + ConvTestFixture::setSeqOptions(), + ConvTestFixture::setSTLOptions(), + ConvTestFixture::setKokkosOptions())), + [](const ::testing::TestParamInfo< + std::tuple>& info) { + const auto& params = std::get<0>(info.param); + const auto& options = std::get<1>(info.param); + + std::string name = params.description + "_"; + + if (options.parallel) { + if (options.par_backend == ParBackend::kTbb) { + name += "TBB"; + } else if (options.par_backend == ParBackend::kOmp) { + name += "OMP"; + } else if (options.par_backend == ParBackend::kThreads) { + name += "STL"; + } else if (options.par_backend == ParBackend::kKokkos) { + name += "Kokkos"; + } else { + name += "Seq"; + } + } else { + name += "NoParallel"; + } + + std::replace(name.begin(), name.end(), ' ', '_'); + std::replace(name.begin(), name.end(), '-', '_'); + return name; + }); \ No newline at end of file diff --git a/test/single_layer/test_ewlayer.cpp b/test/single_layer/test_ewlayer.cpp index e925a310..48d4371e 100644 --- a/test/single_layer/test_ewlayer.cpp +++ b/test/single_layer/test_ewlayer.cpp @@ -3,11 +3,14 @@ #include #include +#include "fixture.hpp" #include "gtest/gtest.h" #include "layers/EWLayer.hpp" using namespace it_lab_ai; +class EWLayerTest_F : public BaseTestFixture {}; + class EWTestsParameterized : public ::testing::TestWithParam< std::tuple, EWLayerImpl, @@ -216,3 +219,305 @@ TEST(ewlayer, new_ewlayer_can_sigmoid_float_extreme_values) { EXPECT_NEAR((*out[0].as())[i], expected_output[i], 1e-5F); } } + +TEST_F(EWLayerTest_F, parallel_for_ew) { + EWLayer layer("relu"); + + std::vector vec(8000000, -1); + Tensor input = make_tensor(vec); + Tensor output; + std::vector in{input}; + std::vector out{output}; + + std::vector backends = {ParBackend::kSeq, ParBackend::kThreads, + ParBackend::kTbb, ParBackend::kOmp}; + + for (auto backend : backends) { + auto options = 
createOptionsWithBackend(backend); + + auto start = std::chrono::high_resolution_clock::now(); + layer.run(in, out, options); + auto end = std::chrono::high_resolution_clock::now(); + auto duration = + std::chrono::duration_cast(end - start); + std::cout << " time: " << duration.count() << " ms" << std::endl; + for (size_t i = 0; i < 8000000; i++) { + EXPECT_EQ((*out[0].as())[i], 0); + } + } +} + +TEST(ewlayer, parallel_for_ew_sigmoid_compact) { + EWLayer layer("sigmoid"); + + std::vector vec(8000000, -1); + Tensor input = make_tensor(vec); + Tensor output; + std::vector in{input}; + std::vector out{output}; + + std::vector> backends = { + {ParBackend::kSeq, "Sequential"}, + {ParBackend::kThreads, "Threads"}, + {ParBackend::kTbb, "TBB"}, + {ParBackend::kOmp, "OpenMP"}}; + + std::vector reference_result; + bool first = true; + + for (const auto& [backend, name] : backends) { + RuntimeOptions options; + options.parallel = (backend != ParBackend::kSeq); + options.par_backend = backend; + if (backend == ParBackend::kThreads) { + options.threads = 4; + } + + auto start = std::chrono::high_resolution_clock::now(); + layer.run(in, out, options); + auto end = std::chrono::high_resolution_clock::now(); + auto duration = + std::chrono::duration_cast(end - start); + + std::cout << "Sigmoid " << name << " time: " << duration.count() << " ms" + << std::endl; + + auto current_result = *out[0].as(); + if (first) { + reference_result = current_result; + first = false; + for (size_t i = 0; i < 100; i++) { + EXPECT_EQ(current_result[i], 0) + << "Invalid sigmoid result at index " << i; + } + } else { + for (size_t i = 0; i < reference_result.size(); i++) { + EXPECT_EQ(current_result[i], reference_result[i]) + << "Mismatch with " << name << " at index " << i; + } + } + } +} + +TEST(ewlayer, parallel_for_direct) { + const int SIZE = 2000; + std::vector matrix1(SIZE * SIZE); + std::vector matrix2(SIZE * SIZE); + std::vector result(SIZE * SIZE); + + for (int i = 0; i < SIZE * SIZE; ++i) 
{ + matrix1[i] = 1; + matrix2[i] = 1; + } + + auto start = std::chrono::high_resolution_clock::now(); + parallel::parallel_for( + SIZE * SIZE, [&](std::size_t i) { result[i] = matrix1[i] + matrix2[i]; }, + ParBackend::kSeq); + + auto end = std::chrono::high_resolution_clock::now(); + auto total_duration = + std::chrono::duration_cast(end - start); + + for (int i = 0; i < SIZE * SIZE; i++) ASSERT_EQ(result[i], 2); + + start = std::chrono::high_resolution_clock::now(); + parallel::parallel_for( + SIZE * SIZE, [&](std::size_t i) { result[i] = matrix1[i] + matrix2[i]; }, + ParBackend::kThreads); + end = std::chrono::high_resolution_clock::now(); + total_duration = + std::chrono::duration_cast(end - start); + for (int i = 0; i < SIZE * SIZE; i++) ASSERT_EQ(result[i], 2); + + start = std::chrono::high_resolution_clock::now(); + parallel::parallel_for( + SIZE * SIZE, [&](std::size_t i) { result[i] = matrix1[i] + matrix2[i]; }, + ParBackend::kTbb); + end = std::chrono::high_resolution_clock::now(); + total_duration = + std::chrono::duration_cast(end - start); + for (int i = 0; i < SIZE * SIZE; i++) ASSERT_EQ(result[i], 2); + + start = std::chrono::high_resolution_clock::now(); + parallel::parallel_for( + SIZE * SIZE, [&](std::size_t i) { result[i] = matrix1[i] + matrix2[i]; }, + ParBackend::kOmp); + end = std::chrono::high_resolution_clock::now(); + total_duration = + std::chrono::duration_cast(end - start); + for (int i = 0; i < SIZE * SIZE; i++) ASSERT_EQ(result[i], 2); +} + +TEST(ewlayer, parallel_for_notmatrix) { + const int SIZE = 3000; + std::vector matrix1(SIZE * SIZE); + std::vector result(SIZE * SIZE); + + for (int i = 0; i < SIZE * SIZE; ++i) { + matrix1[i] = 1; + } + + auto start = std::chrono::high_resolution_clock::now(); + parallel::parallel_for( + SIZE * SIZE, [&](std::size_t i) { result[i] = matrix1[i] + 1; }, + ParBackend::kSeq); + + auto end = std::chrono::high_resolution_clock::now(); + auto total_duration = + std::chrono::duration_cast(end - start); + + 
for (int i = 0; i < SIZE * SIZE; i++) ASSERT_EQ(result[i], 2); + + start = std::chrono::high_resolution_clock::now(); + parallel::parallel_for( + SIZE * SIZE, [&](std::size_t i) { result[i] = matrix1[i] + 1; }, + ParBackend::kThreads); + end = std::chrono::high_resolution_clock::now(); + total_duration = + std::chrono::duration_cast(end - start); + for (int i = 0; i < SIZE * SIZE; i++) ASSERT_EQ(result[i], 2); + + start = std::chrono::high_resolution_clock::now(); + parallel::parallel_for( + SIZE * SIZE, [&](std::size_t i) { result[i] = matrix1[i] + 1; }, + ParBackend::kTbb); + end = std::chrono::high_resolution_clock::now(); + total_duration = + std::chrono::duration_cast(end - start); + for (int i = 0; i < SIZE * SIZE; i++) ASSERT_EQ(result[i], 2); + + start = std::chrono::high_resolution_clock::now(); + parallel::parallel_for( + SIZE * SIZE, [&](std::size_t i) { result[i] = matrix1[i] + 1; }, + ParBackend::kOmp); + end = std::chrono::high_resolution_clock::now(); + total_duration = + std::chrono::duration_cast(end - start); + for (int i = 0; i < SIZE * SIZE; i++) ASSERT_EQ(result[i], 2); +} + +struct EWLayerTestParams { + std::string activation_type; + float alpha; + float beta; + std::vector input; + Shape input_shape; + std::vector expected_output; + std::string description; +}; + +class EWLayerParametrizedTest + : public BaseTestFixture, + public ::testing::WithParamInterface< + std::tuple> {}; + +TEST_P(EWLayerParametrizedTest, test_activation_with_different_backends) { + auto [params, runtime_options] = GetParam(); + + EWLayer layer(params.activation_type, params.alpha, params.beta); + + Tensor input = make_tensor(params.input, params.input_shape); + Tensor output; + + std::vector inputs{input}; + std::vector outputs{output}; + + layer.run(inputs, outputs, runtime_options); + + auto output_data = *outputs[0].as(); + expectVectorsNear(output_data, params.expected_output, 1e-4f); +} + +INSTANTIATE_TEST_SUITE_P( + EWLayerTests, EWLayerParametrizedTest, + 
::testing::Combine( + ::testing::Values( + EWLayerTestParams{"relu", 1.0f, 0.0f, + BaseTestFixture::activationTestData(), Shape{7}, + BaseTestFixture::reluExpected(), "ReLU"}, + EWLayerTestParams{"sigmoid", 1.0f, 0.0f, + BaseTestFixture::activationTestData(), Shape{7}, + BaseTestFixture::sigmoidExpected(), "Sigmoid"}, + EWLayerTestParams{"linear", + 2.0f, + 3.0f, + {1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, + Shape{5}, + {5.0f, 1.0f, 7.0f, -1.0f, 3.0f}, + "Linear_2x_plus_3"}, + EWLayerTestParams{"linear", 1.0f, 0.0f, + BaseTestFixture::basic1DData(), + BaseTestFixture::basic1DShape(), + BaseTestFixture::basic1DData(), "Linear_x"}, + EWLayerTestParams{ + "tanh", + 1.0f, + 0.0f, + {-2.0f, -1.0f, 0.0f, 1.0f, 2.0f}, + Shape{5}, + {std::tanh(-2.0f), std::tanh(-1.0f), std::tanh(0.0f), + std::tanh(1.0f), std::tanh(2.0f)}, + "Tanh"}, + EWLayerTestParams{ + "relu", 1.0f, 0.0f, BaseTestFixture::ascending1DData(), + BaseTestFixture::ascending1DShape(), + BaseTestFixture::ascending1DData(), "ReLU_Ascending"}, + EWLayerTestParams{ + "relu", + 1.0f, + 0.0f, + BaseTestFixture::mixed1DData(), + BaseTestFixture::ascending1DShape(), + {0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 3.0f, 1.0f, 0.0f, 5.0f}, + "ReLU_Mixed"}, + EWLayerTestParams{"relu", 1.0f, 0.0f, std::vector(10, 0.0f), + Shape{10}, std::vector(10, 0.0f), + "ReLU_All_Zeros"}, + EWLayerTestParams{"relu", 1.0f, 0.0f, + BaseTestFixture::basic2DData4x4(), + BaseTestFixture::basic2DShape4x4(), + BaseTestFixture::basic2DData4x4(), "ReLU_2D"}, + EWLayerTestParams{ + "relu", + 1.0f, + 0.0f, + {9.0f, -8.0f, 7.0f, -6.0f, -5.0f, 4.0f, -3.0f, 2.0f, 2.0f, + -3.0f, 4.0f, -5.0f, 6.0f, -7.0f, 8.0f, -9.0f}, + Shape{4, 4}, + {9.0f, 0.0f, 7.0f, 0.0f, 0.0f, 4.0f, 0.0f, 2.0f, 2.0f, 0.0f, + 4.0f, 0.0f, 6.0f, 0.0f, 8.0f, 0.0f}, + "ReLU_2D_Mixed"}), + ::testing::Values(BaseTestFixture::setTBBOptions(), + BaseTestFixture::setSeqOptions(), + BaseTestFixture::setOmpOptions(), + BaseTestFixture::setKokkosOptions(), + BaseTestFixture::setSTLOptions())), + [](const 
::testing::TestParamInfo< + std::tuple>& info) { + const auto& params = std::get<0>(info.param); + const auto& options = std::get<1>(info.param); + + std::string name = params.description + "_"; + + if (options.parallel) { + if (options.par_backend == ParBackend::kTbb) { + name += "TBB"; + } else if (options.par_backend == ParBackend::kThreads) { + name += "STL"; + } else if (options.par_backend == ParBackend::kOmp) { + name += "OMP"; + } else if (options.par_backend == ParBackend::kKokkos) { + name += "Kokkos"; + } else { + name += "Seq"; + } + } else { + name += "NoParallel"; + } + + std::replace(name.begin(), name.end(), ' ', '_'); + std::replace(name.begin(), name.end(), '-', '_'); + std::replace(name.begin(), name.end(), '.', '_'); + return name; + }); diff --git a/test/single_layer/test_poolinglayer.cpp b/test/single_layer/test_poolinglayer.cpp index b1bfd798..679c11cd 100644 --- a/test/single_layer/test_poolinglayer.cpp +++ b/test/single_layer/test_poolinglayer.cpp @@ -1,6 +1,7 @@ #include #include +#include "fixture.hpp" #include "gtest/gtest.h" #include "layers/PoolingLayer.hpp" @@ -8,6 +9,8 @@ using namespace it_lab_ai; GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(PoolingTestsParameterized); +class PoolingLayerTest : public BaseTestFixture {}; + TEST(poolinglayer, empty_inputs1) { Shape inpshape = {8}; Shape poolshape = {3}; @@ -283,11 +286,8 @@ TEST(poolinglayer, new_pooling_layer_can_run_int_avg) { } } -TEST(poolinglayer, new_pooling_layer_can_run_int_avg_tbb) { - RuntimeOptions options; - options.backend = Backend::kNaive; - options.parallel = true; - options.par_backend = ParBackend::kTbb; +TEST_F(PoolingLayerTest, new_pooling_layer_can_run_int_avg_tbb) { + auto options = setTBBOptions(); Shape inpshape = {4, 4}; Shape poolshape = {2, 2}; PoolingLayer a(poolshape, {2, 2}, {0, 0, 0, 0}, {1, 1}, false, "average"); @@ -326,11 +326,8 @@ TEST(poolinglayer, new_pooling_layer_can_run_1d_pooling_float) { } } -TEST(poolinglayer, 
new_pooling_layer_tbb_can_run_1d_pooling_float) { - RuntimeOptions options; - options.backend = Backend::kNaive; - options.parallel = true; - options.par_backend = ParBackend::kTbb; +TEST_F(PoolingLayerTest, new_pooling_layer_tbb_can_run_1d_pooling_float) { + auto options = setTBBOptions(); Shape inpshape = {8}; Shape poolshape = {3}; PoolingLayer a(poolshape, "average"); @@ -440,12 +437,7 @@ TEST(poolinglayer, maxpool_onnx_with_pooling_layer) { } } -TEST(poolinglayer, new_pooling_layer_with_parallel_none) { - RuntimeOptions options; - options.backend = Backend::kNaive; - options.parallel = true; - options.par_backend = ParBackend::kSeq; - +TEST_F(PoolingLayerTest, new_pooling_layer_with_parallel_none) { Shape inpshape = {4, 4}; Shape poolshape = {2, 2}; PoolingLayer a(poolshape, {1, 1}, {1, 1, 1, 1}, {1, 1}, false, "average"); @@ -454,16 +446,11 @@ TEST(poolinglayer, new_pooling_layer_with_parallel_none) { Tensor output = make_tensor({0}); std::vector in{make_tensor(input, inpshape)}; std::vector out{output}; - a.run(in, out, options); + a.run(in, out, defaultOptions); EXPECT_EQ(out[0].get_shape().count(), 25); } -TEST(poolinglayer, new_pooling_layer_int_avg_with_parallel_none) { - RuntimeOptions options; - options.backend = Backend::kNaive; - options.parallel = true; - options.par_backend = ParBackend::kSeq; - +TEST_F(PoolingLayerTest, new_pooling_layer_int_avg_with_parallel_none) { Shape inpshape = {4, 4}; Shape poolshape = {2, 2}; PoolingLayer a(poolshape, {2, 2}, {0, 0, 0, 0}, {1, 1}, false, "average"); @@ -479,7 +466,7 @@ TEST(poolinglayer, new_pooling_layer_int_avg_with_parallel_none) { std::vector in{make_tensor(input, inpshape)}; std::vector out{output}; - a.run(in, out, options); + a.run(in, out, defaultOptions); std::vector true_output = {6, 4, 4, 6}; for (size_t i = 0; i < true_output.size(); i++) { @@ -487,11 +474,8 @@ TEST(poolinglayer, new_pooling_layer_int_avg_with_parallel_none) { } } -TEST(poolinglayer, 
new_pooling_layer_int_avg_without_parallel_flag) { - RuntimeOptions options; - options.backend = Backend::kNaive; - options.parallel = false; - options.par_backend = ParBackend::kTbb; +TEST_F(PoolingLayerTest, new_pooling_layer_int_avg_without_parallel_flag) { + auto options = setTBBOptions(); Shape inpshape = {4, 4}; Shape poolshape = {2, 2}; @@ -515,3 +499,204 @@ TEST(poolinglayer, new_pooling_layer_int_avg_without_parallel_flag) { EXPECT_NEAR((*out[0].as())[i], true_output[i], 1e-5); } } + +struct PoolingTestParams { + std::vector input; + Shape input_shape; + Shape pool_shape; + Shape strides; + Shape pads; + Shape dilations; + bool ceil_mode; + std::string pooling_type; + std::vector expected_output; + std::string description; +}; + +class PoolingWithTBBTest + : public BaseTestFixture, + public ::testing::WithParamInterface {}; + +TEST_P(PoolingWithTBBTest, test_pooling_with_tbb) { + auto params = GetParam(); + auto tbb_options = setTBBOptions(); + + PoolingLayer layer(params.pool_shape, params.strides, params.pads, + params.dilations, params.ceil_mode, params.pooling_type); + + Tensor input = make_tensor(params.input, params.input_shape); + + PoolingLayerImpl impl(params.input_shape, params.pool_shape, + params.strides, params.pads, params.dilations, + params.ceil_mode, params.pooling_type); + Shape output_shape = impl.get_output_shape(); + + std::vector zeros(output_shape.count(), 0.0f); + Tensor output = make_tensor(zeros, output_shape); + + std::vector inputs{input}; + std::vector outputs{output}; + + layer.run(inputs, outputs, tbb_options); + + auto output_data = *outputs[0].as(); + expectVectorsNear(output_data, params.expected_output, 1e-5f); +} + +INSTANTIATE_TEST_SUITE_P( + PoolingTBBTests, PoolingWithTBBTest, + ::testing::Values( + PoolingTestParams{BaseTestFixture::basic1DData(), + BaseTestFixture::basic1DShape(), + {3}, + {2}, + {0, 0, 0, 0}, + {1, 1}, + false, + "average", + BaseTestFixture::get1DAverageExpected(), + "1D_Avg_Stride2_TBB"}, + 
PoolingTestParams{BaseTestFixture::basic1DData(), + BaseTestFixture::basic1DShape(), + {3}, + {1}, + {1, 1, 0, 0}, + {1, 1}, + false, + "average", + {8.5f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.5f}, + "1D_Avg_Stride1_Padding_TBB"}, + PoolingTestParams{BaseTestFixture::basic1DData(), + BaseTestFixture::basic1DShape(), + {3}, + {2}, + {0, 0, 0, 0}, + {1, 1}, + false, + "max", + {9.0f, 7.0f, 5.0f}, + "1D_Max_Stride2_TBB"}, + PoolingTestParams{BaseTestFixture::basic1DData(), + BaseTestFixture::basic1DShape(), + {3}, + {3}, + {0, 0, 0, 0}, + {1, 1}, + false, + "average", + {8.0f, 5.0f}, + "1D_Avg_Stride3_TBB"}, + PoolingTestParams{BaseTestFixture::ascending1DData(), + BaseTestFixture::ascending1DShape(), + {3}, + {2}, + {0, 0, 0, 0}, + {1, 1}, + false, + "average", + {2.0f, 4.0f, 6.0f, 8.0f}, + "1D_Ascending_Avg_Stride2_TBB"}, + PoolingTestParams{BaseTestFixture::mixed1DData(), + BaseTestFixture::ascending1DShape(), + {3}, + {2}, + {0, 0, 0, 0}, + {1, 1}, + false, + "max", + {0.0f, 4.0f, 4.0f, 3.0f}, + "1D_Mixed_Max_Stride2_TBB"}, + PoolingTestParams{BaseTestFixture::basic2DData4x4(), + BaseTestFixture::basic2DShape4x4(), + {2, 2}, + {1, 1}, + {0, 0, 0, 0}, + {1, 1}, + false, + "average", + BaseTestFixture::get2DAverageStride1Expected(), + "2D_Avg_Stride1_TBB"}, + PoolingTestParams{BaseTestFixture::basic2DData4x4(), + BaseTestFixture::basic2DShape4x4(), + {2, 2}, + {2, 2}, + {0, 0, 0, 0}, + {1, 1}, + false, + "average", + {6.5f, 4.5f, 4.5f, 6.5f}, + "2D_Avg_Stride2_TBB"}, + PoolingTestParams{BaseTestFixture::basic2DData3x3(), + BaseTestFixture::basic2DShape3x3(), + {2, 2}, + {1, 1}, + {0, 0, 0, 0}, + {1, 1}, + false, + "max", + {9.0f, 8.0f, 5.0f, 4.0f}, + "2D_Max_Stride1_TBB"}, + PoolingTestParams{BaseTestFixture::small2DData2x2(), + BaseTestFixture::small2DShape2x2(), + {2, 2}, + {1, 1}, + {0, 0, 0, 0}, + {1, 1}, + false, + "average", + {2.5f}, + "2D_Small_Avg_Stride1_TBB"}, + PoolingTestParams{ + BaseTestFixture::small2DData2x2(), + BaseTestFixture::small2DShape2x2(), 
+ {2, 2}, + {1, 1}, + {1, 1, 1, 1}, + {1, 1}, + false, + "average", + {1.0f, 1.5f, 2.0f, 2.0f, 2.5f, 3.0f, 3.0f, 3.5f, 4.0f}, + "2D_Small_Avg_Padding_TBB"}, + PoolingTestParams{BaseTestFixture::medium2DData5x5(), + BaseTestFixture::medium2DShape5x5(), + {3, 3}, + {2, 2}, + {0, 0, 0, 0}, + {1, 1}, + false, + "average", + {7.0f, 9.0f, 17.0f, 19.0f}, + "2D_Medium_Avg_3x3_Stride2_TBB"}, + PoolingTestParams{BaseTestFixture::zero2DData3x3(), + BaseTestFixture::zero2DShape3x3(), + {2, 2}, + {1, 1}, + {0, 0, 0, 0}, + {1, 1}, + false, + "max", + {0.0f, 0.0f, 0.0f, 0.0f}, + "2D_Zero_Max_TBB"}, + PoolingTestParams{BaseTestFixture::constant2DData4x4(7.0f), + BaseTestFixture::constant2DShape4x4(), + {2, 2}, + {1, 1}, + {0, 0, 0, 0}, + {1, 1}, + false, + "average", + std::vector(9, 7.0f), + "2D_Constant_Avg_TBB"}, + PoolingTestParams{BaseTestFixture::basic2DData4x4(), + BaseTestFixture::basic2DShape4x4(), + {2, 2}, + {1, 1}, + {0, 0, 0, 0}, + {2, 2}, + false, + "max", + {9.0f, 8.0f, 8.0f, 9.0f}, + "2D_Max_Dilation2_TBB"}), + [](const ::testing::TestParamInfo& info) { + return info.param.description; + }); \ No newline at end of file