160 changes: 160 additions & 0 deletions test/single_layer/fixture.hpp
@@ -0,0 +1,160 @@
#pragma once
#include <gtest/gtest.h>

#include <algorithm>
#include <random>
#include <string>
#include <vector>

#include "layers/Layer.hpp"

using namespace it_lab_ai;
Review comment (Member): Please do not use "using namespace *" in headers.
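One way to address this, as a sketch only: drop the file-scope using-directive and reach the few needed names through a header-local namespace alias. The alias name itl below is illustrative, not part of the PR.

namespace itl = it_lab_ai;  // header-local alias instead of a using-directive

class BaseTestFixture : public ::testing::Test {
 public:
  void SetUp() override {
    defaultOptions.backend = itl::Backend::kNaive;
    defaultOptions.parallel = false;
    defaultOptions.par_backend = itl::ParBackend::kSeq;
  }

 protected:
  itl::RuntimeOptions defaultOptions;
};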


class BaseTestFixture : public ::testing::Test {
 public:
  void SetUp() override {
    defaultOptions.backend = Backend::kNaive;
    defaultOptions.parallel = false;
    defaultOptions.par_backend = ParBackend::kSeq;
  }

  static RuntimeOptions setTBBOptions() {
    RuntimeOptions options;
    options.backend = Backend::kNaive;
    options.parallel = true;
    options.par_backend = ParBackend::kTbb;
    return options;
  }

  static RuntimeOptions setSeqOptions() {
    RuntimeOptions options;
    options.backend = Backend::kNaive;
    options.parallel = true;
    options.par_backend = ParBackend::kSeq;
    return options;
  }

  static RuntimeOptions setSTLOptions() {
    RuntimeOptions options;
    options.backend = Backend::kNaive;
    options.parallel = true;
    options.par_backend = ParBackend::kThreads;
    return options;
  }

  static RuntimeOptions setKokkosOptions() {
    RuntimeOptions options;
    options.backend = Backend::kNaive;
    options.parallel = true;
    options.par_backend = ParBackend::kKokkos;
    return options;
  }

  static RuntimeOptions setOmpOptions() {
    RuntimeOptions options;
    options.backend = Backend::kNaive;
    options.parallel = true;
    options.par_backend = ParBackend::kOmp;
    return options;
  }

  static RuntimeOptions createOptionsWithBackend(ParBackend backend) {
    RuntimeOptions options;
    options.backend = Backend::kNaive;
    options.parallel = (backend != ParBackend::kSeq);
    options.par_backend = backend;
    return options;
  }

  static std::vector<float> basic1DData() {
    return {9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f};
  }

  static Shape basic1DShape() { return {8}; }

  static std::vector<float> basic2DData4x4() {
    return {9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f,
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
  }

  static Shape basic2DShape4x4() { return {4, 4}; }

  static std::vector<float> basic2DData3x3() {
    return {9.0f, 8.0f, 7.0f, 5.0f, 4.0f, 3.0f, 2.0f, 3.0f, 4.0f};
  }

  static Shape basic2DShape3x3() { return {3, 3}; }

  static std::vector<float> activationTestData() {
    return {-3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f};
  }

  static std::vector<float> reluExpected() {
    return {0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 2.0f, 3.0f};
  }

  static std::vector<float> sigmoidExpected() {
    return {0.0474f, 0.1192f, 0.2689f, 0.5f, 0.7311f, 0.8808f, 0.9526f};
  }

  static std::vector<float> get1DAverageExpected() {
    return {8.0f, 6.0f, 4.0f};
  }

  static std::vector<float> get2DAverageStride1Expected() {
    return {6.5f, 5.5f, 4.5f, 3.5f, 3.5f, 3.5f, 4.5f, 5.5f, 6.5f};
  }

  static std::vector<float> ascending1DData() {
    return {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f};
  }

  static Shape ascending1DShape() { return {10}; }

  static std::vector<float> descending1DData() {
    return {10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
  }

  static std::vector<float> mixed1DData() {
    return {-5.0f, -3.0f, 0.0f, 2.0f, 4.0f, -1.0f, 3.0f, 1.0f, -2.0f, 5.0f};
  }

  static std::vector<float> small2DData2x2() {
    return {1.0f, 2.0f, 3.0f, 4.0f};
  }

  static Shape small2DShape2x2() { return {2, 2}; }

  static std::vector<float> medium2DData5x5() {
    return {1.0f,  2.0f,  3.0f,  4.0f,  5.0f,  6.0f,  7.0f,  8.0f,  9.0f,
            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
            19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f};
  }

  static Shape medium2DShape5x5() { return {5, 5}; }

  static std::vector<float> zero2DData3x3() {
    return {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
  }

  static Shape zero2DShape3x3() { return {3, 3}; }

  static std::vector<float> constant2DData4x4(float value = 5.0f) {
    return std::vector<float>(16, value);
  }

  static Shape constant2DShape4x4() { return {4, 4}; }

  template <typename T>
  static void expectVectorsNear(const std::vector<T>& actual,
                                const std::vector<T>& expected,
                                T tolerance = static_cast<T>(1e-5)) {
    ASSERT_EQ(actual.size(), expected.size());
    for (size_t i = 0; i < actual.size(); ++i) {
      EXPECT_NEAR(actual[i], expected[i], tolerance) << "at index " << i;
    }
  }

 protected:
  RuntimeOptions defaultOptions;
};
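For context, a minimal sketch of how a test might consume this fixture. The layer type, header, and test name are hypothetical stand-ins (only ConvolutionalLayer appears in this PR); Tensor, Shape, and make_tensor are used as in the tests below.

// Hypothetical usage sketch; ReluLayer and its header are illustrative only.
#include "fixture.hpp"

TEST_F(BaseTestFixture, ReluActivationMatchesReferenceTBB) {
  auto options = setTBBOptions();

  Tensor input = make_tensor(activationTestData(), Shape({7}));
  std::vector<float> out_vec(7, 0.0f);
  Tensor output = make_tensor(out_vec, Shape({7}));

  ReluLayer layer;  // assumed default-constructible, illustrative
  std::vector<Tensor> in{input};
  std::vector<Tensor> out{output};
  layer.run(in, out, options);

  // Compare against the fixture's precomputed reference values.
  expectVectorsNear(*out[0].as<float>(), reluExpected(), 1e-5f);
}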
162 changes: 138 additions & 24 deletions test/single_layer/test_convlayer.cpp
@@ -1,9 +1,12 @@
 #include <gtest/gtest.h>
 
+#include "fixture.hpp"
 #include "layers/ConvLayer.hpp"
 
 using namespace it_lab_ai;
 
+class ConvTestFixture : public BaseTestFixture {};
+
 TEST(ConvolutionalLayerTest, IncompatibleInput) {
   int step = 2;
   std::vector<float> kernelvec = {1, 0, 1, 0, 1, 0, 1, 0, 1};
@@ -585,11 +588,8 @@ TEST(ConvolutionalLayerTest, DepthwiseViaConvolutionalLayer) {
   }
 }
 
-TEST(ConvolutionalLayerTest, Conv4DSTLViaConvolutionalLayer) {
-  RuntimeOptions options;
-  options.backend = Backend::kNaive;
-  options.parallel = true;
-  options.par_backend = ParBackend::kThreads;
+TEST_F(ConvTestFixture, Conv4DSTLViaConvolutionalLayer) {
+  auto options = setSTLOptions();
 
   std::vector<float> image(48, 1.0f);
   Shape input_shape({1, 3, 4, 4});
@@ -1043,12 +1043,7 @@ TEST(ConvolutionalLayerTest, Float4DKernelWorking) {
   ASSERT_EQ(result.size(), 4);
 }
 
-TEST(ConvolutionalLayerTest, Conv4DWithParallelNoneBackend) {
-  RuntimeOptions options;
-  options.backend = Backend::kNaive;
-  options.parallel = true;
-  options.par_backend = ParBackend::kSeq;
-
+TEST_F(ConvTestFixture, Conv4DWithParallelNoneBackend) {
   std::vector<float> image(48, 1.0f);
   Shape input_shape({1, 3, 4, 4});
   Tensor input = make_tensor(image, input_shape);
@@ -1064,7 +1059,7 @@ TEST(ConvolutionalLayerTest, Conv4DWithParallelNoneBackend) {
   ConvolutionalLayer layer(1, 0, 1, kernel, Tensor());
   std::vector<Tensor> in{input};
   std::vector<Tensor> out{output};
-  layer.run(in, out, options);
+  layer.run(in, out, defaultOptions);
 
   std::vector<float> result = *out[0].as<float>();
 
@@ -1104,11 +1099,8 @@ TEST(ConvolutionalLayerTest, Conv4DWithParallelDefaultFallback) {
   }
 }
 
-TEST(ConvolutionalLayerTest, Conv4DWithoutParallelFlag) {
-  RuntimeOptions options;
-  options.backend = Backend::kNaive;
-  options.parallel = false;
-  options.par_backend = ParBackend::kThreads;
+TEST_F(ConvTestFixture, Conv4DWithoutParallelFlag) {
+  auto options = setSTLOptions();
 
   std::vector<float> image(48, 1.0f);
   Shape input_shape({1, 3, 4, 4});
@@ -1135,12 +1127,7 @@ TEST(ConvolutionalLayerTest, Conv4DWithoutParallelFlag) {
   }
 }
 
-TEST(ConvolutionalLayerTest, Conv4DLegacyFloatWithParallelNone) {
-  RuntimeOptions options;
-  options.backend = Backend::kNaive;
-  options.parallel = true;
-  options.par_backend = ParBackend::kSeq;
-
+TEST_F(ConvTestFixture, Conv4DLegacyFloatWithParallelNone) {
   std::vector<float> image(48, 1.0f);
   Shape input_shape({1, 3, 4, 4});
   Tensor input = make_tensor(image, input_shape);
@@ -1162,7 +1149,7 @@ TEST(ConvolutionalLayerTest, Conv4DLegacyFloatWithParallelNone) {
   Tensor output = make_tensor(output_vec, output_shape);
   std::vector<Tensor> out{output};
 
-  layer.run(in, out, options);
+  layer.run(in, out, defaultOptions);
 
   std::vector<float> result = *out[0].as<float>();
 
@@ -1175,3 +1162,130 @@ TEST(ConvolutionalLayerTest, Conv4DLegacyFloatWithParallelNone) {
   ASSERT_NEAR(result[4], expected_value_ch2, 1e-5f);
   ASSERT_NEAR(result[5], expected_value_ch2, 1e-5f);
 }
+
+struct ConvTestParams {
+  std::vector<float> input_data;
+  Shape input_shape;
+  std::vector<float> kernel_data;
+  Shape kernel_shape;
+  std::vector<float> bias_data;
+  int stride;
+  int pad;
+  int dilation;
+  bool use_bias;
+  Shape output_shape;
+  std::vector<float> expected_output;
+  std::string description;
+};
+
+class ConvParametrizedTest : public ConvTestFixture,
+                             public ::testing::WithParamInterface<
+                                 std::tuple<ConvTestParams, RuntimeOptions>> {};
+
+TEST_P(ConvParametrizedTest, test_convolution_with_different_backends) {
+  auto [params, runtime_options] = GetParam();
+
+  Tensor input = make_tensor<float>(params.input_data, params.input_shape);
+  Tensor kernel = make_tensor<float>(params.kernel_data, params.kernel_shape);
+  Tensor bias;
+
+  if (params.use_bias) {
+    bias = make_tensor<float>(
+        params.bias_data, Shape({static_cast<size_t>(params.kernel_shape[0])}));
+  }
+
+  ConvolutionalLayer layer(params.stride, params.pad, params.dilation, kernel,
+                           bias, 1, true);
+
+  std::vector<float> output_vec(params.output_shape.count(), 0.0f);
+  Tensor output = make_tensor<float>(output_vec, params.output_shape);
+
+  std::vector<Tensor> inputs{input};
+  std::vector<Tensor> outputs{output};
+
+  layer.run(inputs, outputs, runtime_options);
+
+  auto output_data = *outputs[0].as<float>();
+  expectVectorsNear(output_data, params.expected_output, 1e-4f);
+}
+
+static std::vector<float> createSimpleKernel3x3() {
+  return {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f};
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    ConvLayerTests, ConvParametrizedTest,
+    ::testing::Combine(
+        ::testing::Values(
+            ConvTestParams{.input_data = std::vector<float>(48, 1.0f),
+                           .input_shape = {1, 3, 4, 4},
+                           .kernel_data = createSimpleKernel3x3(),
+                           .kernel_shape = {3, 3},
+                           .bias_data = {},
+                           .stride = 1,
+                           .pad = 0,
+                           .dilation = 1,
+                           .use_bias = false,
+                           .output_shape = {1, 3, 2, 2},
+                           .expected_output = std::vector<float>(12, 9.0f),
+                           .description = "2D_Kernel_3_Channels_4x4"},
+
+            ConvTestParams{.input_data = std::vector<float>(75, 1.0f),
+                           .input_shape = {1, 3, 5, 5},
+                           .kernel_data = {1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f,
+                                           1.0f, 0.0f, 1.0f},
+                           .kernel_shape = {3, 3},
+                           .bias_data = {1.0f, 1.0f, 1.0f},
+                           .stride = 1,
+                           .pad = 0,
+                           .dilation = 1,
+                           .use_bias = true,
+                           .output_shape = {1, 3, 3, 3},
+                           .expected_output = std::vector<float>(27, 6.0f),
+                           .description = "2D_Kernel_With_Bias_3_Channels"},
+
+            ConvTestParams{.input_data = std::vector<float>(75, 1.0f),
+                           .input_shape = {1, 3, 5, 5},
+                           .kernel_data = {1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f,
+                                           1.0f, 0.0f, 1.0f},
+                           .kernel_shape = {3, 3},
+                           .bias_data = {},
+                           .stride = 2,
+                           .pad = 0,
+                           .dilation = 1,
+                           .use_bias = false,
+                           .output_shape = {1, 3, 2, 2},
+                           .expected_output = std::vector<float>(12, 5.0f),
+                           .description = "2D_Kernel_Stride_2"}),
+        ::testing::Values(ConvTestFixture::setTBBOptions(),
+                          ConvTestFixture::setOmpOptions(),
+                          ConvTestFixture::setSeqOptions(),
+                          ConvTestFixture::setSTLOptions(),
+                          ConvTestFixture::setKokkosOptions())),
+    [](const ::testing::TestParamInfo<
+        std::tuple<ConvTestParams, RuntimeOptions>>& info) {
+      const auto& params = std::get<0>(info.param);
+      const auto& options = std::get<1>(info.param);
+
+      std::string name = params.description + "_";
+
+      if (options.parallel) {
+        if (options.par_backend == ParBackend::kTbb) {
+          name += "TBB";
+        } else if (options.par_backend == ParBackend::kOmp) {
+          name += "OMP";
+        } else if (options.par_backend == ParBackend::kThreads) {
+          name += "STL";
+        } else if (options.par_backend == ParBackend::kKokkos) {
+          name += "Kokkos";
+        } else {
+          name += "Seq";
+        }
+      } else {
+        name += "NoParallel";
+      }
+
+      std::replace(name.begin(), name.end(), ' ', '_');
+      std::replace(name.begin(), name.end(), '-', '_');
+      return name;
+    });
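The name generator appends a backend suffix to each case's description (for example, 2D_Kernel_Stride_2_TBB), so one backend's instances can be run in isolation with gtest's --gtest_filter flag. If broader coverage is wanted later, extra cases slot into the same table; the entry below is a sketch only, not part of the PR, with expected values hand-computed for a zero-padded 3x3 all-ones input convolved with the all-ones 3x3 kernel: each output element counts the kernel taps that overlap the input, giving 4 at the corners, 6 on the edges, and 9 in the center.

// Hypothetical additional case (not in this PR); values hand-derived.
ConvTestParams{.input_data = std::vector<float>(9, 1.0f),
               .input_shape = {1, 1, 3, 3},
               .kernel_data = createSimpleKernel3x3(),
               .kernel_shape = {3, 3},
               .bias_data = {},
               .stride = 1,
               .pad = 1,  // zero padding keeps the 3x3 spatial size
               .dilation = 1,
               .use_bias = false,
               .output_shape = {1, 1, 3, 3},
               .expected_output = {4.0f, 6.0f, 4.0f, 6.0f, 9.0f, 6.0f,
                                   4.0f, 6.0f, 4.0f},
               .description = "2D_Kernel_Padded_3x3"}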