diff --git a/.clang-format b/.clang-format index fbacfbd..b82f1d7 100644 --- a/.clang-format +++ b/.clang-format @@ -27,7 +27,7 @@ IncludeCategories: Priority: 1 # standard C++ headers # TODO: add as needed - - Regex: ^<(algorithm|array|condition_variable|functional|iostream|map|memory|mutex|numeric|string|thread|vector)>$ + - Regex: ^<(algorithm|array|condition_variable|functional|iomanip|iostream|map|memory|mutex|numeric|string|thread|type_traits|vector)>$ Priority: 2 # third party headers - Regex: ^ diff --git a/.gitignore b/.gitignore index 0dc3866..57e0fcc 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ *.tar /3rdparty /bin +/build /cmake-build /examples/Makefile /lib diff --git a/CMakeLists.txt b/CMakeLists.txt index 04a2560..6023586 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,8 +6,9 @@ SET(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) FIND_PACKAGE(stdtensor REQUIRED) -ADD_DEFINITIONS(-Wfatal-errors) ADD_DEFINITIONS(-Wall) +ADD_DEFINITIONS(-Werror) +ADD_DEFINITIONS(-Wfatal-errors) INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/3rdparty/include) INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include) diff --git a/examples/example_1.cpp b/examples/example_1.cpp index 4311a43..99c1330 100644 --- a/examples/example_1.cpp +++ b/examples/example_1.cpp @@ -1,6 +1,7 @@ -#include #include +#include + struct examples { const uint32_t n = 10; const uint32_t c = 3; diff --git a/examples/example_mlp.cpp b/examples/example_mlp.cpp index e3db82a..81f36af 100644 --- a/examples/example_mlp.cpp +++ b/examples/example_mlp.cpp @@ -1,8 +1,9 @@ #include -#include #include +#include + #include "utils.hpp" void example_mlp() diff --git a/examples/example_mnist.cpp b/examples/example_mnist.cpp index 63ec9fa..8d479dc 100644 --- a/examples/example_mnist.cpp +++ b/examples/example_mnist.cpp @@ -1,14 +1,17 @@ #include + #include #include #ifdef USE_OPENCV -#include +# include #endif #include +#include "utils.hpp" + void example_mnist() {} int main() @@ -25,6 +28,7 @@ int main() int i = 0; system("mkdir -p images"); for (auto im : t) { + UNUSED(im); char name[20]; sprintf(name, "images/%d.png", ++i); #ifdef USE_OPENCV diff --git a/examples/example_model_plain34.cpp b/examples/example_model_plain34.cpp new file mode 100644 index 0000000..cd5751a --- /dev/null +++ b/examples/example_model_plain34.cpp @@ -0,0 +1,125 @@ +// #define STDNN_OPS_HAVE_CBLAS + +#include +#include + +#include + +#include "utils.hpp" + +template +class plain34_model +{ + const size_t logits = 1000; + + using relu = nn::ops::pointwise; + using bn_layer = nn::layers::batch_norm; + + using flatten = nn::layers::flatten<1, 3>; + using dense = nn::layers::dense<>; + using softmax = nn::layers::activation; + + auto conv1(int d) const + { + using layer = nn::layers::conv; + return layer(d, layer::ksize(7, 7), layer::padding_same(), + layer::stride(2, 2)); + } + + auto pool1() const + { + using layer = nn::layers::pool; + return layer(layer::ksize(3, 3), layer::padding_same(), + layer::stride(2, 2)); + } + + auto pool2() const + { + using layer = nn::layers::pool; + return layer(layer::ksize(7, 7)); + } + + auto conv(int d, int s) const + { + using layer = nn::layers::conv; + return layer(d, layer::ksize(3, 3), layer::padding_same(), + layer::stride(s, s)); + } + + const std::string prefix_; + + const auto p(const std::string &name) const + { + return nn::ops::readtar(prefix_, name); + } + + public: + const size_t h = 224; + const size_t w = 224; + + plain34_model(const std::string &prefix) : prefix_(prefix) {} + + template + auto 
operator()(const ttl::tensor_ref &x, int m = 5) const + { + auto layers = nn::models::make_sequential() // + << conv1(64) // + << pool1() // + + << conv(64, 1) << bn_layer() // + << conv(64, 1) << bn_layer() << conv(64, 1) << bn_layer() + << conv(64, 1) << bn_layer() << conv(64, 1) << bn_layer() + << conv(64, 1) << bn_layer() + + << conv(128, 2) << bn_layer() // + << conv(128, 1) << bn_layer() // + << conv(128, 1) << bn_layer() // + << conv(128, 1) << bn_layer() // + << conv(128, 1) << bn_layer() // + << conv(128, 1) << bn_layer() // + << conv(128, 1) << bn_layer() // + << conv(128, 1) << bn_layer() // + + << conv(256, 2) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + << conv(256, 1) << bn_layer() // + + << conv(512, 2) << bn_layer() // + << conv(512, 1) << bn_layer() // + << conv(512, 1) << bn_layer() // + << conv(512, 1) << bn_layer() // + << conv(512, 1) << bn_layer() // + << conv(512, 1) << bn_layer() // + + << pool2() // + << flatten() // + << dense(logits) // + << softmax() // + ; + + auto y = layers(x); + return y; + } +}; + +int main(int argc, char *argv[]) +{ + const std::string home(std::getenv("HOME")); + const std::string prefix = home + "/var/models/resnet"; + plain34_model model(prefix); + const auto x = ttl::tensor(1, model.h, model.w, 3); + const auto y = model(ref(x)); + PPRINT(x); + PPRINT(*y); + return 0; +} diff --git a/examples/example_model_plain50.cpp b/examples/example_model_plain50.cpp new file mode 100644 index 0000000..d28a425 --- /dev/null +++ b/examples/example_model_plain50.cpp @@ -0,0 +1,200 @@ +// #define STDNN_OPS_HAVE_CBLAS + +#include +#include + +#include + +#include "utils.hpp" + +using nn::layers::with_init; +using nn::layers::debug::show_name; + +template +class plain50_model +{ + const size_t logits = 1000; + + using relu = nn::ops::pointwise; + using bn_layer = nn::layers::batch_norm; + + using flatten = nn::layers::flatten<1, 3>; + using conv = nn::layers::conv; + using softmax = nn::layers::activation; + + auto conv7x7(int d) const + { + using conv_relu = + nn::layers::conv; + return with_init(conv_relu(d, conv_relu::ksize(7, 7), + conv_relu::padding_same(), + conv_relu::stride(2, 2)), + show_name("conv1/kernel"), show_name("conv1/bias")); + } + + auto bn() const + { + return with_init(bn_layer(), // + // show_name("mean"), show_name("var"), + nn::ops::noop(), // + nn::ops::noop(), // + show_name("bn_conv?/beta"), + show_name("bn_conv?/gamma")); + } + + auto pool1() const + { + using pool_max = nn::layers::pool; + return pool_max(pool_max::ksize(3, 3), pool_max::padding_same(), + pool_max::stride(2, 2)); + } + + auto pool2() const + { + using pool_mean = nn::layers::pool; + return pool_mean(pool_mean::ksize(7, 7)); + } + + auto conv1x1(int d, int s) const + { + return with_init(conv(d, conv::ksize(1, 1), conv::padding_same(), + conv::stride(s, s)), + show_name("res?_branch?/kernel"), + show_name("res?_branch?/bias")); + } + + auto conv3x3(int d, int s) const + { + return with_init(conv(d, conv::ksize(3, 3), conv::padding_same(), + conv::stride(s, s)), + show_name("res?_branch?/kernel"), + show_name("res?_branch?/bias")); + } + + // auto conv2_x() const {} + // auto conv3_x() const {} + // auto conv4_x() const {} + // auto conv5_x() 
const {} + + auto dense(int logits) const + { + using layer = nn::layers::dense<>; + return with_init(layer(logits), show_name("W"), show_name("b")); + } + + const std::string prefix_; + + const auto p(const std::string &name) const + { + return nn::ops::readtar(prefix_, name); + } + + public: + const size_t h = 224; + const size_t w = 224; + + plain50_model(const std::string &prefix) : prefix_(prefix) {} + + template + auto operator()(const ttl::tensor_ref &x, int m = 5) const + { + auto layers = nn::models::make_sequential() // + << conv7x7(64) << bn() // + << pool1() // + + // conv2_x [1x1, 64; 3x3, 64; 1x1, 256] x 3 + << conv1x1(64, 1) << bn() // + << conv3x3(64, 1) << bn() // + << conv1x1(256, 1) << bn() // + + << conv1x1(64, 1) << bn() // + << conv3x3(64, 1) << bn() // + << conv1x1(256, 1) << bn() // + + << conv1x1(64, 1) << bn() // + << conv3x3(64, 1) << bn() // + << conv1x1(256, 1) + << bn() // + + // conv3_x [1x1, 128; 3x3, 128; 1x1, 512] x 4 + << conv1x1(128, 2) << bn() // + << conv3x3(128, 1) << bn() // + << conv1x1(512, 1) << bn() // + + << conv1x1(128, 1) << bn() // + << conv3x3(128, 1) << bn() // + << conv1x1(512, 1) << bn() // + + << conv1x1(128, 1) << bn() // + << conv3x3(128, 1) << bn() // + << conv1x1(512, 1) << bn() // + + << conv1x1(128, 1) << bn() // + << conv3x3(128, 1) << bn() // + << conv1x1(512, 1) + << bn() // + + // conv4_x [1x1, 256; 3x3, 256; 1x1, 1024] x 6 + << conv1x1(256, 2) << bn() // + << conv3x3(256, 1) << bn() // + << conv1x1(1024, 1) << bn() // + + << conv1x1(256, 1) << bn() // + << conv3x3(256, 1) << bn() // + << conv1x1(1024, 1) << bn() // + + << conv1x1(256, 1) << bn() // + << conv3x3(256, 1) << bn() // + << conv1x1(1024, 1) << bn() // + + << conv1x1(256, 1) << bn() // + << conv3x3(256, 1) << bn() // + << conv1x1(1024, 1) << bn() // + + << conv1x1(256, 1) << bn() // + << conv3x3(256, 1) << bn() // + << conv1x1(1024, 1) << bn() // + + << conv1x1(256, 1) << bn() // + << conv3x3(256, 1) << bn() // + << conv1x1(1024, 1) + << bn() // + + // conv5_x [1x1, 512; 3x3, 512; 1x1, 2048] x 3 + << conv1x1(512, 2) << bn() // + << conv3x3(512, 1) << bn() // + << conv1x1(2048, 1) << bn() // + + << conv1x1(512, 1) << bn() // + << conv3x3(512, 1) << bn() // + << conv1x1(2048, 1) << bn() // + + << conv1x1(512, 1) << bn() // + << conv3x3(512, 1) << bn() // + << conv1x1(2048, 1) + << bn() // + + // + << pool2() // + << flatten() // + << dense(logits) // + << softmax() // + ; + + auto y = layers(x); + return y; + } +}; + +int main(int argc, char *argv[]) +{ + const std::string home(std::getenv("HOME")); + const std::string prefix = home + "/var/models/resnet"; + plain50_model model(prefix); + const auto x = ttl::tensor(1, model.h, model.w, 3); + const auto y = model(ref(x)); + PPRINT(x); + PPRINT(*y); + return 0; +} diff --git a/examples/example_train_mnist_slp.cpp b/examples/example_train_mnist_slp.cpp index f3e652a..f5bc4d0 100644 --- a/examples/example_train_mnist_slp.cpp +++ b/examples/example_train_mnist_slp.cpp @@ -125,6 +125,7 @@ void train_slp_model(const D &ds, // const int n_epochs = 1; int step = 0; for (auto _ : range(n_epochs)) { + UNUSED(_); for (auto offset : range(n / batch_size)) { ++step; printf("step: %d\n", step); diff --git a/examples/utils.hpp b/examples/utils.hpp index 44e4b0d..e1d53be 100644 --- a/examples/utils.hpp +++ b/examples/utils.hpp @@ -1,9 +1,13 @@ +#include +#include #include -#include -#include #include +#include + +#include + template std::string show_shape(const ttl::internal::basic_shape &shape, char bracket_l = '(', char bracket_r = 
')') @@ -21,7 +25,7 @@ void show_signature(const T &y, const Ts &... x) { std::array args({show_shape(x.shape())...}); std::string ss; - for (auto p : args) { + for (const auto &p : args) { if (!ss.empty()) { ss += ", "; } ss += p; } @@ -35,3 +39,28 @@ template void pprint(const T &t, const char *name) } #define PPRINT(e) pprint(e, #e); + +namespace nn::layers::debug +{ +class show_name +{ + const std::string name_; + + public: + show_name(const std::string name) : name_(name) {} + + template + void operator()(const ttl::tensor_ref &y) const + { + std::cerr << std::left << std::setw(32) << name_ + << " :: " << show_shape(y.shape()) << std::endl; + } +}; +} // namespace nn::layers::debug + +inline void make_unuse(void *) {} + +#define UNUSED(e) \ + { \ + make_unuse(&e); \ + } diff --git a/include/nn/bits/layers/conv.hpp b/include/nn/bits/layers/conv.hpp index 7003af3..cdac628 100644 --- a/include/nn/bits/layers/conv.hpp +++ b/include/nn/bits/layers/conv.hpp @@ -21,7 +21,8 @@ class conv_trait using stride_t = std::experimental::new_type, stride_trait>; using rate_t = std::experimental::new_type, rate_trait>; - using padding_policy = ops::linear_sample_trait::padding_policy; + using dim_t = uint32_t; + using padding_policy = ops::linear_sample_trait::padding_policy; const size_t n_filters_; const ksize_t ksize_; @@ -36,12 +37,12 @@ class conv_trait static padding_policy padding_same() { - return ops::linear_sample_trait::padding_same(); + return ops::linear_sample_trait::padding_same(); } static padding_policy padding_valid() { - return ops::linear_sample_trait::padding_valid(); + return ops::linear_sample_trait::padding_valid(); } conv_trait(size_t n_filters, const ksize_t &ksize) diff --git a/include/nn/bits/layers/pool.hpp b/include/nn/bits/layers/pool.hpp index afb89fe..df657fb 100644 --- a/include/nn/bits/layers/pool.hpp +++ b/include/nn/bits/layers/pool.hpp @@ -18,7 +18,8 @@ template <> class pool_trait using ksize_t = std::experimental::new_type, ksize_trait>; using stride_t = std::experimental::new_type, stride_trait>; - using padding_policy = ops::linear_sample_trait::padding_policy; + using dim_t = uint32_t; + using padding_policy = ops::linear_sample_trait::padding_policy; const ksize_t ksize_; const padding_policy padding_; @@ -30,12 +31,12 @@ template <> class pool_trait static padding_policy padding_same() { - return ops::linear_sample_trait::padding_same(); + return ops::linear_sample_trait::padding_same(); } static padding_policy padding_valid() { - return ops::linear_sample_trait::padding_valid(); + return ops::linear_sample_trait::padding_valid(); } pool_trait() : pool_trait(ksize(2, 2)) {} diff --git a/include/nn/bits/layers/reshape.hpp b/include/nn/bits/layers/reshape.hpp new file mode 100644 index 0000000..9d17a71 --- /dev/null +++ b/include/nn/bits/layers/reshape.hpp @@ -0,0 +1,20 @@ +#pragma once +#include +#include +#include + +namespace nn::layers +{ +template class flatten +{ + using op = ops::flatten; + + public: + template + auto operator()(const ttl::tensor_ref &x) const + { + auto y = nn::ops::new_result>(op(), x); + return nn::layers::make_layer(y); + } +}; +} // namespace nn::layers diff --git a/include/nn/bits/ops/conv.hpp b/include/nn/bits/ops/conv.hpp index 4b98169..339ded9 100644 --- a/include/nn/bits/ops/conv.hpp +++ b/include/nn/bits/ops/conv.hpp @@ -50,10 +50,7 @@ template class linear_conv_trait } linear_conv_trait(const padding_t &pad, dim_t stride, dim_t rate) - : pad_l_(std::get<0>(pad.dims)), - pad_r_(std::get<1>(pad.dims)), - rate_(rate), - 
stride_(stride) + : pad_l_(pad.left_), pad_r_(pad.right_), rate_(rate), stride_(stride) { } @@ -69,7 +66,7 @@ template class conv_trait; template <> class conv_trait { - using dim_t = size_t; + using dim_t = uint32_t; using conv_trait_1d_t = linear_conv_trait; public: diff --git a/include/nn/bits/ops/im2col.hpp b/include/nn/bits/ops/im2col.hpp index 3490c2e..cc84499 100644 --- a/include/nn/bits/ops/im2col.hpp +++ b/include/nn/bits/ops/im2col.hpp @@ -8,7 +8,8 @@ namespace nn::ops { template class im2col_trait; -template <> class im2col_trait : public multi_linear_sample_trait<2, size_t> +template <> +class im2col_trait : public multi_linear_sample_trait<2, uint32_t> { using multi_linear_sample_trait::multi_linear_sample_trait; }; diff --git a/include/nn/bits/ops/linear_sample.hpp b/include/nn/bits/ops/linear_sample.hpp index 0736aef..868aff3 100644 --- a/include/nn/bits/ops/linear_sample.hpp +++ b/include/nn/bits/ops/linear_sample.hpp @@ -1,4 +1,6 @@ #pragma once +#include + #include /*! @@ -16,35 +18,52 @@ e.g. namespace nn::ops { +struct fixed_padding; +struct valid_padding; +struct same_padding; + +template +class linear_sample_trait; -template class linear_sample_trait +template class linear_sample_trait { - const dim_t pad_l_; // TODO: make it template parameter - const dim_t pad_r_; // TODO: make it template parameter + using signed_dim_t = typename std::make_signed::type; + + const signed_dim_t pad_l_; // TODO: make it template parameter + const signed_dim_t pad_r_; // TODO: make it template parameter const dim_t rate_; const dim_t stride_; const dim_t ksize_; - struct padding_trait; - public: static constexpr dim_t default_rate = 1; static constexpr dim_t default_stride = 1; static constexpr dim_t default_pad_lr = 0; - using padding_t = std::experimental::new_type, padding_trait>; + struct padding_t { + signed_dim_t left_; + signed_dim_t right_; - static constexpr padding_t padding(dim_t p) { return padding_t(p, p); } + padding_t(signed_dim_t left, signed_dim_t right) + : left_(left), right_(right) + { + } + }; + + static constexpr padding_t padding(signed_dim_t p) + { + return padding_t(p, p); + } - static constexpr padding_t padding(dim_t left, dim_t right) + static constexpr padding_t padding(signed_dim_t left, signed_dim_t right) { return padding_t(left, right); }; static dim_t patch_size(dim_t k, dim_t r) { return r * (k - 1) + 1; } - static padding_t even_padding(dim_t p) + static padding_t even_padding(signed_dim_t p) { return padding_t(p / 2, p - p / 2); } @@ -61,10 +80,9 @@ template class linear_sample_trait { const dim_t ps = patch_size(k, r); const dim_t n0 = n % s; - // p = ps - s - (n % s) - // TODO: support negative padding - contract_assert(ps >= s + n0); - return even_padding(ps - s - n0); + // tot_pad = ps - s - (n % s) + return even_padding(static_cast(ps) - + static_cast(s + n0)); } using padding_policy = std::function; @@ -102,13 +120,14 @@ template class linear_sample_trait { } - linear_sample_trait(dim_t ksize, dim_t stride, dim_t rate, dim_t pad_lr) + linear_sample_trait(dim_t ksize, dim_t stride, dim_t rate, + signed_dim_t pad_lr) : linear_sample_trait(ksize, stride, rate, padding(pad_lr)) { } - linear_sample_trait(dim_t ksize, dim_t stride, dim_t rate, dim_t pad_l, - dim_t pad_r) + linear_sample_trait(dim_t ksize, dim_t stride, dim_t rate, + signed_dim_t pad_l, signed_dim_t pad_r) : linear_sample_trait(ksize, stride, rate, padding(pad_l, pad_r)) { // TODO: deprecate it @@ -116,8 +135,8 @@ template class linear_sample_trait linear_sample_trait(dim_t ksize, 
dim_t stride, dim_t rate, const padding_t &pad) - : pad_l_(std::get<0>(pad.dims)), - pad_r_(std::get<1>(pad.dims)), + : pad_l_(pad.left_), + pad_r_(pad.right_), rate_(rate), stride_(stride), ksize_(ksize) @@ -125,8 +144,6 @@ template class linear_sample_trait contract_assert(rate_ >= 1); contract_assert(stride_ >= 1); contract_assert(ksize_ >= 1); - contract_assert(pad_l_ >= 0); - contract_assert(pad_r_ >= 0); } dim_t get_ksize() const { return ksize_; } @@ -171,6 +188,32 @@ template class linear_sample_trait dim_t unpad(dim_t i) const { return i - pad_l_; } }; +template class linear_sample_trait +{ + const dim_t rate_; + const dim_t stride_; + const dim_t ksize_; +}; + +template class linear_sample_trait +{ + const dim_t rate_; + const dim_t stride_; + const dim_t ksize_; +}; + +template +constexpr typename linear_sample_trait::padding_t pad(dim_t p) +{ + return typename linear_sample_trait::padding_t(p, p); +} + +template +constexpr typename linear_sample_trait::padding_t pad(dim_t l, dim_t r) +{ + return typename linear_sample_trait::padding_t(l, r); +} + namespace internal { template static T constant(const T &x) { return x; } diff --git a/include/nn/bits/ops/pool.hpp b/include/nn/bits/ops/pool.hpp index 9b628fd..0c71a94 100644 --- a/include/nn/bits/ops/pool.hpp +++ b/include/nn/bits/ops/pool.hpp @@ -15,7 +15,7 @@ template <> class pool_trait struct ksize_trait; struct stride_trait; - using dim_t = size_t; + using dim_t = uint32_t; using sample1d_t_ = linear_sample_trait; using padding_1d_t = typename sample1d_t_::padding_t; diff --git a/include/nn/bits/ops/reshape.hpp b/include/nn/bits/ops/reshape.hpp index 6aa78d6..b427520 100644 --- a/include/nn/bits/ops/reshape.hpp +++ b/include/nn/bits/ops/reshape.hpp @@ -14,4 +14,23 @@ T as_matrix(const T1 &t) return T(t.data(), as_mat_shape(t.shape())); } +// TODO: make it generic: template +template class flatten +{ + static constexpr ttl::rank_t r = p + q; + + public: + shape<2> operator()(const shape &x) const + { + return as_mat_shape(x); + } + + template + void operator()(const ttl::tensor_ref &y, + const ttl::tensor_view &x) const + { + std::copy(x.data(), x.data() + x.shape().size(), y.data()); + } +}; + } // namespace nn::ops diff --git a/include/nn/experimental/bits/ops/utility.hpp b/include/nn/experimental/bits/ops/utility.hpp index cc811e9..fc62344 100644 --- a/include/nn/experimental/bits/ops/utility.hpp +++ b/include/nn/experimental/bits/ops/utility.hpp @@ -46,7 +46,12 @@ class onehot const auto y_flat = nn::ops::as_matrix>(y); const auto n = x.shape().size(); for (auto i : range(n)) { - y_flat.at(i, x.data()[i]) = static_cast(1); + const dim_t j = x.data()[i]; + if (0 <= j && j < k_) { + y_flat.at(i, j) = static_cast(1); + } else { + // TODO: maybe throw runtime_error(""): + } } } }; diff --git a/include/nn/layers b/include/nn/layers index f4e15c6..6a2ff5c 100644 --- a/include/nn/layers +++ b/include/nn/layers @@ -8,3 +8,4 @@ #include #include #include +#include diff --git a/tests/test_linear_sample.cpp b/tests/test_linear_sample.cpp index b2e3ce5..38b13c6 100644 --- a/tests/test_linear_sample.cpp +++ b/tests/test_linear_sample.cpp @@ -1,13 +1,14 @@ -#include "testing.hpp" - -#include #include #include +#include + +#include "testing.hpp" void test_linear_sample_ksr_nm(int ksize, int stride, int rate, int n, int m) { - nn::ops::linear_sample_trait sample(ksize, stride, rate); + using sample_t = nn::ops::linear_sample_trait; + sample_t sample(ksize, stride, rate); ASSERT_EQ(sample(n), m); ASSERT_EQ(sample(0, 0), 0); @@ -80,9 +81,8 @@ 
void test_valid_padding_ksize_3(dim_t n, dim_t s, dim_t pad_l, dim_t pad_r)
 {
     using sample_t = nn::ops::linear_sample_trait;
     const auto padding = sample_t::valid_padding(3, s, 1, n);
-    const auto [u, v] = padding.dims;
-    ASSERT_EQ(u, pad_l);
-    ASSERT_EQ(v, pad_r);
+    ASSERT_EQ(padding.left_, pad_l);
+    ASSERT_EQ(padding.right_, pad_r);
 }
 
 template
@@ -90,9 +90,8 @@ void test_same_padding_ksize_3(dim_t n, dim_t s, dim_t pad_l, dim_t pad_r)
 {
     using sample_t = nn::ops::linear_sample_trait;
     const auto padding = sample_t::same_padding(3, s, 1, n);
-    const auto [u, v] = padding.dims;
-    ASSERT_EQ(u, pad_l);
-    ASSERT_EQ(v, pad_r);
+    ASSERT_EQ(padding.left_, pad_l);
+    ASSERT_EQ(padding.right_, pad_r);
 }
 TEST(linear_sample_test, test_auto_padding)
 {
@@ -103,3 +102,12 @@ TEST(linear_sample_test, test_auto_padding)
     test_same_padding_ksize_3(56, 1, 1, 1);
     test_same_padding_ksize_3(112, 2, 0, 1);
 }
+
+TEST(linear_sample_test, test_pad)
+{
+    using nn::ops::pad;
+    pad(1);
+    pad(2);
+    pad(1, 2);
+    pad(0, 1);
+}
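Note on the padding change in include/nn/bits/ops/linear_sample.hpp: same_padding previously rejected ps < s + n % s with contract_assert and stored unsigned pads; with the signed left_/right_ fields it can now return negative padding. The sketch below is not the library API but a minimal standalone reproduction of that arithmetic, with plain int standing in for signed_dim_t and the helper names local to the example; the first two checks use the same values as test_same_padding_ksize_3 in the updated test file.

#include <cassert>
#include <cstdio>
#include <utility>

// even_padding: split a (possibly negative) total padding into (left, right).
std::pair<int, int> even_padding(int p) { return {p / 2, p - p / 2}; }

// same_padding for ksize k, stride s, rate r and input length n,
// mirroring: tot_pad = patch_size(k, r) - s - n % s.
std::pair<int, int> same_padding(int k, int s, int r, int n)
{
    const int ps = r * (k - 1) + 1;  // patch_size
    return even_padding(ps - s - n % s);
}

int main()
{
    // Values exercised by test_same_padding_ksize_3 (ksize = 3, rate = 1).
    assert(same_padding(3, 1, 1, 56) == std::make_pair(1, 1));
    assert(same_padding(3, 2, 1, 112) == std::make_pair(0, 1));

    // ksize = 1, stride = 2: the old unsigned code stopped at
    // contract_assert(ps >= s + n0); the signed version yields (0, -1).
    const auto [l, r] = same_padding(1, 2, 1, 112);
    std::printf("same_padding(1, 2, 1, 112) = (%d, %d)\n", l, r);
    return 0;
}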