Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
83 changes: 83 additions & 0 deletions include/xsimd/arch/utils/shifts.hpp
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I named this util `utils` since `common` seems to be used more for implementing the common architecture.

Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
/***************************************************************************
* Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and *
* Martin Renou *
* Copyright (c) QuantStack *
* Copyright (c) Serge Guelton *
* Copyright (c) Marco Barbone *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/

#ifndef XSIMD_UTILS_SHIFTS_HPP
#define XSIMD_UTILS_SHIFTS_HPP

#include "../../config/xsimd_inline.hpp"
#include "../../types/xsimd_batch.hpp"
#include "../../types/xsimd_batch_constant.hpp"
#include "../../types/xsimd_traits.hpp"

namespace xsimd
{
namespace kernel
{
namespace utils
{
// Generator that selects every ``length``-th value from the ``Vs...`` pack,
// starting at ``offset``. Passed to ``make_batch_constant`` below to split an
// interleaved pack of per-lane constants into its strided sub-sequences
// (e.g. offset 0/1 with length 2 picks the even/odd-indexed amounts).
template <typename I, I offset, I length, I... Vs>
struct select_stride
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Perhaps a more hardcore C++ developer wouldn't mind, but I would add comments to these helpers so that people don't have to decipher the code.

{
// The pack materialized as an array so it can be indexed at compile time.
static constexpr I values_array[] = { Vs... };

// Generator interface expected by ``make_batch_constant``: returns the
// constant for lane ``i``. The second argument (the lane count) is unused.
template <typename K>
static constexpr K get(K i, K)
{
return static_cast<K>(values_array[length * i + offset]);
}
};

// Builds a mask with the ``bit_index`` least-significant bits set.
// ``bit_index`` may be the full width of ``I``, in which case every bit of
// the mask is set; that case is handled separately because shifting a value
// by its full bit width is undefined behaviour.
template <typename I>
constexpr I lsb_mask(I bit_index)
{
    return bit_index == 8 * sizeof(I)
        ? static_cast<I>(~I { 0 })
        : static_cast<I>((I { 1 } << bit_index) - I { 1 });
}

// Compile-time check that every lane of the batch constant holds the same
// value, i.e. all of ``Vs...`` equal the first value ``V0``.
template <class T, class A, T V0, T... Vs>
constexpr bool all_equals(batch_constant<T, A, V0, Vs...> shifts)
{
    // Comparing against an integral_constant broadcasts V0 to every lane;
    // ``all()`` then collapses the per-lane comparison to a single bool.
    return (shifts == std::integral_constant<T, V0> {}).all();
}

// Emulates a per-lane left shift on lanes of type ``T`` by shifting lanes of
// twice the width (``widen_t<T>``), for architectures that only provide
// variable shifts at the wider granularity. Each wide lane packs two narrow
// lanes; the strided shift amounts are extracted with ``select_stride``
// (offset 0 = lower half, offset 1 = upper half of each wide lane — this
// pairing assumes the little-endian lane layout of the x86 targets using it).
template <class T, class A, T... Vs>
XSIMD_INLINE batch<T, A> bitwise_lshift_as_twice_larger(
batch<T, A> const& self, batch_constant<T, A, Vs...>) noexcept
{
using T2 = widen_t<T>;

// Reinterpret the register as wide lanes; no data movement involved.
const auto self2 = bitwise_cast<T2>(self);

// Lower byte: shift as twice the size and mask bits flowing to higher byte.
constexpr auto shifts_lo = make_batch_constant<T2, select_stride<T, 0, 2, Vs...>, A>();
constexpr auto mask_lo = lsb_mask<T2>(8 * sizeof(T));
const auto shifted_lo = bitwise_lshift(self2, shifts_lo);
constexpr auto batch_mask_lo = make_batch_constant<T2, mask_lo, A>();
const auto masked_lo = bitwise_and(shifted_lo, batch_mask_lo.as_batch());

// Higher byte: mask bits that would flow from lower byte and shift as twice the size.
// (Masking happens before the shift here, whereas the lower half masks after.)
constexpr auto shifts_hi = make_batch_constant<T2, select_stride<T, 1, 2, Vs...>, A>();
constexpr auto mask_hi = mask_lo << (8 * sizeof(T));
constexpr auto batch_mask_hi = make_batch_constant<T2, mask_hi, A>();
const auto masked_hi = bitwise_and(self2, batch_mask_hi.as_batch());
const auto shifted_hi = bitwise_lshift(masked_hi, shifts_hi);

// Recombine the two independently shifted halves of each wide lane.
return bitwise_cast<T>(bitwise_or(masked_lo, shifted_hi));
}
}
}
}

#endif
24 changes: 24 additions & 0 deletions include/xsimd/arch/xsimd_avx2.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

#include "../types/xsimd_avx2_register.hpp"
#include "../types/xsimd_batch_constant.hpp"
#include "./utils/shifts.hpp"

#include <limits>

Expand Down Expand Up @@ -332,6 +333,29 @@ namespace xsimd
}
}

// bitwise_lshift multiple (constant) specific implementations.
// Missing implementations are dispatched to the `batch` overload in xsimd_api.
// The 1 byte constant implementation calls the 2 bytes constant version, the 2 bytes
// constant version calls into the 4 bytes version which resolves to the dynamic one above.
template <class T, class A, T... Vs,
std::enable_if_t<std::is_integral<T>::value && (sizeof(T) <= 2), int> = 0>
XSIMD_INLINE batch<T, A> bitwise_lshift(
batch<T, A> const& self, batch_constant<T, A, Vs...> shifts, requires_arch<avx2> req) noexcept
{
// The widening emulation operates on the unsigned counterpart of T so the
// masking in bitwise_lshift_as_twice_larger is well defined.
using uint_t = typename std::make_unsigned<T>::type;

// AVX2 only supports 16-bit shifts with a uniform bitshift value,
// otherwise emulate using 32-bit shifts.
XSIMD_IF_CONSTEXPR(utils::all_equals(shifts))
{
// All lanes share the same amount: use the immediate-shift overload.
return bitwise_lshift<shifts.get(0), A>(self, req);
}
return bitwise_cast<T>(
utils::bitwise_lshift_as_twice_larger<uint_t>(
bitwise_cast<uint_t>(self),
batch_constant<uint_t, A, static_cast<uint_t>(Vs)...> {}));
}

// bitwise_or
template <class A, class T, class = std::enable_if_t<std::is_integral<T>::value>>
XSIMD_INLINE batch<T, A> bitwise_or(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx2>) noexcept
Expand Down
31 changes: 31 additions & 0 deletions include/xsimd/arch/xsimd_sse2.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@

#include "../types/xsimd_batch_constant.hpp"
#include "../types/xsimd_sse2_register.hpp"
#include "./utils/shifts.hpp"

namespace xsimd
{
Expand Down Expand Up @@ -326,6 +327,36 @@ namespace xsimd
return bitwise_lshift<shift>(self, common {});
}

// bitwise_lshift multiple (constant)
// Missing implementations are dispatched to the `batch` overload in xsimd_api.
// 16-bit lanes: SSE2 lacks a per-lane variable shift, so emulate
// ``self << Vs`` as ``self * (1 << Vs)`` with a low 16-bit multiply.
template <class T, class A, T... Vs, detail::enable_sized_integral_t<T, 2> = 0>
XSIMD_INLINE batch<T, A> bitwise_lshift(
batch<T, A> const& self, batch_constant<T, A, Vs...> shifts, requires_arch<sse2> req) noexcept
{
// Uniform shift amounts: defer to the cheaper immediate-shift overload.
XSIMD_IF_CONSTEXPR(utils::all_equals(shifts))
{
return bitwise_lshift<shifts.get(0), A>(self, req);
}
// Left shift by k equals multiplication by 2^k; bits shifted out are
// discarded by the low-half multiply, matching shift semantics.
constexpr auto mults = batch_constant<T, A, static_cast<T>(1u << Vs)...>();
return _mm_mullo_epi16(self, mults.as_batch());
}

// 8-bit lanes: SSE2 has neither an 8-bit shift nor an 8-bit multiply, so
// perform the shift on 16-bit lanes with masking
// (see utils::bitwise_lshift_as_twice_larger).
template <class T, class A, T... Vs, detail::enable_sized_integral_t<T, 1> = 0>
XSIMD_INLINE batch<T, A> bitwise_lshift(
batch<T, A> const& self, batch_constant<T, A, Vs...> shifts, requires_arch<sse2> req) noexcept
{
// Work on the unsigned counterpart so the emulation's masking is well defined.
using uint_t = typename std::make_unsigned<T>::type;

// Uniform shift amounts: defer to the cheaper immediate-shift overload.
XSIMD_IF_CONSTEXPR(utils::all_equals(shifts))
{
return bitwise_lshift<shifts.get(0), A>(self, req);
}
return bitwise_cast<T>(
utils::bitwise_lshift_as_twice_larger<uint_t>(
bitwise_cast<uint_t>(self),
batch_constant<uint_t, A, static_cast<uint_t>(Vs)...> {}));
}

// bitwise_not
template <class A>
XSIMD_INLINE batch<float, A> bitwise_not(batch<float, A> const& self, requires_arch<sse2>) noexcept
Expand Down
9 changes: 9 additions & 0 deletions include/xsimd/arch/xsimd_sse4_1.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,15 @@ namespace xsimd
return _mm_ceil_pd(self);
}

// bitwise_lshift multiple (constant)
// SSE4.1 has no per-lane variable shift, but its low 32-bit multiply lets us
// compute ``self << Vs`` as ``self * (1 << Vs)`` lane by lane.
template <class A, uint32_t... Vs>
XSIMD_INLINE batch<uint32_t, A> bitwise_lshift(
    batch<uint32_t, A> const& self, batch_constant<uint32_t, A, Vs...>, requires_arch<sse4_1>) noexcept
{
    // Per-lane powers of two, built at compile time from the shift amounts.
    constexpr batch_constant<uint32_t, A, static_cast<uint32_t>(1u << Vs)...> multipliers {};
    return _mm_mullo_epi32(self, multipliers.as_batch());
}

// fast_cast
namespace detail
{
Expand Down
50 changes: 47 additions & 3 deletions include/xsimd/types/xsimd_api.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -353,6 +353,43 @@ namespace xsimd
return kernel::bitwise_cast<A>(x, batch<T_out, A> {}, A {});
}

namespace detail
{
// Detection for kernel overloads accepting ``batch_constant`` in ``bitwise_lshift``
// directly (or in a parent register function).
// The ``batch_constant`` overload is a rare but useful optimization.
// Running the detection here is less error prone than to add a fallback to all
// architectures.

// Primary template: assume no ``batch_constant`` kernel overload exists.
template <class Arch, class Batch, class BatchConstant, class = void>
struct has_bitwise_lshift_batch_const : std::false_type
{
};

// void_t detection idiom: this specialization is selected when
// ``kernel::bitwise_lshift<Arch>(Batch, BatchConstant, Arch{})`` is well
// formed, i.e. the architecture provides a dedicated ``batch_constant``
// overload (possibly inherited from a parent architecture).
template <class Arch, class Batch, class BatchConstant>
struct has_bitwise_lshift_batch_const<
Arch, Batch, BatchConstant,
void_t<decltype(kernel::bitwise_lshift<Arch>(
std::declval<Batch>(), std::declval<BatchConstant>(), Arch {}))>>
: std::true_type
{
};
Comment on lines +356 to +376
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We run the detection of an optimization in the xsimd_api to avoid adding many error-prone overloads.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I know we've been having issues with error-prone overloads in the past, but I'm not a big fan of that approach.

I'll let it pass this time, but I think we should settle on something better.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree, batch_constant versions are tedious because they break full ordering of which dispatch is the best.
We cannot automate which is better: dispatching to the runtime version, or dispatching to a lower architecture version. The implicit default (because of inheritance) is to dispatch to a lower architecture.
This is especially tricky when a new implementation is added somewhere in the chain, then it can impact the reasoning for other functions not defined in the same file. Hopefully new implementations are not discovered too often.

In this case, I first tried implementing all functions. Coming back to it much later, it became a big effort to track what dispatches to what. This solution was much simpler because it changes the default dispatch from lower architecture to runtime alternative. However I am now realizing this has the same pitfalls. I agree we should ideally stick to the default behavior. If C++17 is not too far away, this could more easily be done with a simplification of overloads.

I'll let it pass this time, but I think we should settle on something better.

Note that I plan to add a symmetric API for bitwise_rshift for AVX2, and possibly to compute the negative shifts in neon.

In fact we should probably add a placeholder API for batch_constant for all functions, even the ones with no constant implementation. That way if something gets added, users would get the benefit without code changes. Otherwise they need to track their .as_batch() calls and which new xsimd constant API was added.


// Tag-dispatch helper (true_type): a kernel ``batch_constant`` overload was
// detected, forward the constant to it directly.
template <class Arch, class T, T... Values>
XSIMD_INLINE batch<T, Arch> bitwise_lshift_batch_const(batch<T, Arch> const& x, batch_constant<T, Arch, Values...> shift, std::true_type) noexcept
{
// Optimized ``batch_constant`` implementation
return kernel::bitwise_lshift<Arch>(x, shift, Arch {});
}

// Tag-dispatch helper (false_type): no dedicated overload; materialize the
// constant into a register and use the run-time per-lane shift.
template <class Arch, class T, T... Values>
XSIMD_INLINE batch<T, Arch> bitwise_lshift_batch_const(batch<T, Arch> const& x, batch_constant<T, Arch, Values...> shift, std::false_type) noexcept
{
// Fallback to regular run-time implementation
return kernel::bitwise_lshift<Arch>(x, shift.as_batch(), Arch {});
}
}

/**
* @ingroup batch_bitwise
*
Expand All @@ -367,17 +404,24 @@ namespace xsimd
detail::static_check_supported_config<T, A>();
return kernel::bitwise_lshift<A>(x, shift, A {});
}
// Shifts every lane of ``x`` left by the compile-time amount ``shift``.
template <size_t shift, class T, class A>
XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& x) noexcept
{
detail::static_check_supported_config<T, A>();
return kernel::bitwise_lshift<shift, A>(x, A {});
}
// Shifts each lane of ``x`` left by the amount held in the matching lane of
// ``shift`` (run-time, per-lane shift amounts).
template <class T, class A>
XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& x, batch<T, A> const& shift) noexcept
{
detail::static_check_supported_config<T, A>();
return kernel::bitwise_lshift<A>(x, shift, A {});
}
template <size_t shift, class T, class A>
XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& x) noexcept
template <class T, class A, T... Values>
XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& x, batch_constant<T, A, Values...> shift) noexcept
{
detail::static_check_supported_config<T, A>();
return kernel::bitwise_lshift<shift, A>(x, A {});
using has_batch_const_impl = detail::has_bitwise_lshift_batch_const<A, decltype(x), decltype(shift)>;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it seems to me that the dispatch here should be 'if all Values... are the same, dispatch to the overload that takes a single parameter, otherwise dispatch to the generic overload. Wouldn't that simplify the whole implementation?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That is a nice improvement to add as well, but this is not what has_bitwise_lshift_batch_const is checking.
It is a compile time check to detect if there is a batch_constant overload in the kernel namespace.

I added this pattern because in most cases, the extra overload would forward to the dynamic version.
This is tedious and possibly error-prone, à la #1267.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ack

return detail::bitwise_lshift_batch_const<A>(x, shift, has_batch_const_impl {});
}

/**
Expand Down
16 changes: 16 additions & 0 deletions include/xsimd/types/xsimd_traits.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#ifndef XSIMD_TRAITS_HPP
#define XSIMD_TRAITS_HPP

#include <cstdint>
#include <type_traits>

#include "xsimd_batch.hpp"
Expand Down Expand Up @@ -421,6 +422,21 @@ namespace xsimd
using type = uint16_t;
};
// Widening to the next larger signed integral type, mirroring the unsigned
// specializations above.
template <>
struct widen<int32_t>
{
using type = int64_t;
};
template <>
struct widen<int16_t>
{
using type = int32_t;
};
template <>
struct widen<int8_t>
{
using type = int16_t;
};
template <>
struct widen<float>
{
using type = double;
Expand Down
46 changes: 42 additions & 4 deletions test/test_xsimd_api.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -351,7 +351,7 @@ struct xsimd_api_integral_types_functions
{
using value_type = typename scalar_type<T>::type;

void test_bitwise_lshift()
void test_bitwise_lshift_single()
{
constexpr int shift = 3;
value_type val0(12);
Expand All @@ -364,6 +364,35 @@ struct xsimd_api_integral_types_functions
CHECK_EQ(extract(cr), r);
}

/* Test when T is a batch_constant only, not a scalar.
 * Checks both the batch overload and the batch_constant overload of
 * bitwise_lshift against a scalar reference computation, using a different
 * (always valid) shift amount per lane. */
template <typename U = T>
void test_bitwise_lshift_multiple(T const& vals, typename std::enable_if<!std::is_integral<U>::value, int>::type = 0)
{
#ifndef XSIMD_NO_SUPPORTED_ARCHITECTURE
// iota % digits keeps every per-lane shift within the value's bit width,
// so the scalar reference `<<` below is well defined.
constexpr auto Max = static_cast<value_type>(std::numeric_limits<value_type>::digits);
constexpr auto max_batch = xsimd::make_batch_constant<value_type, Max>();
constexpr auto shifts = xsimd::make_iota_batch_constant<value_type>() % max_batch;

{
// Run-time (batch) path and compile-time (batch_constant) path must agree.
auto shifted = xsimd::bitwise_lshift(vals, shifts.as_batch());
auto shifted_cst = xsimd::bitwise_lshift(vals, shifts);

for (std::size_t i = 0; i < shifts.size; ++i)
{
const auto expected = static_cast<value_type>(vals.get(i) << shifts.get(i));
CHECK_EQ(shifted.get(i), expected);
CHECK_EQ(shifted_cst.get(i), expected);
}
}
#endif
}

/* Test multiple does not make sense when T is scalar: a scalar has a single
 * value, so there is no per-lane shift to exercise. This overload keeps the
 * templated test suite compiling for scalar instantiations. */
template <typename U = T>
void test_bitwise_lshift_multiple(T const&, typename std::enable_if<std::is_integral<U>::value, int>::type = 0)
{
}

void test_bitwise_rshift()
{
constexpr int shift = 3;
Expand Down Expand Up @@ -424,11 +453,20 @@ struct xsimd_api_integral_types_functions

TEST_CASE_TEMPLATE("[xsimd api | integral types functions]", B, INTEGRAL_TYPES)
{
xsimd_api_integral_types_functions<B> Test;
using test_type = xsimd_api_integral_types_functions<B>;

test_type Test;

SUBCASE("test_bitwise_lshift_single")
{
Test.test_bitwise_lshift_single();
}

SUBCASE("bitwise_lshift")
SUBCASE("bitwise_lshift_multiple")
{
Test.test_bitwise_lshift();
Test.test_bitwise_lshift_multiple({ 1 });
Test.test_bitwise_lshift_multiple({ 3 });
Test.test_bitwise_lshift_multiple({ 127 });
}

SUBCASE("bitwise_rshift")
Expand Down