/// @ref core
/// @file glm/detail/func_integer.inl

#include "type_vec2.hpp"
#include "type_vec3.hpp"
#include "type_vec4.hpp"
#include "type_int.hpp"
#include "_vectorize.hpp"
#if(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC)
#	include <intrin.h>
#	pragma intrinsic(_BitScanReverse)
#endif//(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC)
#include <limits>

#if !GLM_HAS_EXTENDED_INTEGER_TYPE
#	if GLM_COMPILER & GLM_COMPILER_GCC
#		pragma GCC diagnostic ignored "-Wlong-long"
#	endif
#	if (GLM_COMPILER & GLM_COMPILER_CLANG)
#		pragma clang diagnostic ignored "-Wc++11-long-long"
#	endif
#endif

namespace glm{
namespace detail
{
	// Returns a value with the 'Bits' least significant bits set.
	template <typename T>
	GLM_FUNC_QUALIFIER T mask(T Bits)
	{
		return Bits >= sizeof(T) * 8 ? ~static_cast<T>(0) : (static_cast<T>(1) << Bits) - static_cast<T>(1);
	}

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, bool Aligned, bool EXEC>
	struct compute_bitfieldReverseStep
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T, T)
		{
			return v;
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, bool Aligned>
	struct compute_bitfieldReverseStep<T, P, vecType, Aligned, true>
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T Mask, T Shift)
		{
			return (v & Mask) << Shift | (v & (~Mask)) >> Shift;
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, bool Aligned, bool EXEC>
	struct compute_bitfieldBitCountStep
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T, T)
		{
			return v;
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, bool Aligned>
	struct compute_bitfieldBitCountStep<T, P, vecType, Aligned, true>
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T Mask, T Shift)
		{
			return (v & Mask) + ((v >> Shift) & Mask);
		}
	};

	template <typename genIUType, size_t Bits>
	struct compute_findLSB
	{
		GLM_FUNC_QUALIFIER static int call(genIUType Value)
		{
			if(Value == 0)
				return -1;

			return glm::bitCount(~Value & (Value - static_cast<genIUType>(1)));
		}
	};
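	// For illustration: ~Value & (Value - 1) keeps exactly the zero bits below
	// the lowest set bit, so counting them yields that bit's index. For example,
	// with Value == 0b01101000, Value - 1 == 0b01100111 and the expression gives
	// 0b00000111, whose population count is 3, the index of the lowest set bit.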
#	if GLM_HAS_BITSCAN_WINDOWS
		template <typename genIUType>
		struct compute_findLSB<genIUType, 32>
		{
			GLM_FUNC_QUALIFIER static int call(genIUType Value)
			{
				unsigned long Result(0);
				unsigned char IsNotNull = _BitScanForward(&Result, *reinterpret_cast<unsigned long*>(&Value));
				return IsNotNull ? int(Result) : -1;
			}
		};

#		if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
		template <typename genIUType>
		struct compute_findLSB<genIUType, 64>
		{
			GLM_FUNC_QUALIFIER static int call(genIUType Value)
			{
				unsigned long Result(0);
				unsigned char IsNotNull = _BitScanForward64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
				return IsNotNull ? int(Result) : -1;
			}
		};
#		endif
#	endif//GLM_HAS_BITSCAN_WINDOWS

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, bool EXEC = true>
	struct compute_findMSB_step_vec
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & x, T Shift)
		{
			return x | (x >> Shift);
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
	struct compute_findMSB_step_vec<T, P, vecType, false>
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & x, T)
		{
			return x;
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, int>
	struct compute_findMSB_vec
	{
		GLM_FUNC_QUALIFIER static vecType<int, P> call(vecType<T, P> const & vec)
		{
			// Propagate the highest set bit into every lower position, then count
			// the remaining zero bits to recover its index.
			vecType<T, P> x(vec);
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 1));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 2));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 4));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >= 16>::call(x, static_cast<T>( 8));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >= 32>::call(x, static_cast<T>(16));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >= 64>::call(x, static_cast<T>(32));
			return vecType<int, P>(sizeof(T) * 8 - 1) - glm::bitCount(~x);
		}
	};

#	if GLM_HAS_BITSCAN_WINDOWS
		template <typename genIUType>
		GLM_FUNC_QUALIFIER int compute_findMSB_32(genIUType Value)
		{
			unsigned long Result(0);
			unsigned char IsNotNull = _BitScanReverse(&Result, *reinterpret_cast<unsigned long*>(&Value));
			return IsNotNull ? int(Result) : -1;
		}

		template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
		struct compute_findMSB_vec<T, P, vecType, 32>
		{
			GLM_FUNC_QUALIFIER static vecType<int, P> call(vecType<T, P> const & x)
			{
				return detail::functor1<int, T, P, vecType>::call(compute_findMSB_32, x);
			}
		};

#		if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
		template <typename genIUType>
		GLM_FUNC_QUALIFIER int compute_findMSB_64(genIUType Value)
		{
			unsigned long Result(0);
			unsigned char IsNotNull = _BitScanReverse64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
			return IsNotNull ? int(Result) : -1;
		}

		template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
		struct compute_findMSB_vec<T, P, vecType, 64>
		{
			GLM_FUNC_QUALIFIER static vecType<int, P> call(vecType<T, P> const & x)
			{
				return detail::functor1<int, T, P, vecType>::call(compute_findMSB_64, x);
			}
		};
#		endif
#	endif//GLM_HAS_BITSCAN_WINDOWS
}//namespace detail

	// uaddCarry
	GLM_FUNC_QUALIFIER uint uaddCarry(uint const & x, uint const & y, uint & Carry)
	{
		uint64 const Value64(static_cast<uint64>(x) + static_cast<uint64>(y));
		uint64 const Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
		Carry = Value64 > Max32 ? 1u : 0u;
		return static_cast<uint32>(Value64 % (Max32 + static_cast<uint64>(1)));
	}

	template <precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<uint, P> uaddCarry(vecType<uint, P> const & x, vecType<uint, P> const & y, vecType<uint, P> & Carry)
	{
		vecType<uint64, P> Value64(vecType<uint64, P>(x) + vecType<uint64, P>(y));
		vecType<uint64, P> Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
		Carry = mix(vecType<uint32, P>(0), vecType<uint32, P>(1), greaterThan(Value64, Max32));
		return vecType<uint32, P>(Value64 % (Max32 + static_cast<uint64>(1)));
	}

	// usubBorrow
	GLM_FUNC_QUALIFIER uint usubBorrow(uint const & x, uint const & y, uint & Borrow)
	{
		GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");

		Borrow = x >= y ? static_cast<uint32>(0) : static_cast<uint32>(1);
		if(y >= x)
			return y - x;
		else
			return static_cast<uint32>((static_cast<int64>(1) << static_cast<int64>(32)) + (static_cast<int64>(y) - static_cast<int64>(x)));
	}

	template <precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<uint, P> usubBorrow(vecType<uint, P> const & x, vecType<uint, P> const & y, vecType<uint, P> & Borrow)
	{
		Borrow = mix(vecType<uint, P>(1), vecType<uint, P>(0), greaterThanEqual(x, y));
		vecType<uint, P> const YgeX(y - x);
		vecType<uint, P> const XgeY(vecType<uint32, P>((static_cast<int64>(1) << static_cast<int64>(32)) + (vecType<int64, P>(y) - vecType<int64, P>(x))));
		return mix(XgeY, YgeX, greaterThanEqual(y, x));
	}

	// umulExtended
	GLM_FUNC_QUALIFIER void umulExtended(uint const & x, uint const & y, uint & msb, uint & lsb)
	{
		GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");

		uint64 Value64 = static_cast<uint64>(x) * static_cast<uint64>(y);
		msb = static_cast<uint>(Value64 >> static_cast<uint64>(32));
		lsb = static_cast<uint>(Value64);
	}

	template <precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER void umulExtended(vecType<uint, P> const & x, vecType<uint, P> const & y, vecType<uint, P> & msb, vecType<uint, P> & lsb)
	{
		GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");

		vecType<uint64, P> Value64(vecType<uint64, P>(x) * vecType<uint64, P>(y));
		msb = vecType<uint32, P>(Value64 >> static_cast<uint64>(32));
		lsb = vecType<uint32, P>(Value64);
	}

	// imulExtended
	GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int & msb, int & lsb)
	{
		GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");

		int64 Value64 = static_cast<int64>(x) * static_cast<int64>(y);
		msb = static_cast<int>(Value64 >> static_cast<int64>(32));
		lsb = static_cast<int>(Value64);
	}

	template <precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER void imulExtended(vecType<int, P> const & x, vecType<int, P> const & y, vecType<int, P> & msb, vecType<int, P> & lsb)
	{
		GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");

		vecType<int64, P> Value64(vecType<int64, P>(x) * vecType<int64, P>(y));
		lsb = vecType<int32, P>(Value64 & static_cast<int64>(0xFFFFFFFF));
		msb = vecType<int32, P>((Value64 >> static_cast<int64>(32)) & static_cast<int64>(0xFFFFFFFF));
	}
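	// Usage sketch for the extended multiplies above (illustrative only):
	// umulExtended(0xFFFFFFFFu, 0xFFFFFFFFu, msb, lsb) stores the upper and lower
	// halves of the full 64-bit product, so msb == 0xFFFFFFFEu and lsb == 1u.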
	// bitfieldExtract
	template <typename genIUType>
	GLM_FUNC_QUALIFIER genIUType bitfieldExtract(genIUType Value, int Offset, int Bits)
	{
		return bitfieldExtract(tvec1<genIUType>(Value), Offset, Bits).x;
	}

	template <typename T, precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<T, P> bitfieldExtract(vecType<T, P> const & Value, int Offset, int Bits)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldExtract' only accept integer inputs");

		return (Value >> static_cast<T>(Offset)) & static_cast<T>(detail::mask(Bits));
	}

	// bitfieldInsert
	template <typename genIUType>
	GLM_FUNC_QUALIFIER genIUType bitfieldInsert(genIUType const & Base, genIUType const & Insert, int Offset, int Bits)
	{
		return bitfieldInsert(tvec1<genIUType>(Base), tvec1<genIUType>(Insert), Offset, Bits).x;
	}

	template <typename T, precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<T, P> bitfieldInsert(vecType<T, P> const & Base, vecType<T, P> const & Insert, int Offset, int Bits)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldInsert' only accept integer values");

		T const Mask = static_cast<T>(detail::mask(Bits) << Offset);
		return (Base & ~Mask) | (Insert & Mask);
	}

	// bitfieldReverse
	template <typename genType>
	GLM_FUNC_QUALIFIER genType bitfieldReverse(genType x)
	{
		return bitfieldReverse(glm::tvec1<genType, glm::defaultp>(x)).x;
	}

	template <typename T, precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<T, P> bitfieldReverse(vecType<T, P> const & v)
	{
		// Swap adjacent bits, then pairs, nibbles, bytes, half words and words;
		// steps wider than T are compiled out through the last template argument.
		vecType<T, P> x(v);
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  2>::call(x, T(0x5555555555555555ull), static_cast<T>( 1));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  4>::call(x, T(0x3333333333333333ull), static_cast<T>( 2));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  8>::call(x, T(0x0F0F0F0F0F0F0F0Full), static_cast<T>( 4));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 16>::call(x, T(0x00FF00FF00FF00FFull), static_cast<T>( 8));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 32>::call(x, T(0x0000FFFF0000FFFFull), static_cast<T>(16));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 64>::call(x, T(0x00000000FFFFFFFFull), static_cast<T>(32));
		return x;
	}
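	// Illustrative 8-bit walkthrough of the reversal above: for x == 0b00000001,
	// the 1-bit swap gives 0b00000010, the 2-bit swap gives 0b00001000 and the
	// 4-bit swap gives 0b10000000, i.e. the bit pattern reversed; the wider
	// steps are no-ops for an 8-bit type.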

	// bitCount
	template <typename genType>
	GLM_FUNC_QUALIFIER int bitCount(genType x)
	{
		return bitCount(glm::tvec1<genType, glm::defaultp>(x)).x;
	}

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<int, P> bitCount(vecType<T, P> const & v)
	{
		// Parallel bit count: sum bits in pairs, then nibbles, bytes, half words
		// and words, working on the unsigned representation of T.
		vecType<typename detail::make_unsigned<T>::type, P> x(*reinterpret_cast<vecType<typename detail::make_unsigned<T>::type, P> const *>(&v));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  2>::call(x, typename detail::make_unsigned<T>::type(0x5555555555555555ull), typename detail::make_unsigned<T>::type( 1));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  4>::call(x, typename detail::make_unsigned<T>::type(0x3333333333333333ull), typename detail::make_unsigned<T>::type( 2));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  8>::call(x, typename detail::make_unsigned<T>::type(0x0F0F0F0F0F0F0F0Full), typename detail::make_unsigned<T>::type( 4));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 16>::call(x, typename detail::make_unsigned<T>::type(0x00FF00FF00FF00FFull), typename detail::make_unsigned<T>::type( 8));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 32>::call(x, typename detail::make_unsigned<T>::type(0x0000FFFF0000FFFFull), typename detail::make_unsigned<T>::type(16));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 64>::call(x, typename detail::make_unsigned<T>::type(0x00000000FFFFFFFFull), typename detail::make_unsigned<T>::type(32));
		return vecType<int, P>(x);
	}
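	// Illustrative note: each step of the reduction above adds neighbouring
	// groups of bits. For the 8-bit value 0b11010110, the first step yields
	// 0b10010101 (per-pair counts 2,1,1,1), the second yields 0b00110010
	// (per-nibble counts 3,2) and the third yields 0b00000101, i.e. the
	// population count 5.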

	// findLSB
	template <typename genIUType>
	GLM_FUNC_QUALIFIER int findLSB(genIUType Value)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findLSB' only accept integer values");

		return detail::compute_findLSB<genIUType, sizeof(genIUType) * 8>::call(Value);
	}

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<int, P> findLSB(vecType<T, P> const & x)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findLSB' only accept integer values");

		return detail::functor1<int, T, P, vecType>::call(findLSB, x);
	}

	// findMSB
	template <typename genIUType>
	GLM_FUNC_QUALIFIER int findMSB(genIUType x)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findMSB' only accept integer values");

		return findMSB(tvec1<genIUType>(x)).x;
	}

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<int, P> findMSB(vecType<T, P> const & x)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findMSB' only accept integer values");

		return detail::compute_findMSB_vec<T, P, vecType, sizeof(T) * 8>::call(x);
	}
}//namespace glm

#if GLM_ARCH != GLM_ARCH_PURE && GLM_HAS_UNRESTRICTED_UNIONS
#	include "func_integer_simd.inl"
#endif