commit d8443b3c8c6283a264ae912b65b729f025f139f9 Author: Zack Buhman Date: Thu Mar 5 21:53:02 2026 -0600 initial diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9ae3a0d --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +.~* +.\#* +\#* +*~ +*.o +main +*.so +*.dylib \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..88f9c0c --- /dev/null +++ b/Makefile @@ -0,0 +1,50 @@ +#PREFIX = x86_64-w64-mingw32- +CC=$(PREFIX)gcc +CXX=$(PREFIX)g++ + +OPT = -Og -march=x86-64-v3 + +CSTD = -std=gnu23 +CXXSTD = -std=gnu++23 +CFLAGS += -g +CFLAGS += -fpic +CFLAGS += -I./include +CFLAGS += -Wall -Werror -Wfatal-errors -Wno-error=unused-variable -Wno-error=unused-but-set-variable +CFLAGS += -Wno-error=unknown-pragmas -Wno-unknown-pragmas +CFLAGS += $(shell pkg-config --cflags glfw3) + +LDFLAGS += -lm +LDFLAGS += $(shell pkg-config --libs glfw3) + +OBJS = \ + src/gl.o \ + src/opengl.o \ + src/test.o + +all: test.so + +%.o: %.c + $(CC) $(ARCH) $(CSTD) $(CFLAGS) $(OPT) -c $< -o $@ + +%.o: %.cpp + $(CXX) $(ARCH) $(CXXSTD) $(CFLAGS) $(OPT) -c $< -o $@ + +test.so: $(OBJS) + $(CC) $(ARCH) $(OPT) -shared -g $^ -o $@ -lSDL3 + +main: $(OBJS) src/main.o + $(CC) $(ARCH) $(LDFLAGS) $(OPT) -g $^ -o $@ + +clean: + find . -type f ! 
-name "*.*" -delete + +.SUFFIXES: +.INTERMEDIATE: +.SECONDARY: +.PHONY: all clean phony + +%: RCS/%,v +%: RCS/% +%: %,v +%: s.% +%: SCCS/s.% diff --git a/conf.lua b/conf.lua new file mode 100644 index 0000000..478c510 --- /dev/null +++ b/conf.lua @@ -0,0 +1,7 @@ +function love.conf(t) + t.window.width = 1024 + t.window.height = 1024 + t.window.depth = true + t.window.resizable = true + t.graphics.excluderenderers = {"vulkan", "metal"} +end diff --git a/include/KHR/khrplatform.h b/include/KHR/khrplatform.h new file mode 100644 index 0000000..0164644 --- /dev/null +++ b/include/KHR/khrplatform.h @@ -0,0 +1,311 @@ +#ifndef __khrplatform_h_ +#define __khrplatform_h_ + +/* +** Copyright (c) 2008-2018 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ + +/* Khronos platform-specific types and definitions. 
+ * + * The master copy of khrplatform.h is maintained in the Khronos EGL + * Registry repository at https://github.com/KhronosGroup/EGL-Registry + * The last semantic modification to khrplatform.h was at commit ID: + * 67a3e0864c2d75ea5287b9f3d2eb74a745936692 + * + * Adopters may modify this file to suit their platform. Adopters are + * encouraged to submit platform specific modifications to the Khronos + * group so that they can be included in future versions of this file. + * Please submit changes by filing pull requests or issues on + * the EGL Registry repository linked above. + * + * + * See the Implementer's Guidelines for information about where this file + * should be located on your system and for more details of its use: + * http://www.khronos.org/registry/implementers_guide.pdf + * + * This file should be included as + * #include + * by Khronos client API header files that use its types and defines. + * + * The types in khrplatform.h should only be used to define API-specific types. + * + * Types defined in khrplatform.h: + * khronos_int8_t signed 8 bit + * khronos_uint8_t unsigned 8 bit + * khronos_int16_t signed 16 bit + * khronos_uint16_t unsigned 16 bit + * khronos_int32_t signed 32 bit + * khronos_uint32_t unsigned 32 bit + * khronos_int64_t signed 64 bit + * khronos_uint64_t unsigned 64 bit + * khronos_intptr_t signed same number of bits as a pointer + * khronos_uintptr_t unsigned same number of bits as a pointer + * khronos_ssize_t signed size + * khronos_usize_t unsigned size + * khronos_float_t signed 32 bit floating point + * khronos_time_ns_t unsigned 64 bit time in nanoseconds + * khronos_utime_nanoseconds_t unsigned time interval or absolute time in + * nanoseconds + * khronos_stime_nanoseconds_t signed time interval in nanoseconds + * khronos_boolean_enum_t enumerated boolean type. This should + * only be used as a base type when a client API's boolean type is + * an enum. 
Client APIs which use an integer or other type for + * booleans cannot use this as the base type for their boolean. + * + * Tokens defined in khrplatform.h: + * + * KHRONOS_FALSE, KHRONOS_TRUE Enumerated boolean false/true values. + * + * KHRONOS_SUPPORT_INT64 is 1 if 64 bit integers are supported; otherwise 0. + * KHRONOS_SUPPORT_FLOAT is 1 if floats are supported; otherwise 0. + * + * Calling convention macros defined in this file: + * KHRONOS_APICALL + * KHRONOS_APIENTRY + * KHRONOS_APIATTRIBUTES + * + * These may be used in function prototypes as: + * + * KHRONOS_APICALL void KHRONOS_APIENTRY funcname( + * int arg1, + * int arg2) KHRONOS_APIATTRIBUTES; + */ + +#if defined(__SCITECH_SNAP__) && !defined(KHRONOS_STATIC) +# define KHRONOS_STATIC 1 +#endif + +/*------------------------------------------------------------------------- + * Definition of KHRONOS_APICALL + *------------------------------------------------------------------------- + * This precedes the return type of the function in the function prototype. + */ +#if defined(KHRONOS_STATIC) + /* If the preprocessor constant KHRONOS_STATIC is defined, make the + * header compatible with static linking. */ +# define KHRONOS_APICALL +#elif defined(_WIN32) +# define KHRONOS_APICALL __declspec(dllimport) +#elif defined (__SYMBIAN32__) +# define KHRONOS_APICALL IMPORT_C +#elif defined(__ANDROID__) +# define KHRONOS_APICALL __attribute__((visibility("default"))) +#else +# define KHRONOS_APICALL +#endif + +/*------------------------------------------------------------------------- + * Definition of KHRONOS_APIENTRY + *------------------------------------------------------------------------- + * This follows the return type of the function and precedes the function + * name in the function prototype. 
+ */ +#if defined(_WIN32) && !defined(_WIN32_WCE) && !defined(__SCITECH_SNAP__) + /* Win32 but not WinCE */ +# define KHRONOS_APIENTRY __stdcall +#else +# define KHRONOS_APIENTRY +#endif + +/*------------------------------------------------------------------------- + * Definition of KHRONOS_APIATTRIBUTES + *------------------------------------------------------------------------- + * This follows the closing parenthesis of the function prototype arguments. + */ +#if defined (__ARMCC_2__) +#define KHRONOS_APIATTRIBUTES __softfp +#else +#define KHRONOS_APIATTRIBUTES +#endif + +/*------------------------------------------------------------------------- + * basic type definitions + *-----------------------------------------------------------------------*/ +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(__GNUC__) || defined(__SCO__) || defined(__USLC__) + + +/* + * Using + */ +#include +typedef int32_t khronos_int32_t; +typedef uint32_t khronos_uint32_t; +typedef int64_t khronos_int64_t; +typedef uint64_t khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 +/* + * To support platform where unsigned long cannot be used interchangeably with + * inptr_t (e.g. CHERI-extended ISAs), we can use the stdint.h intptr_t. + * Ideally, we could just use (u)intptr_t everywhere, but this could result in + * ABI breakage if khronos_uintptr_t is changed from unsigned long to + * unsigned long long or similar (this results in different C++ name mangling). + * To avoid changes for existing platforms, we restrict usage of intptr_t to + * platforms where the size of a pointer is larger than the size of long. 
+ */ +#if defined(__SIZEOF_LONG__) && defined(__SIZEOF_POINTER__) +#if __SIZEOF_POINTER__ > __SIZEOF_LONG__ +#define KHRONOS_USE_INTPTR_T +#endif +#endif + +#elif defined(__VMS ) || defined(__sgi) + +/* + * Using + */ +#include +typedef int32_t khronos_int32_t; +typedef uint32_t khronos_uint32_t; +typedef int64_t khronos_int64_t; +typedef uint64_t khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif defined(_WIN32) && !defined(__SCITECH_SNAP__) + +/* + * Win32 + */ +typedef __int32 khronos_int32_t; +typedef unsigned __int32 khronos_uint32_t; +typedef __int64 khronos_int64_t; +typedef unsigned __int64 khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif defined(__sun__) || defined(__digital__) + +/* + * Sun or Digital + */ +typedef int khronos_int32_t; +typedef unsigned int khronos_uint32_t; +#if defined(__arch64__) || defined(_LP64) +typedef long int khronos_int64_t; +typedef unsigned long int khronos_uint64_t; +#else +typedef long long int khronos_int64_t; +typedef unsigned long long int khronos_uint64_t; +#endif /* __arch64__ */ +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif 0 + +/* + * Hypothetical platform with no float or int64 support + */ +typedef int khronos_int32_t; +typedef unsigned int khronos_uint32_t; +#define KHRONOS_SUPPORT_INT64 0 +#define KHRONOS_SUPPORT_FLOAT 0 + +#else + +/* + * Generic fallback + */ +#include +typedef int32_t khronos_int32_t; +typedef uint32_t khronos_uint32_t; +typedef int64_t khronos_int64_t; +typedef uint64_t khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#endif + + +/* + * Types that are (so far) the same on all platforms + */ +typedef signed char khronos_int8_t; +typedef unsigned char khronos_uint8_t; +typedef signed short int khronos_int16_t; +typedef unsigned short int khronos_uint16_t; + +/* + * Types that differ between LLP64 and LP64 architectures - in LLP64, + * pointers 
are 64 bits, but 'long' is still 32 bits. Win64 appears + * to be the only LLP64 architecture in current use. + */ +#ifdef KHRONOS_USE_INTPTR_T +typedef intptr_t khronos_intptr_t; +typedef uintptr_t khronos_uintptr_t; +#elif defined(_WIN64) +typedef signed long long int khronos_intptr_t; +typedef unsigned long long int khronos_uintptr_t; +#else +typedef signed long int khronos_intptr_t; +typedef unsigned long int khronos_uintptr_t; +#endif + +#if defined(_WIN64) +typedef signed long long int khronos_ssize_t; +typedef unsigned long long int khronos_usize_t; +#else +typedef signed long int khronos_ssize_t; +typedef unsigned long int khronos_usize_t; +#endif + +#if KHRONOS_SUPPORT_FLOAT +/* + * Float type + */ +typedef float khronos_float_t; +#endif + +#if KHRONOS_SUPPORT_INT64 +/* Time types + * + * These types can be used to represent a time interval in nanoseconds or + * an absolute Unadjusted System Time. Unadjusted System Time is the number + * of nanoseconds since some arbitrary system event (e.g. since the last + * time the system booted). The Unadjusted System Time is an unsigned + * 64 bit value that wraps back to 0 every 584 years. Time intervals + * may be either signed or unsigned. + */ +typedef khronos_uint64_t khronos_utime_nanoseconds_t; +typedef khronos_int64_t khronos_stime_nanoseconds_t; +#endif + +/* + * Dummy value used to pad enum types to 32 bits. + */ +#ifndef KHRONOS_MAX_ENUM +#define KHRONOS_MAX_ENUM 0x7FFFFFFF +#endif + +/* + * Enumerated boolean type + * + * Values other than zero should be considered to be true. Therefore + * comparisons should not be made against KHRONOS_TRUE. 
+ */ +typedef enum { + KHRONOS_FALSE = 0, + KHRONOS_TRUE = 1, + KHRONOS_BOOLEAN_ENUM_FORCE_SIZE = KHRONOS_MAX_ENUM +} khronos_boolean_enum_t; + +#endif /* __khrplatform_h_ */ diff --git a/include/directxmath/directxmath.h b/include/directxmath/directxmath.h new file mode 100644 index 0000000..0318518 --- /dev/null +++ b/include/directxmath/directxmath.h @@ -0,0 +1,2221 @@ +//------------------------------------------------------------------------------------- +// DirectXMath.h -- SIMD C++ Math library +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// +// http://go.microsoft.com/fwlink/?LinkID=615560 +//------------------------------------------------------------------------------------- + +#pragma once + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" + +#ifndef __cplusplus +#error DirectX Math requires C++ +#endif + +#define DIRECTX_MATH_VERSION 314 + +#if defined(_MSC_VER) && (_MSC_VER < 1910) +#error DirectX Math requires Visual C++ 2017 or later. 
+#endif + +#if defined(_MSC_VER) && !defined(_M_ARM) && !defined(_M_ARM64) && !defined(_M_HYBRID_X86_ARM64) && (!_MANAGED) && (!_M_CEE) && (!defined(_M_IX86_FP) || (_M_IX86_FP > 1)) && !defined(_XM_NO_INTRINSICS_) && !defined(_XM_VECTORCALL_) +#define _XM_VECTORCALL_ 1 +#endif + +#if _XM_VECTORCALL_ +#define XM_CALLCONV __vectorcall +#elif defined(__GNUC__) +#define XM_CALLCONV +#else +#define XM_CALLCONV __fastcall +#endif + +#ifndef XM_DEPRECATED +#ifdef __GNUC__ +#define XM_DEPRECATED __attribute__ ((deprecated)) +#else +#define XM_DEPRECATED __declspec(deprecated("This is deprecated and will be removed in a future version.")) +#endif +#endif + +#if !defined(_XM_AVX2_INTRINSICS_) && defined(__AVX2__) && !defined(_XM_NO_INTRINSICS_) +#define _XM_AVX2_INTRINSICS_ +#endif + +#if !defined(_XM_FMA3_INTRINSICS_) && defined(_XM_AVX2_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) +#define _XM_FMA3_INTRINSICS_ +#endif + +#if !defined(_XM_F16C_INTRINSICS_) && defined(_XM_AVX2_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) +#define _XM_F16C_INTRINSICS_ +#endif + +#if !defined(_XM_F16C_INTRINSICS_) && defined(__F16C__) && !defined(_XM_NO_INTRINSICS_) +#define _XM_F16C_INTRINSICS_ +#endif + +#if defined(_XM_FMA3_INTRINSICS_) && !defined(_XM_AVX_INTRINSICS_) +#define _XM_AVX_INTRINSICS_ +#endif + +#if defined(_XM_F16C_INTRINSICS_) && !defined(_XM_AVX_INTRINSICS_) +#define _XM_AVX_INTRINSICS_ +#endif + +#if !defined(_XM_AVX_INTRINSICS_) && defined(__AVX__) && !defined(_XM_NO_INTRINSICS_) +#define _XM_AVX_INTRINSICS_ +#endif + +#if defined(_XM_AVX_INTRINSICS_) && !defined(_XM_SSE4_INTRINSICS_) +#define _XM_SSE4_INTRINSICS_ +#endif + +#if defined(_XM_SSE4_INTRINSICS_) && !defined(_XM_SSE3_INTRINSICS_) +#define _XM_SSE3_INTRINSICS_ +#endif + +#if defined(_XM_SSE3_INTRINSICS_) && !defined(_XM_SSE_INTRINSICS_) +#define _XM_SSE_INTRINSICS_ +#endif + +#if !defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) +#if (defined(_M_IX86) || 
defined(_M_X64) || __i386__ || __x86_64__) && !defined(_M_HYBRID_X86_ARM64) +#define _XM_SSE_INTRINSICS_ +#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __arm__ || __aarch64__ +#define _XM_ARM_NEON_INTRINSICS_ +#elif !defined(_XM_NO_INTRINSICS_) +#error DirectX Math does not support this target +#endif +#endif // !_XM_ARM_NEON_INTRINSICS_ && !_XM_SSE_INTRINSICS_ && !_XM_NO_INTRINSICS_ + +#if !defined(_XM_NO_XMVECTOR_OVERLOADS_) && (defined(__clang__) || defined(__GNUC__)) +#define _XM_NO_XMVECTOR_OVERLOADS_ +#endif + +#pragma warning(push) +#pragma warning(disable:4514 4820) +// C4514/4820: Off by default noise +#include +#include +#pragma warning(pop) + +#ifndef _XM_NO_INTRINSICS_ + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4987) +// C4987: Off by default noise +#include +#pragma warning(pop) +#endif + +#if (defined(__clang__) || defined(__GNUC__)) && (__x86_64__ || __i386__) +#include +#endif + +#ifdef _XM_SSE_INTRINSICS_ +#include +#include + +#ifdef _XM_SSE3_INTRINSICS_ +#include +#endif + +#ifdef _XM_SSE4_INTRINSICS_ +#include +#endif + +#ifdef _XM_AVX_INTRINSICS_ +#include +#endif + +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_MSC_VER) && (defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)) +#include +#else +#include +#endif +#endif +#endif // !_XM_NO_INTRINSICS_ + +#include "sal.h" +#include + +#pragma warning(push) +#pragma warning(disable : 4005 4668) +// C4005/4668: Old header issue +#include +#pragma warning(pop) + +#ifdef __GNUC__ +#define XM_ALIGNED_DATA(x) __attribute__ ((aligned(x))) +#define XM_ALIGNED_STRUCT(x) struct __attribute__ ((aligned(x))) +#else +#define XM_ALIGNED_DATA(x) __declspec(align(x)) +#define XM_ALIGNED_STRUCT(x) __declspec(align(x)) struct +#endif + +/**************************************************************************** + * + * Conditional intrinsics + * + ****************************************************************************/ + +#if 
defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + +#if defined(_XM_NO_MOVNT_) +#define XM_STREAM_PS( p, a ) _mm_store_ps((p), (a)) +#define XM256_STREAM_PS( p, a ) _mm256_store_ps((p), (a)) +#define XM_SFENCE() +#else +#define XM_STREAM_PS( p, a ) _mm_stream_ps((p), (a)) +#define XM256_STREAM_PS( p, a ) _mm256_stream_ps((p), (a)) +#define XM_SFENCE() _mm_sfence() +#endif + +#if defined(_XM_FMA3_INTRINSICS_) +#define XM_FMADD_PS( a, b, c ) _mm_fmadd_ps((a), (b), (c)) +#define XM_FNMADD_PS( a, b, c ) _mm_fnmadd_ps((a), (b), (c)) +#else +#define XM_FMADD_PS( a, b, c ) _mm_add_ps(_mm_mul_ps((a), (b)), (c)) +#define XM_FNMADD_PS( a, b, c ) _mm_sub_ps((c), _mm_mul_ps((a), (b))) +#endif + +#if defined(_XM_AVX_INTRINSICS_) && defined(_XM_FAVOR_INTEL_) +#define XM_PERMUTE_PS( v, c ) _mm_permute_ps((v), c ) +#else +#define XM_PERMUTE_PS( v, c ) _mm_shuffle_ps((v), (v), c ) +#endif + +#endif // _XM_SSE_INTRINSICS_ && !_XM_NO_INTRINSICS_ + +#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + +#if defined(__clang__) +#define XM_PREFETCH( a ) __builtin_prefetch(a) +#elif defined(_MSC_VER) +#define XM_PREFETCH( a ) __prefetch(a) +#else +#define XM_PREFETCH( a ) +#endif + +#endif // _XM_ARM_NEON_INTRINSICS_ && !_XM_NO_INTRINSICS_ + +namespace DirectX +{ + + /**************************************************************************** + * + * Constant definitions + * + ****************************************************************************/ + +#if defined(__XNAMATH_H__) && defined(XM_PI) +#undef XM_PI +#undef XM_2PI +#undef XM_1DIVPI +#undef XM_1DIV2PI +#undef XM_PIDIV2 +#undef XM_PIDIV4 +#undef XM_SELECT_0 +#undef XM_SELECT_1 +#undef XM_PERMUTE_0X +#undef XM_PERMUTE_0Y +#undef XM_PERMUTE_0Z +#undef XM_PERMUTE_0W +#undef XM_PERMUTE_1X +#undef XM_PERMUTE_1Y +#undef XM_PERMUTE_1Z +#undef XM_PERMUTE_1W +#undef XM_CRMASK_CR6 +#undef XM_CRMASK_CR6TRUE +#undef XM_CRMASK_CR6FALSE +#undef XM_CRMASK_CR6BOUNDS +#undef XM_CACHE_LINE_SIZE +#endif + + 
constexpr float XM_PI = 3.141592654f; + constexpr float XM_2PI = 6.283185307f; + constexpr float XM_1DIVPI = 0.318309886f; + constexpr float XM_1DIV2PI = 0.159154943f; + constexpr float XM_PIDIV2 = 1.570796327f; + constexpr float XM_PIDIV4 = 0.785398163f; + + constexpr uint32_t XM_SELECT_0 = 0x00000000; + constexpr uint32_t XM_SELECT_1 = 0xFFFFFFFF; + + constexpr uint32_t XM_PERMUTE_0X = 0; + constexpr uint32_t XM_PERMUTE_0Y = 1; + constexpr uint32_t XM_PERMUTE_0Z = 2; + constexpr uint32_t XM_PERMUTE_0W = 3; + constexpr uint32_t XM_PERMUTE_1X = 4; + constexpr uint32_t XM_PERMUTE_1Y = 5; + constexpr uint32_t XM_PERMUTE_1Z = 6; + constexpr uint32_t XM_PERMUTE_1W = 7; + + constexpr uint32_t XM_SWIZZLE_X = 0; + constexpr uint32_t XM_SWIZZLE_Y = 1; + constexpr uint32_t XM_SWIZZLE_Z = 2; + constexpr uint32_t XM_SWIZZLE_W = 3; + + constexpr uint32_t XM_CRMASK_CR6 = 0x000000F0; + constexpr uint32_t XM_CRMASK_CR6TRUE = 0x00000080; + constexpr uint32_t XM_CRMASK_CR6FALSE = 0x00000020; + constexpr uint32_t XM_CRMASK_CR6BOUNDS = XM_CRMASK_CR6FALSE; + + constexpr size_t XM_CACHE_LINE_SIZE = 64; + + + /**************************************************************************** + * + * Macros + * + ****************************************************************************/ + +#if defined(__XNAMATH_H__) && defined(XMComparisonAllTrue) +#undef XMComparisonAllTrue +#undef XMComparisonAnyTrue +#undef XMComparisonAllFalse +#undef XMComparisonAnyFalse +#undef XMComparisonMixed +#undef XMComparisonAllInBounds +#undef XMComparisonAnyOutOfBounds +#endif + + // Unit conversion + + inline constexpr float XMConvertToRadians(float fDegrees) noexcept { return fDegrees * (XM_PI / 180.0f); } + inline constexpr float XMConvertToDegrees(float fRadians) noexcept { return fRadians * (180.0f / XM_PI); } + + // Condition register evaluation proceeding a recording (R) comparison + + inline constexpr bool XMComparisonAllTrue(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6TRUE) == 
XM_CRMASK_CR6TRUE); } + inline constexpr bool XMComparisonAnyTrue(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6FALSE) != XM_CRMASK_CR6FALSE); } + inline constexpr bool XMComparisonAllFalse(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6FALSE) == XM_CRMASK_CR6FALSE); } + inline constexpr bool XMComparisonAnyFalse(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6TRUE) != XM_CRMASK_CR6TRUE); } + inline constexpr bool XMComparisonMixed(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6) == 0); } + inline constexpr bool XMComparisonAllInBounds(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6BOUNDS) == XM_CRMASK_CR6BOUNDS); } + inline constexpr bool XMComparisonAnyOutOfBounds(uint32_t CR) noexcept { return (((CR)&XM_CRMASK_CR6BOUNDS) != XM_CRMASK_CR6BOUNDS); } + + + /**************************************************************************** + * + * Data types + * + ****************************************************************************/ + +#pragma warning(push) +#pragma warning(disable:4068 4201 4365 4324 4820) + // C4068: ignore unknown pragmas + // C4201: nonstandard extension used : nameless struct/union + // C4365: Off by default noise + // C4324/4820: padding warnings + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 25000, "FXMVECTOR is 16 bytes") +#endif + +//------------------------------------------------------------------------------ +#if defined(_XM_NO_INTRINSICS_) + struct __vector4 + { + union + { + float vector4_f32[4]; + uint32_t vector4_u32[4]; + }; + }; +#endif // _XM_NO_INTRINSICS_ + + //------------------------------------------------------------------------------ + // Vector intrinsic: Four 32 bit floating point components aligned on a 16 byte + // boundary and mapped to hardware vector registers +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + typedef __m128 XMVECTOR; +#elif defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + typedef float32x4_t XMVECTOR; +#else + typedef 
__vector4 XMVECTOR; +#endif + + // Fix-up for (1st-3rd) XMVECTOR parameters that are pass-in-register for x86, ARM, ARM64, and vector call; by reference otherwise +#if ( defined(_M_IX86) || defined(_M_ARM) || defined(_M_ARM64) || _XM_VECTORCALL_ || __i386__ || __arm__ || __aarch64__ ) && !defined(_XM_NO_INTRINSICS_) + typedef const XMVECTOR FXMVECTOR; +#else + typedef const XMVECTOR& FXMVECTOR; +#endif + + // Fix-up for (4th) XMVECTOR parameter to pass in-register for ARM, ARM64, and x64 vector call; by reference otherwise +#if ( defined(_M_ARM) || defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || (_XM_VECTORCALL_ && !defined(_M_IX86) ) || __arm__ || __aarch64__ ) && !defined(_XM_NO_INTRINSICS_) + typedef const XMVECTOR GXMVECTOR; +#else + typedef const XMVECTOR& GXMVECTOR; +#endif + + // Fix-up for (5th & 6th) XMVECTOR parameter to pass in-register for ARM64 and vector call; by reference otherwise +#if ( defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || _XM_VECTORCALL_ || __aarch64__ ) && !defined(_XM_NO_INTRINSICS_) + typedef const XMVECTOR HXMVECTOR; +#else + typedef const XMVECTOR& HXMVECTOR; +#endif + + // Fix-up for (7th+) XMVECTOR parameters to pass by reference + typedef const XMVECTOR& CXMVECTOR; + + //------------------------------------------------------------------------------ + // Conversion types for constants + XM_ALIGNED_STRUCT(16) XMVECTORF32 + { + union + { + float f[4]; + XMVECTOR v; + }; + + inline operator XMVECTOR() const noexcept { return v; } + inline operator const float* () const noexcept { return f; } +#if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_) + inline operator __m128i() const noexcept { return _mm_castps_si128(v); } + inline operator __m128d() const noexcept { return _mm_castps_pd(v); } +#endif + }; + + XM_ALIGNED_STRUCT(16) XMVECTORI32 + { + union + { + int32_t i[4]; + XMVECTOR v; + }; + + inline operator XMVECTOR() const noexcept { return v; } +#if !defined(_XM_NO_INTRINSICS_) && 
defined(_XM_SSE_INTRINSICS_) + inline operator __m128i() const noexcept { return _mm_castps_si128(v); } + inline operator __m128d() const noexcept { return _mm_castps_pd(v); } +#endif + }; + + XM_ALIGNED_STRUCT(16) XMVECTORU8 + { + union + { + uint8_t u[16]; + XMVECTOR v; + }; + + inline operator XMVECTOR() const noexcept { return v; } +#if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_) + inline operator __m128i() const noexcept { return _mm_castps_si128(v); } + inline operator __m128d() const noexcept { return _mm_castps_pd(v); } +#endif + }; + + XM_ALIGNED_STRUCT(16) XMVECTORU32 + { + union + { + uint32_t u[4]; + XMVECTOR v; + }; + + inline operator XMVECTOR() const noexcept { return v; } +#if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_) + inline operator __m128i() const noexcept { return _mm_castps_si128(v); } + inline operator __m128d() const noexcept { return _mm_castps_pd(v); } +#endif + }; + + //------------------------------------------------------------------------------ + // Vector operators + +#ifndef _XM_NO_XMVECTOR_OVERLOADS_ + XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV operator- (FXMVECTOR V) noexcept; + + XMVECTOR& XM_CALLCONV operator+= (XMVECTOR& V1, FXMVECTOR V2) noexcept; + XMVECTOR& XM_CALLCONV operator-= (XMVECTOR& V1, FXMVECTOR V2) noexcept; + XMVECTOR& XM_CALLCONV operator*= (XMVECTOR& V1, FXMVECTOR V2) noexcept; + XMVECTOR& XM_CALLCONV operator/= (XMVECTOR& V1, FXMVECTOR V2) noexcept; + + XMVECTOR& operator*= (XMVECTOR& V, float S) noexcept; + XMVECTOR& operator/= (XMVECTOR& V, float S) noexcept; + + XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV operator- (FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV operator* (FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV operator/ (FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV operator* (FXMVECTOR V, float S) noexcept; + XMVECTOR XM_CALLCONV operator* 
(float S, FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV operator/ (FXMVECTOR V, float S) noexcept; +#endif /* !_XM_NO_XMVECTOR_OVERLOADS_ */ + + //------------------------------------------------------------------------------ + // Matrix type: Sixteen 32 bit floating point components aligned on a + // 16 byte boundary and mapped to four hardware vector registers + + struct XMMATRIX; + + // Fix-up for (1st) XMMATRIX parameter to pass in-register for ARM64 and vector call; by reference otherwise +#if ( defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || _XM_VECTORCALL_ || __aarch64__ ) && !defined(_XM_NO_INTRINSICS_) + typedef const XMMATRIX FXMMATRIX; +#else + typedef const XMMATRIX& FXMMATRIX; +#endif + + // Fix-up for (2nd+) XMMATRIX parameters to pass by reference + typedef const XMMATRIX& CXMMATRIX; + +#ifdef _XM_NO_INTRINSICS_ + struct XMMATRIX +#else + XM_ALIGNED_STRUCT(16) XMMATRIX +#endif + { +#ifdef _XM_NO_INTRINSICS_ + union + { + XMVECTOR r[4]; + struct + { + float _11, _12, _13, _14; + float _21, _22, _23, _24; + float _31, _32, _33, _34; + float _41, _42, _43, _44; + }; + float m[4][4]; + }; +#else + XMVECTOR r[4]; +#endif + + XMMATRIX() = default; + + XMMATRIX(const XMMATRIX&) = default; + +#if defined(_MSC_VER) && (_MSC_FULL_VER < 191426431) + XMMATRIX& operator= (const XMMATRIX& M) noexcept { r[0] = M.r[0]; r[1] = M.r[1]; r[2] = M.r[2]; r[3] = M.r[3]; return *this; } +#else + XMMATRIX& operator=(const XMMATRIX&) = default; + + XMMATRIX(XMMATRIX&&) = default; + XMMATRIX& operator=(XMMATRIX&&) = default; +#endif + + constexpr XMMATRIX(FXMVECTOR R0, FXMVECTOR R1, FXMVECTOR R2, CXMVECTOR R3) noexcept : r{ R0,R1,R2,R3 } {} + XMMATRIX(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33) noexcept; + explicit XMMATRIX(_In_reads_(16) const float* pArray) noexcept; + +#ifdef _XM_NO_INTRINSICS_ + float operator() (size_t Row, size_t 
Column) const noexcept { return m[Row][Column]; } + float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } +#endif + + XMMATRIX operator+ () const noexcept { return *this; } + XMMATRIX operator- () const noexcept; + + XMMATRIX& XM_CALLCONV operator+= (FXMMATRIX M) noexcept; + XMMATRIX& XM_CALLCONV operator-= (FXMMATRIX M) noexcept; + XMMATRIX& XM_CALLCONV operator*= (FXMMATRIX M) noexcept; + XMMATRIX& operator*= (float S) noexcept; + XMMATRIX& operator/= (float S) noexcept; + + XMMATRIX XM_CALLCONV operator+ (FXMMATRIX M) const noexcept; + XMMATRIX XM_CALLCONV operator- (FXMMATRIX M) const noexcept; + XMMATRIX XM_CALLCONV operator* (FXMMATRIX M) const noexcept; + XMMATRIX operator* (float S) const noexcept; + XMMATRIX operator/ (float S) const noexcept; + + friend XMMATRIX XM_CALLCONV operator* (float S, FXMMATRIX M) noexcept; + }; + + //------------------------------------------------------------------------------ + // 2D Vector; 32 bit floating point components + struct XMFLOAT2 + { + float x; + float y; + + XMFLOAT2() = default; + + XMFLOAT2(const XMFLOAT2&) = default; + XMFLOAT2& operator=(const XMFLOAT2&) = default; + + XMFLOAT2(XMFLOAT2&&) = default; + XMFLOAT2& operator=(XMFLOAT2&&) = default; + + constexpr XMFLOAT2(float _x, float _y) noexcept : x(_x), y(_y) {} + explicit XMFLOAT2(_In_reads_(2) const float* pArray) noexcept : x(pArray[0]), y(pArray[1]) {} + }; + + // 2D Vector; 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT2A : public XMFLOAT2 + { + XMFLOAT2A() = default; + + XMFLOAT2A(const XMFLOAT2A&) = default; + XMFLOAT2A& operator=(const XMFLOAT2A&) = default; + + XMFLOAT2A(XMFLOAT2A&&) = default; + XMFLOAT2A& operator=(XMFLOAT2A&&) = default; + + constexpr XMFLOAT2A(float _x, float _y) noexcept : XMFLOAT2(_x, _y) {} + explicit XMFLOAT2A(_In_reads_(2) const float* pArray) noexcept : XMFLOAT2(pArray) {} + }; + + 
//------------------------------------------------------------------------------ + // 2D Vector; 32 bit signed integer components + struct XMINT2 + { + int32_t x; + int32_t y; + + XMINT2() = default; + + XMINT2(const XMINT2&) = default; + XMINT2& operator=(const XMINT2&) = default; + + XMINT2(XMINT2&&) = default; + XMINT2& operator=(XMINT2&&) = default; + + constexpr XMINT2(int32_t _x, int32_t _y) noexcept : x(_x), y(_y) {} + explicit XMINT2(_In_reads_(2) const int32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]) {} + }; + + // 2D Vector; 32 bit unsigned integer components + struct XMUINT2 + { + uint32_t x; + uint32_t y; + + XMUINT2() = default; + + XMUINT2(const XMUINT2&) = default; + XMUINT2& operator=(const XMUINT2&) = default; + + XMUINT2(XMUINT2&&) = default; + XMUINT2& operator=(XMUINT2&&) = default; + + constexpr XMUINT2(uint32_t _x, uint32_t _y) noexcept : x(_x), y(_y) {} + explicit XMUINT2(_In_reads_(2) const uint32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]) {} + }; + + //------------------------------------------------------------------------------ + // 3D Vector; 32 bit floating point components + struct XMFLOAT3 + { + float x; + float y; + float z; + + XMFLOAT3() = default; + + XMFLOAT3(const XMFLOAT3&) = default; + XMFLOAT3& operator=(const XMFLOAT3&) = default; + + XMFLOAT3(XMFLOAT3&&) = default; + XMFLOAT3& operator=(XMFLOAT3&&) = default; + + constexpr XMFLOAT3(float _x, float _y, float _z) noexcept : x(_x), y(_y), z(_z) {} + explicit XMFLOAT3(_In_reads_(3) const float* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]) {} + }; + + // 3D Vector; 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT3A : public XMFLOAT3 + { + XMFLOAT3A() = default; + + XMFLOAT3A(const XMFLOAT3A&) = default; + XMFLOAT3A& operator=(const XMFLOAT3A&) = default; + + XMFLOAT3A(XMFLOAT3A&&) = default; + XMFLOAT3A& operator=(XMFLOAT3A&&) = default; + + constexpr XMFLOAT3A(float _x, float _y, float _z) noexcept : 
XMFLOAT3(_x, _y, _z) {} + explicit XMFLOAT3A(_In_reads_(3) const float* pArray) noexcept : XMFLOAT3(pArray) {} + }; + + //------------------------------------------------------------------------------ + // 3D Vector; 32 bit signed integer components + struct XMINT3 + { + int32_t x; + int32_t y; + int32_t z; + + XMINT3() = default; + + XMINT3(const XMINT3&) = default; + XMINT3& operator=(const XMINT3&) = default; + + XMINT3(XMINT3&&) = default; + XMINT3& operator=(XMINT3&&) = default; + + constexpr XMINT3(int32_t _x, int32_t _y, int32_t _z) noexcept : x(_x), y(_y), z(_z) {} + explicit XMINT3(_In_reads_(3) const int32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]) {} + }; + + // 3D Vector; 32 bit unsigned integer components + struct XMUINT3 + { + uint32_t x; + uint32_t y; + uint32_t z; + + XMUINT3() = default; + + XMUINT3(const XMUINT3&) = default; + XMUINT3& operator=(const XMUINT3&) = default; + + XMUINT3(XMUINT3&&) = default; + XMUINT3& operator=(XMUINT3&&) = default; + + constexpr XMUINT3(uint32_t _x, uint32_t _y, uint32_t _z) noexcept : x(_x), y(_y), z(_z) {} + explicit XMUINT3(_In_reads_(3) const uint32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]) {} + }; + + //------------------------------------------------------------------------------ + // 4D Vector; 32 bit floating point components + struct XMFLOAT4 + { + float x; + float y; + float z; + float w; + + XMFLOAT4() = default; + + XMFLOAT4(const XMFLOAT4&) = default; + XMFLOAT4& operator=(const XMFLOAT4&) = default; + + XMFLOAT4(XMFLOAT4&&) = default; + XMFLOAT4& operator=(XMFLOAT4&&) = default; + + constexpr XMFLOAT4(float _x, float _y, float _z, float _w) noexcept : x(_x), y(_y), z(_z), w(_w) {} + explicit XMFLOAT4(_In_reads_(4) const float* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]), w(pArray[3]) {} + }; + + // 4D Vector; 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT4A : public XMFLOAT4 + { + XMFLOAT4A() = 
default; + + XMFLOAT4A(const XMFLOAT4A&) = default; + XMFLOAT4A& operator=(const XMFLOAT4A&) = default; + + XMFLOAT4A(XMFLOAT4A&&) = default; + XMFLOAT4A& operator=(XMFLOAT4A&&) = default; + + constexpr XMFLOAT4A(float _x, float _y, float _z, float _w) noexcept : XMFLOAT4(_x, _y, _z, _w) {} + explicit XMFLOAT4A(_In_reads_(4) const float* pArray) noexcept : XMFLOAT4(pArray) {} + }; + + //------------------------------------------------------------------------------ + // 4D Vector; 32 bit signed integer components + struct XMINT4 + { + int32_t x; + int32_t y; + int32_t z; + int32_t w; + + XMINT4() = default; + + XMINT4(const XMINT4&) = default; + XMINT4& operator=(const XMINT4&) = default; + + XMINT4(XMINT4&&) = default; + XMINT4& operator=(XMINT4&&) = default; + + constexpr XMINT4(int32_t _x, int32_t _y, int32_t _z, int32_t _w) noexcept : x(_x), y(_y), z(_z), w(_w) {} + explicit XMINT4(_In_reads_(4) const int32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]), w(pArray[3]) {} + }; + + // 4D Vector; 32 bit unsigned integer components + struct XMUINT4 + { + uint32_t x; + uint32_t y; + uint32_t z; + uint32_t w; + + XMUINT4() = default; + + XMUINT4(const XMUINT4&) = default; + XMUINT4& operator=(const XMUINT4&) = default; + + XMUINT4(XMUINT4&&) = default; + XMUINT4& operator=(XMUINT4&&) = default; + + constexpr XMUINT4(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w) noexcept : x(_x), y(_y), z(_z), w(_w) {} + explicit XMUINT4(_In_reads_(4) const uint32_t* pArray) noexcept : x(pArray[0]), y(pArray[1]), z(pArray[2]), w(pArray[3]) {} + }; + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +#pragma clang diagnostic ignored "-Wnested-anon-types" +#endif + + //------------------------------------------------------------------------------ + // 3x3 Matrix: 32 bit floating point components + struct XMFLOAT3X3 + { + union + { + struct + { + float _11, _12, _13; + float _21, _22, _23; + float _31, _32, 
_33; + }; + float m[3][3]; + }; + + XMFLOAT3X3() = default; + + XMFLOAT3X3(const XMFLOAT3X3&) = default; + XMFLOAT3X3& operator=(const XMFLOAT3X3&) = default; + + XMFLOAT3X3(XMFLOAT3X3&&) = default; + XMFLOAT3X3& operator=(XMFLOAT3X3&&) = default; + + constexpr XMFLOAT3X3(float m00, float m01, float m02, + float m10, float m11, float m12, + float m20, float m21, float m22) noexcept + : _11(m00), _12(m01), _13(m02), + _21(m10), _22(m11), _23(m12), + _31(m20), _32(m21), _33(m22) {} + explicit XMFLOAT3X3(_In_reads_(9) const float* pArray) noexcept; + + float operator() (size_t Row, size_t Column) const noexcept { return m[Row][Column]; } + float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } + }; + + //------------------------------------------------------------------------------ + // 4x3 Row-major Matrix: 32 bit floating point components + struct XMFLOAT4X3 + { + union + { + struct + { + float _11, _12, _13; + float _21, _22, _23; + float _31, _32, _33; + float _41, _42, _43; + }; + float m[4][3]; + float f[12]; + }; + + XMFLOAT4X3() = default; + + XMFLOAT4X3(const XMFLOAT4X3&) = default; + XMFLOAT4X3& operator=(const XMFLOAT4X3&) = default; + + XMFLOAT4X3(XMFLOAT4X3&&) = default; + XMFLOAT4X3& operator=(XMFLOAT4X3&&) = default; + + constexpr XMFLOAT4X3(float m00, float m01, float m02, + float m10, float m11, float m12, + float m20, float m21, float m22, + float m30, float m31, float m32) noexcept + : _11(m00), _12(m01), _13(m02), + _21(m10), _22(m11), _23(m12), + _31(m20), _32(m21), _33(m22), + _41(m30), _42(m31), _43(m32) {} + explicit XMFLOAT4X3(_In_reads_(12) const float* pArray) noexcept; + + float operator() (size_t Row, size_t Column) const noexcept { return m[Row][Column]; } + float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } + }; + + // 4x3 Row-major Matrix: 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT4X3A : public XMFLOAT4X3 + { + XMFLOAT4X3A() = 
default; + + XMFLOAT4X3A(const XMFLOAT4X3A&) = default; + XMFLOAT4X3A& operator=(const XMFLOAT4X3A&) = default; + + XMFLOAT4X3A(XMFLOAT4X3A&&) = default; + XMFLOAT4X3A& operator=(XMFLOAT4X3A&&) = default; + + constexpr XMFLOAT4X3A(float m00, float m01, float m02, + float m10, float m11, float m12, + float m20, float m21, float m22, + float m30, float m31, float m32) noexcept : + XMFLOAT4X3(m00, m01, m02, m10, m11, m12, m20, m21, m22, m30, m31, m32) {} + explicit XMFLOAT4X3A(_In_reads_(12) const float* pArray) noexcept : XMFLOAT4X3(pArray) {} + }; + + //------------------------------------------------------------------------------ + // 3x4 Column-major Matrix: 32 bit floating point components + struct XMFLOAT3X4 + { + union + { + struct + { + float _11, _12, _13, _14; + float _21, _22, _23, _24; + float _31, _32, _33, _34; + }; + float m[3][4]; + float f[12]; + }; + + XMFLOAT3X4() = default; + + XMFLOAT3X4(const XMFLOAT3X4&) = default; + XMFLOAT3X4& operator=(const XMFLOAT3X4&) = default; + + XMFLOAT3X4(XMFLOAT3X4&&) = default; + XMFLOAT3X4& operator=(XMFLOAT3X4&&) = default; + + constexpr XMFLOAT3X4(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23) noexcept + : _11(m00), _12(m01), _13(m02), _14(m03), + _21(m10), _22(m11), _23(m12), _24(m13), + _31(m20), _32(m21), _33(m22), _34(m23) {} + explicit XMFLOAT3X4(_In_reads_(12) const float* pArray) noexcept; + + float operator() (size_t Row, size_t Column) const noexcept { return m[Row][Column]; } + float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } + }; + + // 3x4 Column-major Matrix: 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT3X4A : public XMFLOAT3X4 + { + XMFLOAT3X4A() = default; + + XMFLOAT3X4A(const XMFLOAT3X4A&) = default; + XMFLOAT3X4A& operator=(const XMFLOAT3X4A&) = default; + + XMFLOAT3X4A(XMFLOAT3X4A&&) = default; + XMFLOAT3X4A& 
operator=(XMFLOAT3X4A&&) = default; + + constexpr XMFLOAT3X4A(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23) noexcept : + XMFLOAT3X4(m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23) {} + explicit XMFLOAT3X4A(_In_reads_(12) const float* pArray) noexcept : XMFLOAT3X4(pArray) {} + }; + + //------------------------------------------------------------------------------ + // 4x4 Matrix: 32 bit floating point components + struct XMFLOAT4X4 + { + union + { + struct + { + float _11, _12, _13, _14; + float _21, _22, _23, _24; + float _31, _32, _33, _34; + float _41, _42, _43, _44; + }; + float m[4][4]; + }; + + XMFLOAT4X4() = default; + + XMFLOAT4X4(const XMFLOAT4X4&) = default; + XMFLOAT4X4& operator=(const XMFLOAT4X4&) = default; + + XMFLOAT4X4(XMFLOAT4X4&&) = default; + XMFLOAT4X4& operator=(XMFLOAT4X4&&) = default; + + constexpr XMFLOAT4X4(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33) noexcept + : _11(m00), _12(m01), _13(m02), _14(m03), + _21(m10), _22(m11), _23(m12), _24(m13), + _31(m20), _32(m21), _33(m22), _34(m23), + _41(m30), _42(m31), _43(m32), _44(m33) {} + explicit XMFLOAT4X4(_In_reads_(16) const float* pArray) noexcept; + + float operator() (size_t Row, size_t Column) const noexcept { return m[Row][Column]; } + float& operator() (size_t Row, size_t Column) noexcept { return m[Row][Column]; } + }; + + // 4x4 Matrix: 32 bit floating point components aligned on a 16 byte boundary + XM_ALIGNED_STRUCT(16) XMFLOAT4X4A : public XMFLOAT4X4 + { + XMFLOAT4X4A() = default; + + XMFLOAT4X4A(const XMFLOAT4X4A&) = default; + XMFLOAT4X4A& operator=(const XMFLOAT4X4A&) = default; + + XMFLOAT4X4A(XMFLOAT4X4A&&) = default; + XMFLOAT4X4A& operator=(XMFLOAT4X4A&&) = default; + + constexpr XMFLOAT4X4A(float m00, float m01, float m02, float m03, + float 
m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33) noexcept + : XMFLOAT4X4(m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23, m30, m31, m32, m33) {} + explicit XMFLOAT4X4A(_In_reads_(16) const float* pArray) noexcept : XMFLOAT4X4(pArray) {} + }; + + //////////////////////////////////////////////////////////////////////////////// + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +#pragma warning(pop) + +/**************************************************************************** + * + * Data conversion operations + * + ****************************************************************************/ + + XMVECTOR XM_CALLCONV XMConvertVectorIntToFloat(FXMVECTOR VInt, uint32_t DivExponent) noexcept; + XMVECTOR XM_CALLCONV XMConvertVectorFloatToInt(FXMVECTOR VFloat, uint32_t MulExponent) noexcept; + XMVECTOR XM_CALLCONV XMConvertVectorUIntToFloat(FXMVECTOR VUInt, uint32_t DivExponent) noexcept; + XMVECTOR XM_CALLCONV XMConvertVectorFloatToUInt(FXMVECTOR VFloat, uint32_t MulExponent) noexcept; + +#if defined(__XNAMATH_H__) && defined(XMVectorSetBinaryConstant) +#undef XMVectorSetBinaryConstant +#undef XMVectorSplatConstant +#undef XMVectorSplatConstantInt +#endif + + XMVECTOR XM_CALLCONV XMVectorSetBinaryConstant(uint32_t C0, uint32_t C1, uint32_t C2, uint32_t C3) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatConstant(int32_t IntConstant, uint32_t DivExponent) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatConstantInt(int32_t IntConstant) noexcept; + + /**************************************************************************** + * + * Load operations + * + ****************************************************************************/ + + XMVECTOR XM_CALLCONV XMLoadInt(_In_ const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat(_In_ const float* pSource) noexcept; + + XMVECTOR XM_CALLCONV XMLoadInt2(_In_reads_(2) 
const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadInt2A(_In_reads_(2) const uint32_t* PSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat2(_In_ const XMFLOAT2* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat2A(_In_ const XMFLOAT2A* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadSInt2(_In_ const XMINT2* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadUInt2(_In_ const XMUINT2* pSource) noexcept; + + XMVECTOR XM_CALLCONV XMLoadInt3(_In_reads_(3) const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadInt3A(_In_reads_(3) const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat3(_In_ const XMFLOAT3* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat3A(_In_ const XMFLOAT3A* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadSInt3(_In_ const XMINT3* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadUInt3(_In_ const XMUINT3* pSource) noexcept; + + XMVECTOR XM_CALLCONV XMLoadInt4(_In_reads_(4) const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadInt4A(_In_reads_(4) const uint32_t* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat4(_In_ const XMFLOAT4* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadFloat4A(_In_ const XMFLOAT4A* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadSInt4(_In_ const XMINT4* pSource) noexcept; + XMVECTOR XM_CALLCONV XMLoadUInt4(_In_ const XMUINT4* pSource) noexcept; + + XMMATRIX XM_CALLCONV XMLoadFloat3x3(_In_ const XMFLOAT3X3* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat4x3(_In_ const XMFLOAT4X3* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat4x3A(_In_ const XMFLOAT4X3A* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat3x4(_In_ const XMFLOAT3X4* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat3x4A(_In_ const XMFLOAT3X4A* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat4x4(_In_ const XMFLOAT4X4* pSource) noexcept; + XMMATRIX XM_CALLCONV XMLoadFloat4x4A(_In_ const XMFLOAT4X4A* pSource) noexcept; + + 
/**************************************************************************** + * + * Store operations + * + ****************************************************************************/ + + void XM_CALLCONV XMStoreInt(_Out_ uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat(_Out_ float* pDestination, _In_ FXMVECTOR V) noexcept; + + void XM_CALLCONV XMStoreInt2(_Out_writes_(2) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreInt2A(_Out_writes_(2) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat2(_Out_ XMFLOAT2* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat2A(_Out_ XMFLOAT2A* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreSInt2(_Out_ XMINT2* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreUInt2(_Out_ XMUINT2* pDestination, _In_ FXMVECTOR V) noexcept; + + void XM_CALLCONV XMStoreInt3(_Out_writes_(3) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreInt3A(_Out_writes_(3) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat3(_Out_ XMFLOAT3* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat3A(_Out_ XMFLOAT3A* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreSInt3(_Out_ XMINT3* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreUInt3(_Out_ XMUINT3* pDestination, _In_ FXMVECTOR V) noexcept; + + void XM_CALLCONV XMStoreInt4(_Out_writes_(4) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreInt4A(_Out_writes_(4) uint32_t* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat4(_Out_ XMFLOAT4* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreFloat4A(_Out_ XMFLOAT4A* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreSInt4(_Out_ XMINT4* pDestination, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMStoreUInt4(_Out_ 
XMUINT4* pDestination, _In_ FXMVECTOR V) noexcept; + + void XM_CALLCONV XMStoreFloat3x3(_Out_ XMFLOAT3X3* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat4x3(_Out_ XMFLOAT4X3* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat4x3A(_Out_ XMFLOAT4X3A* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat3x4(_Out_ XMFLOAT3X4* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat3x4A(_Out_ XMFLOAT3X4A* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat4x4(_Out_ XMFLOAT4X4* pDestination, _In_ FXMMATRIX M) noexcept; + void XM_CALLCONV XMStoreFloat4x4A(_Out_ XMFLOAT4X4A* pDestination, _In_ FXMMATRIX M) noexcept; + + /**************************************************************************** + * + * General vector operations + * + ****************************************************************************/ + + XMVECTOR XM_CALLCONV XMVectorZero() noexcept; + XMVECTOR XM_CALLCONV XMVectorSet(float x, float y, float z, float w) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetInt(uint32_t x, uint32_t y, uint32_t z, uint32_t w) noexcept; + XMVECTOR XM_CALLCONV XMVectorReplicate(float Value) noexcept; + XMVECTOR XM_CALLCONV XMVectorReplicatePtr(_In_ const float* pValue) noexcept; + XMVECTOR XM_CALLCONV XMVectorReplicateInt(uint32_t Value) noexcept; + XMVECTOR XM_CALLCONV XMVectorReplicateIntPtr(_In_ const uint32_t* pValue) noexcept; + XMVECTOR XM_CALLCONV XMVectorTrueInt() noexcept; + XMVECTOR XM_CALLCONV XMVectorFalseInt() noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatX(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatY(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatZ(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatW(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatOne() noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatInfinity() noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatQNaN() noexcept; + XMVECTOR XM_CALLCONV 
XMVectorSplatEpsilon() noexcept; + XMVECTOR XM_CALLCONV XMVectorSplatSignMask() noexcept; + + float XM_CALLCONV XMVectorGetByIndex(FXMVECTOR V, size_t i) noexcept; + float XM_CALLCONV XMVectorGetX(FXMVECTOR V) noexcept; + float XM_CALLCONV XMVectorGetY(FXMVECTOR V) noexcept; + float XM_CALLCONV XMVectorGetZ(FXMVECTOR V) noexcept; + float XM_CALLCONV XMVectorGetW(FXMVECTOR V) noexcept; + + void XM_CALLCONV XMVectorGetByIndexPtr(_Out_ float* f, _In_ FXMVECTOR V, _In_ size_t i) noexcept; + void XM_CALLCONV XMVectorGetXPtr(_Out_ float* x, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetYPtr(_Out_ float* y, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetZPtr(_Out_ float* z, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetWPtr(_Out_ float* w, _In_ FXMVECTOR V) noexcept; + + uint32_t XM_CALLCONV XMVectorGetIntByIndex(FXMVECTOR V, size_t i) noexcept; + uint32_t XM_CALLCONV XMVectorGetIntX(FXMVECTOR V) noexcept; + uint32_t XM_CALLCONV XMVectorGetIntY(FXMVECTOR V) noexcept; + uint32_t XM_CALLCONV XMVectorGetIntZ(FXMVECTOR V) noexcept; + uint32_t XM_CALLCONV XMVectorGetIntW(FXMVECTOR V) noexcept; + + void XM_CALLCONV XMVectorGetIntByIndexPtr(_Out_ uint32_t* x, _In_ FXMVECTOR V, _In_ size_t i) noexcept; + void XM_CALLCONV XMVectorGetIntXPtr(_Out_ uint32_t* x, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetIntYPtr(_Out_ uint32_t* y, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetIntZPtr(_Out_ uint32_t* z, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorGetIntWPtr(_Out_ uint32_t* w, _In_ FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVectorSetByIndex(FXMVECTOR V, float f, size_t i) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetX(FXMVECTOR V, float x) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetY(FXMVECTOR V, float y) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetZ(FXMVECTOR V, float z) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetW(FXMVECTOR V, float w) noexcept; + + XMVECTOR XM_CALLCONV XMVectorSetByIndexPtr(_In_ 
FXMVECTOR V, _In_ const float* f, _In_ size_t i) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetXPtr(_In_ FXMVECTOR V, _In_ const float* x) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetYPtr(_In_ FXMVECTOR V, _In_ const float* y) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetZPtr(_In_ FXMVECTOR V, _In_ const float* z) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetWPtr(_In_ FXMVECTOR V, _In_ const float* w) noexcept; + + XMVECTOR XM_CALLCONV XMVectorSetIntByIndex(FXMVECTOR V, uint32_t x, size_t i) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntX(FXMVECTOR V, uint32_t x) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntY(FXMVECTOR V, uint32_t y) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntZ(FXMVECTOR V, uint32_t z) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntW(FXMVECTOR V, uint32_t w) noexcept; + + XMVECTOR XM_CALLCONV XMVectorSetIntByIndexPtr(_In_ FXMVECTOR V, _In_ const uint32_t* x, _In_ size_t i) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntXPtr(_In_ FXMVECTOR V, _In_ const uint32_t* x) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntYPtr(_In_ FXMVECTOR V, _In_ const uint32_t* y) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntZPtr(_In_ FXMVECTOR V, _In_ const uint32_t* z) noexcept; + XMVECTOR XM_CALLCONV XMVectorSetIntWPtr(_In_ FXMVECTOR V, _In_ const uint32_t* w) noexcept; + +#if defined(__XNAMATH_H__) && defined(XMVectorSwizzle) +#undef XMVectorSwizzle +#endif + + XMVECTOR XM_CALLCONV XMVectorSwizzle(FXMVECTOR V, uint32_t E0, uint32_t E1, uint32_t E2, uint32_t E3) noexcept; + XMVECTOR XM_CALLCONV XMVectorPermute(FXMVECTOR V1, FXMVECTOR V2, uint32_t PermuteX, uint32_t PermuteY, uint32_t PermuteZ, uint32_t PermuteW) noexcept; + XMVECTOR XM_CALLCONV XMVectorSelectControl(uint32_t VectorIndex0, uint32_t VectorIndex1, uint32_t VectorIndex2, uint32_t VectorIndex3) noexcept; + XMVECTOR XM_CALLCONV XMVectorSelect(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Control) noexcept; + XMVECTOR XM_CALLCONV XMVectorMergeXY(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV 
XMVectorMergeZW(FXMVECTOR V1, FXMVECTOR V2) noexcept; + +#if defined(__XNAMATH_H__) && defined(XMVectorShiftLeft) +#undef XMVectorShiftLeft +#undef XMVectorRotateLeft +#undef XMVectorRotateRight +#undef XMVectorInsert +#endif + + XMVECTOR XM_CALLCONV XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2, uint32_t Elements) noexcept; + XMVECTOR XM_CALLCONV XMVectorRotateLeft(FXMVECTOR V, uint32_t Elements) noexcept; + XMVECTOR XM_CALLCONV XMVectorRotateRight(FXMVECTOR V, uint32_t Elements) noexcept; + XMVECTOR XM_CALLCONV XMVectorInsert(FXMVECTOR VD, FXMVECTOR VS, uint32_t VSLeftRotateElements, + uint32_t Select0, uint32_t Select1, uint32_t Select2, uint32_t Select3) noexcept; + + XMVECTOR XM_CALLCONV XMVectorEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorEqualR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V1, _In_ FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorEqualIntR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V, _In_ FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorNearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon) noexcept; + XMVECTOR XM_CALLCONV XMVectorNotEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorNotEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorGreater(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorGreaterR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V1, _In_ FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorGreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorGreaterOrEqualR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V1, _In_ FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorLess(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorLessOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorInBounds(FXMVECTOR V, FXMVECTOR Bounds) noexcept; + XMVECTOR XM_CALLCONV XMVectorInBoundsR(_Out_ uint32_t* pCR, _In_ 
FXMVECTOR V, _In_ FXMVECTOR Bounds) noexcept; + + XMVECTOR XM_CALLCONV XMVectorIsNaN(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorIsInfinite(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVectorMin(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorMax(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorRound(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorTruncate(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorFloor(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorCeiling(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorClamp(FXMVECTOR V, FXMVECTOR Min, FXMVECTOR Max) noexcept; + XMVECTOR XM_CALLCONV XMVectorSaturate(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVectorAndInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorAndCInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorOrInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorNorInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorXorInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + + XMVECTOR XM_CALLCONV XMVectorNegate(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorAdd(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorSum(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorAddAngles(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorSubtract(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorSubtractAngles(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorMultiply(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorMultiplyAdd(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3) noexcept; + XMVECTOR XM_CALLCONV XMVectorDivide(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorNegativeMultiplySubtract(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3) noexcept; + XMVECTOR XM_CALLCONV XMVectorScale(FXMVECTOR V, float ScaleFactor) noexcept; + XMVECTOR XM_CALLCONV 
XMVectorReciprocalEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorReciprocal(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSqrtEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSqrt(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorReciprocalSqrtEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorReciprocalSqrt(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorExp2(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorExpE(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorExp(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorLog2(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorLogE(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorLog(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorPow(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorAbs(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorMod(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVectorModAngles(FXMVECTOR Angles) noexcept; + XMVECTOR XM_CALLCONV XMVectorSin(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSinEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorCos(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorCosEst(FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorSinCos(_Out_ XMVECTOR* pSin, _Out_ XMVECTOR* pCos, _In_ FXMVECTOR V) noexcept; + void XM_CALLCONV XMVectorSinCosEst(_Out_ XMVECTOR* pSin, _Out_ XMVECTOR* pCos, _In_ FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorTan(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorTanEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorSinH(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorCosH(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorTanH(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorASin(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorASinEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorACos(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorACosEst(FXMVECTOR V) noexcept; + XMVECTOR 
XM_CALLCONV XMVectorATan(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorATanEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVectorATan2(FXMVECTOR Y, FXMVECTOR X) noexcept; + XMVECTOR XM_CALLCONV XMVectorATan2Est(FXMVECTOR Y, FXMVECTOR X) noexcept; + XMVECTOR XM_CALLCONV XMVectorLerp(FXMVECTOR V0, FXMVECTOR V1, float t) noexcept; + XMVECTOR XM_CALLCONV XMVectorLerpV(FXMVECTOR V0, FXMVECTOR V1, FXMVECTOR T) noexcept; + XMVECTOR XM_CALLCONV XMVectorHermite(FXMVECTOR Position0, FXMVECTOR Tangent0, FXMVECTOR Position1, GXMVECTOR Tangent1, float t) noexcept; + XMVECTOR XM_CALLCONV XMVectorHermiteV(FXMVECTOR Position0, FXMVECTOR Tangent0, FXMVECTOR Position1, GXMVECTOR Tangent1, HXMVECTOR T) noexcept; + XMVECTOR XM_CALLCONV XMVectorCatmullRom(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, GXMVECTOR Position3, float t) noexcept; + XMVECTOR XM_CALLCONV XMVectorCatmullRomV(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, GXMVECTOR Position3, HXMVECTOR T) noexcept; + XMVECTOR XM_CALLCONV XMVectorBaryCentric(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, float f, float g) noexcept; + XMVECTOR XM_CALLCONV XMVectorBaryCentricV(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, GXMVECTOR F, HXMVECTOR G) noexcept; + + /**************************************************************************** + * + * 2D vector operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMVector2Equal(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector2EqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2EqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector2EqualIntR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon) noexcept; + bool XM_CALLCONV XMVector2NotEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV 
XMVector2NotEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2Greater(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector2GreaterR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector2GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2Less(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2LessOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector2InBounds(FXMVECTOR V, FXMVECTOR Bounds) noexcept; + + bool XM_CALLCONV XMVector2IsNaN(FXMVECTOR V) noexcept; + bool XM_CALLCONV XMVector2IsInfinite(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVector2Dot(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector2Cross(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector2LengthSq(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2ReciprocalLengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2ReciprocalLength(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2LengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2Length(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2NormalizeEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2Normalize(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector2ClampLength(FXMVECTOR V, float LengthMin, float LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector2ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector2Reflect(FXMVECTOR Incident, FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMVector2Refract(FXMVECTOR Incident, FXMVECTOR Normal, float RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector2RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector2Orthogonal(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV 
XMVector2AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector2AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector2LinePointDistance(FXMVECTOR LinePoint1, FXMVECTOR LinePoint2, FXMVECTOR Point) noexcept; + XMVECTOR XM_CALLCONV XMVector2IntersectLine(FXMVECTOR Line1Point1, FXMVECTOR Line1Point2, FXMVECTOR Line2Point1, GXMVECTOR Line2Point2) noexcept; + XMVECTOR XM_CALLCONV XMVector2Transform(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT4* XM_CALLCONV XMVector2TransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4) + OutputStride * (VectorCount - 1)) XMFLOAT4* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT2) + InputStride * (VectorCount - 1)) const XMFLOAT2* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector2TransformCoord(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT2* XM_CALLCONV XMVector2TransformCoordStream(_Out_writes_bytes_(sizeof(XMFLOAT2) + OutputStride * (VectorCount - 1)) XMFLOAT2* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT2) + InputStride * (VectorCount - 1)) const XMFLOAT2* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector2TransformNormal(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT2* XM_CALLCONV XMVector2TransformNormalStream(_Out_writes_bytes_(sizeof(XMFLOAT2) + OutputStride * (VectorCount - 1)) XMFLOAT2* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT2) + InputStride * (VectorCount - 1)) const XMFLOAT2* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + + /**************************************************************************** + * + * 3D vector operations + * + 
****************************************************************************/ + + bool XM_CALLCONV XMVector3Equal(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector3EqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3EqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector3EqualIntR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon) noexcept; + bool XM_CALLCONV XMVector3NotEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3NotEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3Greater(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector3GreaterR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector3GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3Less(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3LessOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector3InBounds(FXMVECTOR V, FXMVECTOR Bounds) noexcept; + + bool XM_CALLCONV XMVector3IsNaN(FXMVECTOR V) noexcept; + bool XM_CALLCONV XMVector3IsInfinite(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVector3Dot(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector3Cross(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector3LengthSq(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3ReciprocalLengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3ReciprocalLength(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3LengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3Length(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3NormalizeEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3Normalize(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3ClampLength(FXMVECTOR V, float LengthMin, 
float LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector3ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector3Reflect(FXMVECTOR Incident, FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMVector3Refract(FXMVECTOR Incident, FXMVECTOR Normal, float RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector3RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector3Orthogonal(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector3AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector3LinePointDistance(FXMVECTOR LinePoint1, FXMVECTOR LinePoint2, FXMVECTOR Point) noexcept; + void XM_CALLCONV XMVector3ComponentsFromNormal(_Out_ XMVECTOR* pParallel, _Out_ XMVECTOR* pPerpendicular, _In_ FXMVECTOR V, _In_ FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMVector3Rotate(FXMVECTOR V, FXMVECTOR RotationQuaternion) noexcept; + XMVECTOR XM_CALLCONV XMVector3InverseRotate(FXMVECTOR V, FXMVECTOR RotationQuaternion) noexcept; + XMVECTOR XM_CALLCONV XMVector3Transform(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT4* XM_CALLCONV XMVector3TransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4) + OutputStride * (VectorCount - 1)) XMFLOAT4* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector3TransformCoord(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT3* XM_CALLCONV XMVector3TransformCoordStream(_Out_writes_bytes_(sizeof(XMFLOAT3) + OutputStride * (VectorCount - 1)) XMFLOAT3* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + 
InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector3TransformNormal(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT3* XM_CALLCONV XMVector3TransformNormalStream(_Out_writes_bytes_(sizeof(XMFLOAT3) + OutputStride * (VectorCount - 1)) XMFLOAT3* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + XMVECTOR XM_CALLCONV XMVector3Project(FXMVECTOR V, float ViewportX, float ViewportY, float ViewportWidth, float ViewportHeight, float ViewportMinZ, float ViewportMaxZ, + FXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World) noexcept; + XMFLOAT3* XM_CALLCONV XMVector3ProjectStream(_Out_writes_bytes_(sizeof(XMFLOAT3) + OutputStride * (VectorCount - 1)) XMFLOAT3* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, + _In_ float ViewportX, _In_ float ViewportY, _In_ float ViewportWidth, _In_ float ViewportHeight, _In_ float ViewportMinZ, _In_ float ViewportMaxZ, + _In_ FXMMATRIX Projection, _In_ CXMMATRIX View, _In_ CXMMATRIX World) noexcept; + XMVECTOR XM_CALLCONV XMVector3Unproject(FXMVECTOR V, float ViewportX, float ViewportY, float ViewportWidth, float ViewportHeight, float ViewportMinZ, float ViewportMaxZ, + FXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World) noexcept; + XMFLOAT3* XM_CALLCONV XMVector3UnprojectStream(_Out_writes_bytes_(sizeof(XMFLOAT3) + OutputStride * (VectorCount - 1)) XMFLOAT3* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT3) + InputStride * (VectorCount - 1)) const XMFLOAT3* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, + _In_ float ViewportX, _In_ float ViewportY, _In_ 
float ViewportWidth, _In_ float ViewportHeight, _In_ float ViewportMinZ, _In_ float ViewportMaxZ, + _In_ FXMMATRIX Projection, _In_ CXMMATRIX View, _In_ CXMMATRIX World) noexcept; + + /**************************************************************************** + * + * 4D vector operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMVector4Equal(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector4EqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4EqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector4EqualIntR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon) noexcept; + bool XM_CALLCONV XMVector4NotEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4NotEqualInt(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4Greater(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector4GreaterR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + uint32_t XM_CALLCONV XMVector4GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4Less(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4LessOrEqual(FXMVECTOR V1, FXMVECTOR V2) noexcept; + bool XM_CALLCONV XMVector4InBounds(FXMVECTOR V, FXMVECTOR Bounds) noexcept; + + bool XM_CALLCONV XMVector4IsNaN(FXMVECTOR V) noexcept; + bool XM_CALLCONV XMVector4IsInfinite(FXMVECTOR V) noexcept; + + XMVECTOR XM_CALLCONV XMVector4Dot(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector4Cross(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3) noexcept; + XMVECTOR XM_CALLCONV XMVector4LengthSq(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4ReciprocalLengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4ReciprocalLength(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV 
XMVector4LengthEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4Length(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4NormalizeEst(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4Normalize(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4ClampLength(FXMVECTOR V, float LengthMin, float LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector4ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax) noexcept; + XMVECTOR XM_CALLCONV XMVector4Reflect(FXMVECTOR Incident, FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMVector4Refract(FXMVECTOR Incident, FXMVECTOR Normal, float RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector4RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex) noexcept; + XMVECTOR XM_CALLCONV XMVector4Orthogonal(FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2) noexcept; + XMVECTOR XM_CALLCONV XMVector4AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMVector4Transform(FXMVECTOR V, FXMMATRIX M) noexcept; + XMFLOAT4* XM_CALLCONV XMVector4TransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4) + OutputStride * (VectorCount - 1)) XMFLOAT4* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT4) + InputStride * (VectorCount - 1)) const XMFLOAT4* pInputStream, + _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M) noexcept; + + /**************************************************************************** + * + * Matrix operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMMatrixIsNaN(FXMMATRIX M) noexcept; + bool XM_CALLCONV XMMatrixIsInfinite(FXMMATRIX M) noexcept; + bool XM_CALLCONV XMMatrixIsIdentity(FXMMATRIX M) noexcept; + + XMMATRIX XM_CALLCONV XMMatrixMultiply(FXMMATRIX M1, CXMMATRIX M2) noexcept; + XMMATRIX 
XM_CALLCONV XMMatrixMultiplyTranspose(FXMMATRIX M1, CXMMATRIX M2) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTranspose(FXMMATRIX M) noexcept; + XMMATRIX XM_CALLCONV XMMatrixInverse(_Out_opt_ XMVECTOR* pDeterminant, _In_ FXMMATRIX M) noexcept; + XMMATRIX XM_CALLCONV XMMatrixVectorTensorProduct(FXMVECTOR V1, FXMVECTOR V2) noexcept; + XMVECTOR XM_CALLCONV XMMatrixDeterminant(FXMMATRIX M) noexcept; + + _Success_(return) + bool XM_CALLCONV XMMatrixDecompose(_Out_ XMVECTOR* outScale, _Out_ XMVECTOR* outRotQuat, _Out_ XMVECTOR* outTrans, _In_ FXMMATRIX M) noexcept; + + XMMATRIX XM_CALLCONV XMMatrixIdentity() noexcept; + XMMATRIX XM_CALLCONV XMMatrixSet(float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTranslation(float OffsetX, float OffsetY, float OffsetZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTranslationFromVector(FXMVECTOR Offset) noexcept; + XMMATRIX XM_CALLCONV XMMatrixScaling(float ScaleX, float ScaleY, float ScaleZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixScalingFromVector(FXMVECTOR Scale) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationX(float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationY(float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationZ(float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYaw(float Pitch, float Yaw, float Roll) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYawFromVector(FXMVECTOR Angles) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationNormal(FXMVECTOR NormalAxis, float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationAxis(FXMVECTOR Axis, float Angle) noexcept; + XMMATRIX XM_CALLCONV XMMatrixRotationQuaternion(FXMVECTOR Quaternion) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTransformation2D(FXMVECTOR ScalingOrigin, float ScalingOrientation, FXMVECTOR Scaling, + FXMVECTOR RotationOrigin, float Rotation, GXMVECTOR 
Translation) noexcept; + XMMATRIX XM_CALLCONV XMMatrixTransformation(FXMVECTOR ScalingOrigin, FXMVECTOR ScalingOrientationQuaternion, FXMVECTOR Scaling, + GXMVECTOR RotationOrigin, HXMVECTOR RotationQuaternion, HXMVECTOR Translation) noexcept; + XMMATRIX XM_CALLCONV XMMatrixAffineTransformation2D(FXMVECTOR Scaling, FXMVECTOR RotationOrigin, float Rotation, FXMVECTOR Translation) noexcept; + XMMATRIX XM_CALLCONV XMMatrixAffineTransformation(FXMVECTOR Scaling, FXMVECTOR RotationOrigin, FXMVECTOR RotationQuaternion, GXMVECTOR Translation) noexcept; + XMMATRIX XM_CALLCONV XMMatrixReflect(FXMVECTOR ReflectionPlane) noexcept; + XMMATRIX XM_CALLCONV XMMatrixShadow(FXMVECTOR ShadowPlane, FXMVECTOR LightPosition) noexcept; + + XMMATRIX XM_CALLCONV XMMatrixLookAtLH(FXMVECTOR EyePosition, FXMVECTOR FocusPosition, FXMVECTOR UpDirection) noexcept; + XMMATRIX XM_CALLCONV XMMatrixLookAtRH(FXMVECTOR EyePosition, FXMVECTOR FocusPosition, FXMVECTOR UpDirection) noexcept; + XMMATRIX XM_CALLCONV XMMatrixLookToLH(FXMVECTOR EyePosition, FXMVECTOR EyeDirection, FXMVECTOR UpDirection) noexcept; + XMMATRIX XM_CALLCONV XMMatrixLookToRH(FXMVECTOR EyePosition, FXMVECTOR EyeDirection, FXMVECTOR UpDirection) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveLH(float ViewWidth, float ViewHeight, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveRH(float ViewWidth, float ViewHeight, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovLH(float FovAngleY, float AspectRatio, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovRH(float FovAngleY, float AspectRatio, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterLH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterRH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ) noexcept; + XMMATRIX 
XM_CALLCONV XMMatrixOrthographicLH(float ViewWidth, float ViewHeight, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixOrthographicRH(float ViewWidth, float ViewHeight, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterLH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ) noexcept; + XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterRH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ) noexcept; + + + /**************************************************************************** + * + * Quaternion operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMQuaternionEqual(FXMVECTOR Q1, FXMVECTOR Q2) noexcept; + bool XM_CALLCONV XMQuaternionNotEqual(FXMVECTOR Q1, FXMVECTOR Q2) noexcept; + + bool XM_CALLCONV XMQuaternionIsNaN(FXMVECTOR Q) noexcept; + bool XM_CALLCONV XMQuaternionIsInfinite(FXMVECTOR Q) noexcept; + bool XM_CALLCONV XMQuaternionIsIdentity(FXMVECTOR Q) noexcept; + + XMVECTOR XM_CALLCONV XMQuaternionDot(FXMVECTOR Q1, FXMVECTOR Q2) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionMultiply(FXMVECTOR Q1, FXMVECTOR Q2) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionLengthSq(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionReciprocalLength(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionLength(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionNormalizeEst(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionNormalize(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionConjugate(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionInverse(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionLn(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionExp(FXMVECTOR Q) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionSlerp(FXMVECTOR Q0, FXMVECTOR Q1, float t) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionSlerpV(FXMVECTOR Q0, FXMVECTOR Q1, 
FXMVECTOR T) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionSquad(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, GXMVECTOR Q3, float t) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionSquadV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, GXMVECTOR Q3, HXMVECTOR T) noexcept; + void XM_CALLCONV XMQuaternionSquadSetup(_Out_ XMVECTOR* pA, _Out_ XMVECTOR* pB, _Out_ XMVECTOR* pC, _In_ FXMVECTOR Q0, _In_ FXMVECTOR Q1, _In_ FXMVECTOR Q2, _In_ GXMVECTOR Q3) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionBaryCentric(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, float f, float g) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionBaryCentricV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, GXMVECTOR F, HXMVECTOR G) noexcept; + + XMVECTOR XM_CALLCONV XMQuaternionIdentity() noexcept; + XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYaw(float Pitch, float Yaw, float Roll) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYawFromVector(FXMVECTOR Angles) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionRotationNormal(FXMVECTOR NormalAxis, float Angle) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionRotationAxis(FXMVECTOR Axis, float Angle) noexcept; + XMVECTOR XM_CALLCONV XMQuaternionRotationMatrix(FXMMATRIX M) noexcept; + + void XM_CALLCONV XMQuaternionToAxisAngle(_Out_ XMVECTOR* pAxis, _Out_ float* pAngle, _In_ FXMVECTOR Q) noexcept; + + /**************************************************************************** + * + * Plane operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMPlaneEqual(FXMVECTOR P1, FXMVECTOR P2) noexcept; + bool XM_CALLCONV XMPlaneNearEqual(FXMVECTOR P1, FXMVECTOR P2, FXMVECTOR Epsilon) noexcept; + bool XM_CALLCONV XMPlaneNotEqual(FXMVECTOR P1, FXMVECTOR P2) noexcept; + + bool XM_CALLCONV XMPlaneIsNaN(FXMVECTOR P) noexcept; + bool XM_CALLCONV XMPlaneIsInfinite(FXMVECTOR P) noexcept; + + XMVECTOR XM_CALLCONV XMPlaneDot(FXMVECTOR P, FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMPlaneDotCoord(FXMVECTOR P, FXMVECTOR V) 
noexcept; + XMVECTOR XM_CALLCONV XMPlaneDotNormal(FXMVECTOR P, FXMVECTOR V) noexcept; + XMVECTOR XM_CALLCONV XMPlaneNormalizeEst(FXMVECTOR P) noexcept; + XMVECTOR XM_CALLCONV XMPlaneNormalize(FXMVECTOR P) noexcept; + XMVECTOR XM_CALLCONV XMPlaneIntersectLine(FXMVECTOR P, FXMVECTOR LinePoint1, FXMVECTOR LinePoint2) noexcept; + void XM_CALLCONV XMPlaneIntersectPlane(_Out_ XMVECTOR* pLinePoint1, _Out_ XMVECTOR* pLinePoint2, _In_ FXMVECTOR P1, _In_ FXMVECTOR P2) noexcept; + XMVECTOR XM_CALLCONV XMPlaneTransform(FXMVECTOR P, FXMMATRIX M) noexcept; + XMFLOAT4* XM_CALLCONV XMPlaneTransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4) + OutputStride * (PlaneCount - 1)) XMFLOAT4* pOutputStream, + _In_ size_t OutputStride, + _In_reads_bytes_(sizeof(XMFLOAT4) + InputStride * (PlaneCount - 1)) const XMFLOAT4* pInputStream, + _In_ size_t InputStride, _In_ size_t PlaneCount, _In_ FXMMATRIX M) noexcept; + + XMVECTOR XM_CALLCONV XMPlaneFromPointNormal(FXMVECTOR Point, FXMVECTOR Normal) noexcept; + XMVECTOR XM_CALLCONV XMPlaneFromPoints(FXMVECTOR Point1, FXMVECTOR Point2, FXMVECTOR Point3) noexcept; + + /**************************************************************************** + * + * Color operations + * + ****************************************************************************/ + + bool XM_CALLCONV XMColorEqual(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorNotEqual(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorGreater(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorGreaterOrEqual(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorLess(FXMVECTOR C1, FXMVECTOR C2) noexcept; + bool XM_CALLCONV XMColorLessOrEqual(FXMVECTOR C1, FXMVECTOR C2) noexcept; + + bool XM_CALLCONV XMColorIsNaN(FXMVECTOR C) noexcept; + bool XM_CALLCONV XMColorIsInfinite(FXMVECTOR C) noexcept; + + XMVECTOR XM_CALLCONV XMColorNegative(FXMVECTOR C) noexcept; + XMVECTOR XM_CALLCONV XMColorModulate(FXMVECTOR C1, FXMVECTOR C2) noexcept; + XMVECTOR 
XM_CALLCONV XMColorAdjustSaturation(FXMVECTOR C, float Saturation) noexcept; + XMVECTOR XM_CALLCONV XMColorAdjustContrast(FXMVECTOR C, float Contrast) noexcept; + + XMVECTOR XM_CALLCONV XMColorRGBToHSL(FXMVECTOR rgb) noexcept; + XMVECTOR XM_CALLCONV XMColorHSLToRGB(FXMVECTOR hsl) noexcept; + + XMVECTOR XM_CALLCONV XMColorRGBToHSV(FXMVECTOR rgb) noexcept; + XMVECTOR XM_CALLCONV XMColorHSVToRGB(FXMVECTOR hsv) noexcept; + + XMVECTOR XM_CALLCONV XMColorRGBToYUV(FXMVECTOR rgb) noexcept; + XMVECTOR XM_CALLCONV XMColorYUVToRGB(FXMVECTOR yuv) noexcept; + + XMVECTOR XM_CALLCONV XMColorRGBToYUV_HD(FXMVECTOR rgb) noexcept; + XMVECTOR XM_CALLCONV XMColorYUVToRGB_HD(FXMVECTOR yuv) noexcept; + + XMVECTOR XM_CALLCONV XMColorRGBToXYZ(FXMVECTOR rgb) noexcept; + XMVECTOR XM_CALLCONV XMColorXYZToRGB(FXMVECTOR xyz) noexcept; + + XMVECTOR XM_CALLCONV XMColorXYZToSRGB(FXMVECTOR xyz) noexcept; + XMVECTOR XM_CALLCONV XMColorSRGBToXYZ(FXMVECTOR srgb) noexcept; + + XMVECTOR XM_CALLCONV XMColorRGBToSRGB(FXMVECTOR rgb) noexcept; + XMVECTOR XM_CALLCONV XMColorSRGBToRGB(FXMVECTOR srgb) noexcept; + + + /**************************************************************************** + * + * Miscellaneous operations + * + ****************************************************************************/ + + bool XMVerifyCPUSupport() noexcept; + + XMVECTOR XM_CALLCONV XMFresnelTerm(FXMVECTOR CosIncidentAngle, FXMVECTOR RefractionIndex) noexcept; + + bool XMScalarNearEqual(float S1, float S2, float Epsilon) noexcept; + float XMScalarModAngle(float Value) noexcept; + + float XMScalarSin(float Value) noexcept; + float XMScalarSinEst(float Value) noexcept; + + float XMScalarCos(float Value) noexcept; + float XMScalarCosEst(float Value) noexcept; + + void XMScalarSinCos(_Out_ float* pSin, _Out_ float* pCos, float Value) noexcept; + void XMScalarSinCosEst(_Out_ float* pSin, _Out_ float* pCos, float Value) noexcept; + + float XMScalarASin(float Value) noexcept; + float XMScalarASinEst(float Value) noexcept; + + 
float XMScalarACos(float Value) noexcept; + float XMScalarACosEst(float Value) noexcept; + + /**************************************************************************** + * + * Templates + * + ****************************************************************************/ + +#if defined(__XNAMATH_H__) && defined(XMMin) +#undef XMMin +#undef XMMax +#endif + + template inline T XMMin(T a, T b) { return (a < b) ? a : b; } + template inline T XMMax(T a, T b) { return (a > b) ? a : b; } + + //------------------------------------------------------------------------------ + +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + +// PermuteHelper internal template (SSE only) + namespace Internal + { + // Slow path fallback for permutes that do not map to a single SSE shuffle opcode. + template struct PermuteHelper + { + static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR v2) noexcept + { + static const XMVECTORU32 selectMask = + { { { + WhichX ? 0xFFFFFFFF : 0, + WhichY ? 0xFFFFFFFF : 0, + WhichZ ? 0xFFFFFFFF : 0, + WhichW ? 0xFFFFFFFF : 0, + } } }; + + XMVECTOR shuffled1 = XM_PERMUTE_PS(v1, Shuffle); + XMVECTOR shuffled2 = XM_PERMUTE_PS(v2, Shuffle); + + XMVECTOR masked1 = _mm_andnot_ps(selectMask, shuffled1); + XMVECTOR masked2 = _mm_and_ps(selectMask, shuffled2); + + return _mm_or_ps(masked1, masked2); + } + }; + + // Fast path for permutes that only read from the first vector. + template struct PermuteHelper + { + static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR) noexcept { return XM_PERMUTE_PS(v1, Shuffle); } + }; + + // Fast path for permutes that only read from the second vector. + template struct PermuteHelper + { + static XMVECTOR XM_CALLCONV Permute(FXMVECTOR, FXMVECTOR v2) noexcept { return XM_PERMUTE_PS(v2, Shuffle); } + }; + + // Fast path for permutes that read XY from the first vector, ZW from the second. 
+ template struct PermuteHelper + { + static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR v2) noexcept { return _mm_shuffle_ps(v1, v2, Shuffle); } + }; + + // Fast path for permutes that read XY from the second vector, ZW from the first. + template struct PermuteHelper + { + static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR v2) noexcept { return _mm_shuffle_ps(v2, v1, Shuffle); } + }; + } + +#endif // _XM_SSE_INTRINSICS_ && !_XM_NO_INTRINSICS_ + + // General permute template + template + inline XMVECTOR XM_CALLCONV XMVectorPermute(FXMVECTOR V1, FXMVECTOR V2) noexcept + { + static_assert(PermuteX <= 7, "PermuteX template parameter out of range"); + static_assert(PermuteY <= 7, "PermuteY template parameter out of range"); + static_assert(PermuteZ <= 7, "PermuteZ template parameter out of range"); + static_assert(PermuteW <= 7, "PermuteW template parameter out of range"); + +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + const uint32_t Shuffle = _MM_SHUFFLE(PermuteW & 3, PermuteZ & 3, PermuteY & 3, PermuteX & 3); + + const bool WhichX = PermuteX > 3; + const bool WhichY = PermuteY > 3; + const bool WhichZ = PermuteZ > 3; + const bool WhichW = PermuteW > 3; + + return Internal::PermuteHelper::Permute(V1, V2); +#else + + return XMVectorPermute(V1, V2, PermuteX, PermuteY, PermuteZ, PermuteW); + +#endif + } + + // Special-case permute templates + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 2, 3>(FXMVECTOR V1, FXMVECTOR) noexcept { return V1; } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 5, 6, 7>(FXMVECTOR, FXMVECTOR V2) noexcept { return V2; } + +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_movelh_ps(V1, V2); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<6, 7, 2, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_movehl_ps(V1, V2); } + template<> 
inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 4, 1, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_unpacklo_ps(V1, V2); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 6, 3, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_unpackhi_ps(V1, V2); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(V1), _mm_castps_pd(V2))); } +#endif + +#if defined(_XM_SSE4_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 1, 2, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x1); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 5, 2, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x2); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 5, 2, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x3); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 6, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x4); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 1, 6, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x5); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 5, 6, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x6); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 5, 6, 3>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x7); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 2, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x8); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 1, 2, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0x9); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 5, 2, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xA); } + template<> inline 
XMVECTOR XM_CALLCONV XMVectorPermute<4, 5, 2, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xB); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xC); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4, 1, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xD); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 5, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return _mm_blend_ps(V1, V2, 0xE); } +#endif + +#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + + // If the indices are all in the range 0-3 or 4-7, then use XMVectorSwizzle instead + // The mirror cases are not spelled out here as the programmer can always swap the arguments + // (i.e. prefer permutes where the X element comes from the V1 vector instead of the V2 vector) + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_low_f32(V1), vget_low_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 0, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V1)), vget_low_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 5, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_low_f32(V1), vrev64_f32(vget_low_f32(V2))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 0, 5, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V1)), vrev64_f32(vget_low_f32(V2))); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_high_f32(V1), vget_high_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3, 2, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V1)), vget_high_f32(V2)); } + 
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 7, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_high_f32(V1), vrev64_f32(vget_high_f32(V2))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3, 2, 7, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V1)), vrev64_f32(vget_high_f32(V2))); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_low_f32(V1), vget_high_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 0, 6, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V1)), vget_high_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 1, 7, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_low_f32(V1), vrev64_f32(vget_high_f32(V2))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 0, 7, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V1)), vrev64_f32(vget_high_f32(V2))); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3, 2, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V1)), vget_low_f32(V2)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 5, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vget_high_f32(V1), vrev64_f32(vget_low_f32(V2))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3, 2, 5, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V1)), vrev64_f32(vget_low_f32(V2))); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 4, 2, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vtrnq_f32(V1, V2).val[0]; } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 5, 3, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vtrnq_f32(V1, V2).val[1]; } + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 
4, 1, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vzipq_f32(V1, V2).val[0]; } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 6, 3, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vzipq_f32(V1, V2).val[1]; } + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0, 2, 4, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vuzpq_f32(V1, V2).val[0]; } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 3, 5, 7>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vuzpq_f32(V1, V2).val[1]; } + + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1, 2, 3, 4>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vextq_f32(V1, V2, 1); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2, 3, 4, 5>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vextq_f32(V1, V2, 2); } + template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3, 4, 5, 6>(FXMVECTOR V1, FXMVECTOR V2) noexcept { return vextq_f32(V1, V2, 3); } + +#endif // _XM_ARM_NEON_INTRINSICS_ && !_XM_NO_INTRINSICS_ + + //------------------------------------------------------------------------------ + + // General swizzle template + template + inline XMVECTOR XM_CALLCONV XMVectorSwizzle(FXMVECTOR V) noexcept + { + static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range"); + static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range"); + static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range"); + static_assert(SwizzleW <= 3, "SwizzleW template parameter out of range"); + +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + return XM_PERMUTE_PS(V, _MM_SHUFFLE(SwizzleW, SwizzleZ, SwizzleY, SwizzleX)); +#else + + return XMVectorSwizzle(V, SwizzleX, SwizzleY, SwizzleZ, SwizzleW); + +#endif + } + + // Specialized swizzles + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 1, 2, 3>(FXMVECTOR V) noexcept { return V; } + +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 
1, 0, 1>(FXMVECTOR V) noexcept { return _mm_movelh_ps(V, V); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 3, 2, 3>(FXMVECTOR V) noexcept { return _mm_movehl_ps(V, V); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 1, 1>(FXMVECTOR V) noexcept { return _mm_unpacklo_ps(V, V); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 2, 3, 3>(FXMVECTOR V) noexcept { return _mm_unpackhi_ps(V, V); } +#endif + +#if defined(_XM_SSE3_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 2, 2>(FXMVECTOR V) noexcept { return _mm_moveldup_ps(V); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 1, 3, 3>(FXMVECTOR V) noexcept { return _mm_movehdup_ps(V); } +#endif + +#if defined(_XM_AVX2_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) && defined(_XM_FAVOR_INTEL_) + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 0, 0>(FXMVECTOR V) noexcept { return _mm_broadcastss_ps(V); } +#endif + +#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 0, 0>(FXMVECTOR V) noexcept { return vdupq_lane_f32(vget_low_f32(V), 0); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 1, 1, 1>(FXMVECTOR V) noexcept { return vdupq_lane_f32(vget_low_f32(V), 1); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 2, 2, 2>(FXMVECTOR V) noexcept { return vdupq_lane_f32(vget_high_f32(V), 0); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 3, 3, 3>(FXMVECTOR V) noexcept { return vdupq_lane_f32(vget_high_f32(V), 1); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 0, 3, 2>(FXMVECTOR V) noexcept { return vrev64q_f32(V); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 1, 0, 1>(FXMVECTOR V) noexcept { float32x2_t vt = vget_low_f32(V); return vcombine_f32(vt, vt); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 3, 2, 3>(FXMVECTOR V) noexcept { 
float32x2_t vt = vget_high_f32(V); return vcombine_f32(vt, vt); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 0, 1, 0>(FXMVECTOR V) noexcept { float32x2_t vt = vrev64_f32(vget_low_f32(V)); return vcombine_f32(vt, vt); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 2, 3, 2>(FXMVECTOR V) noexcept { float32x2_t vt = vrev64_f32(vget_high_f32(V)); return vcombine_f32(vt, vt); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 1, 3, 2>(FXMVECTOR V) noexcept { return vcombine_f32(vget_low_f32(V), vrev64_f32(vget_high_f32(V))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 0, 2, 3>(FXMVECTOR V) noexcept { return vcombine_f32(vrev64_f32(vget_low_f32(V)), vget_high_f32(V)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 3, 1, 0>(FXMVECTOR V) noexcept { return vcombine_f32(vget_high_f32(V), vrev64_f32(vget_low_f32(V))); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 2, 0, 1>(FXMVECTOR V) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V)), vget_low_f32(V)); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 2, 1, 0>(FXMVECTOR V) noexcept { return vcombine_f32(vrev64_f32(vget_high_f32(V)), vrev64_f32(vget_low_f32(V))); } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 2, 2>(FXMVECTOR V) noexcept { return vtrnq_f32(V, V).val[0]; } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 1, 3, 3>(FXMVECTOR V) noexcept { return vtrnq_f32(V, V).val[1]; } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 0, 1, 1>(FXMVECTOR V) noexcept { return vzipq_f32(V, V).val[0]; } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 2, 3, 3>(FXMVECTOR V) noexcept { return vzipq_f32(V, V).val[1]; } + + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0, 2, 0, 2>(FXMVECTOR V) noexcept { return vuzpq_f32(V, V).val[0]; } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 3, 1, 3>(FXMVECTOR V) noexcept { return vuzpq_f32(V, V).val[1]; } + 
+ template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1, 2, 3, 0>(FXMVECTOR V) noexcept { return vextq_f32(V, V, 1); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2, 3, 0, 1>(FXMVECTOR V) noexcept { return vextq_f32(V, V, 2); } + template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3, 0, 1, 2>(FXMVECTOR V) noexcept { return vextq_f32(V, V, 3); } + +#endif // _XM_ARM_NEON_INTRINSICS_ && !_XM_NO_INTRINSICS_ + + //------------------------------------------------------------------------------ + + template + inline XMVECTOR XM_CALLCONV XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2) noexcept + { + static_assert(Elements < 4, "Elements template parameter out of range"); + return XMVectorPermute(V1, V2); + } + + template + inline XMVECTOR XM_CALLCONV XMVectorRotateLeft(FXMVECTOR V) noexcept + { + static_assert(Elements < 4, "Elements template parameter out of range"); + return XMVectorSwizzle(V); + } + + template + inline XMVECTOR XM_CALLCONV XMVectorRotateRight(FXMVECTOR V) noexcept + { + static_assert(Elements < 4, "Elements template parameter out of range"); + return XMVectorSwizzle<(4 - Elements) & 3, (5 - Elements) & 3, (6 - Elements) & 3, (7 - Elements) & 3>(V); + } + + template + inline XMVECTOR XM_CALLCONV XMVectorInsert(FXMVECTOR VD, FXMVECTOR VS) noexcept + { + XMVECTOR Control = XMVectorSelectControl(Select0 & 1, Select1 & 1, Select2 & 1, Select3 & 1); + return XMVectorSelect(VD, XMVectorRotateLeft(VS), Control); + } + + /**************************************************************************** + * + * Globals + * + ****************************************************************************/ + + // The purpose of the following global constants is to prevent redundant + // reloading of the constants when they are referenced by more than one + // separate inline math routine called within the same function. 
Declaring + // a constant locally within a routine is sufficient to prevent redundant + // reloads of that constant when that single routine is called multiple + // times in a function, but if the constant is used (and declared) in a + // separate math routine it would be reloaded. + +#ifndef XMGLOBALCONST +#if defined(__GNUC__) && !defined(__MINGW32__) +#define XMGLOBALCONST extern const __attribute__((weak)) +#else +#define XMGLOBALCONST extern const __declspec(selectany) +#endif +#endif + + XMGLOBALCONST XMVECTORF32 g_XMSinCoefficients0 = { { { -0.16666667f, +0.0083333310f, -0.00019840874f, +2.7525562e-06f } } }; + XMGLOBALCONST XMVECTORF32 g_XMSinCoefficients1 = { { { -2.3889859e-08f, -0.16665852f /*Est1*/, +0.0083139502f /*Est2*/, -0.00018524670f /*Est3*/ } } }; + XMGLOBALCONST XMVECTORF32 g_XMCosCoefficients0 = { { { -0.5f, +0.041666638f, -0.0013888378f, +2.4760495e-05f } } }; + XMGLOBALCONST XMVECTORF32 g_XMCosCoefficients1 = { { { -2.6051615e-07f, -0.49992746f /*Est1*/, +0.041493919f /*Est2*/, -0.0012712436f /*Est3*/ } } }; + XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients0 = { { { 1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f } } }; + XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients1 = { { { 2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f } } }; + XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients2 = { { { 5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f } } }; + XMGLOBALCONST XMVECTORF32 g_XMArcCoefficients0 = { { { +1.5707963050f, -0.2145988016f, +0.0889789874f, -0.0501743046f } } }; + XMGLOBALCONST XMVECTORF32 g_XMArcCoefficients1 = { { { +0.0308918810f, -0.0170881256f, +0.0066700901f, -0.0012624911f } } }; + XMGLOBALCONST XMVECTORF32 g_XMATanCoefficients0 = { { { -0.3333314528f, +0.1999355085f, -0.1420889944f, +0.1065626393f } } }; + XMGLOBALCONST XMVECTORF32 g_XMATanCoefficients1 = { { { -0.0752896400f, +0.0429096138f, -0.0161657367f, +0.0028662257f } } }; + XMGLOBALCONST XMVECTORF32 g_XMATanEstCoefficients0 = { { { 
+0.999866f, +0.999866f, +0.999866f, +0.999866f } } }; + XMGLOBALCONST XMVECTORF32 g_XMATanEstCoefficients1 = { { { -0.3302995f, +0.180141f, -0.085133f, +0.0208351f } } }; + XMGLOBALCONST XMVECTORF32 g_XMTanEstCoefficients = { { { 2.484f, -1.954923183e-1f, 2.467401101f, XM_1DIVPI } } }; + XMGLOBALCONST XMVECTORF32 g_XMArcEstCoefficients = { { { +1.5707288f, -0.2121144f, +0.0742610f, -0.0187293f } } }; + XMGLOBALCONST XMVECTORF32 g_XMPiConstants0 = { { { XM_PI, XM_2PI, XM_1DIVPI, XM_1DIV2PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMIdentityR0 = { { { 1.0f, 0.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMIdentityR1 = { { { 0.0f, 1.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMIdentityR2 = { { { 0.0f, 0.0f, 1.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMIdentityR3 = { { { 0.0f, 0.0f, 0.0f, 1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR0 = { { { -1.0f, 0.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR1 = { { { 0.0f, -1.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR2 = { { { 0.0f, 0.0f, -1.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR3 = { { { 0.0f, 0.0f, 0.0f, -1.0f } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegativeZero = { { { 0x80000000, 0x80000000, 0x80000000, 0x80000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegate3 = { { { 0x80000000, 0x80000000, 0x80000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskXY = { { { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMask3 = { { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskX = { { { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskY = { { { 0x00000000, 0xFFFFFFFF, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskZ = { { { 0x00000000, 0x00000000, 0xFFFFFFFF, 0x00000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskW = { { { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF } } }; + XMGLOBALCONST 
XMVECTORF32 g_XMOne = { { { 1.0f, 1.0f, 1.0f, 1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMOne3 = { { { 1.0f, 1.0f, 1.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMZero = { { { 0.0f, 0.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMTwo = { { { 2.f, 2.f, 2.f, 2.f } } }; + XMGLOBALCONST XMVECTORF32 g_XMFour = { { { 4.f, 4.f, 4.f, 4.f } } }; + XMGLOBALCONST XMVECTORF32 g_XMSix = { { { 6.f, 6.f, 6.f, 6.f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegativeOne = { { { -1.0f, -1.0f, -1.0f, -1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMOneHalf = { { { 0.5f, 0.5f, 0.5f, 0.5f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegativeOneHalf = { { { -0.5f, -0.5f, -0.5f, -0.5f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegativeTwoPi = { { { -XM_2PI, -XM_2PI, -XM_2PI, -XM_2PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegativePi = { { { -XM_PI, -XM_PI, -XM_PI, -XM_PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMHalfPi = { { { XM_PIDIV2, XM_PIDIV2, XM_PIDIV2, XM_PIDIV2 } } }; + XMGLOBALCONST XMVECTORF32 g_XMPi = { { { XM_PI, XM_PI, XM_PI, XM_PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMReciprocalPi = { { { XM_1DIVPI, XM_1DIVPI, XM_1DIVPI, XM_1DIVPI } } }; + XMGLOBALCONST XMVECTORF32 g_XMTwoPi = { { { XM_2PI, XM_2PI, XM_2PI, XM_2PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMReciprocalTwoPi = { { { XM_1DIV2PI, XM_1DIV2PI, XM_1DIV2PI, XM_1DIV2PI } } }; + XMGLOBALCONST XMVECTORF32 g_XMEpsilon = { { { 1.192092896e-7f, 1.192092896e-7f, 1.192092896e-7f, 1.192092896e-7f } } }; + XMGLOBALCONST XMVECTORI32 g_XMInfinity = { { { 0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMQNaN = { { { 0x7FC00000, 0x7FC00000, 0x7FC00000, 0x7FC00000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMQNaNTest = { { { 0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF } } }; + XMGLOBALCONST XMVECTORI32 g_XMAbsMask = { { { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF } } }; + XMGLOBALCONST XMVECTORI32 g_XMFltMin = { { { 0x00800000, 0x00800000, 0x00800000, 0x00800000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMFltMax = 
{ { { 0x7F7FFFFF, 0x7F7FFFFF, 0x7F7FFFFF, 0x7F7FFFFF } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegOneMask = { { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskA8R8G8B8 = { { { 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMFlipA8R8G8B8 = { { { 0x00000000, 0x00000000, 0x00000000, 0x80000000 } } }; + XMGLOBALCONST XMVECTORF32 g_XMFixAA8R8G8B8 = { { { 0.0f, 0.0f, 0.0f, float(0x80000000U) } } }; + XMGLOBALCONST XMVECTORF32 g_XMNormalizeA8R8G8B8 = { { { 1.0f / (255.0f * float(0x10000)), 1.0f / (255.0f * float(0x100)), 1.0f / 255.0f, 1.0f / (255.0f * float(0x1000000)) } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskA2B10G10R10 = { { { 0x000003FF, 0x000FFC00, 0x3FF00000, 0xC0000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMFlipA2B10G10R10 = { { { 0x00000200, 0x00080000, 0x20000000, 0x80000000 } } }; + XMGLOBALCONST XMVECTORF32 g_XMFixAA2B10G10R10 = { { { -512.0f, -512.0f * float(0x400), -512.0f * float(0x100000), float(0x80000000U) } } }; + XMGLOBALCONST XMVECTORF32 g_XMNormalizeA2B10G10R10 = { { { 1.0f / 511.0f, 1.0f / (511.0f * float(0x400)), 1.0f / (511.0f * float(0x100000)), 1.0f / (3.0f * float(0x40000000)) } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskX16Y16 = { { { 0x0000FFFF, 0xFFFF0000, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMFlipX16Y16 = { { { 0x00008000, 0x00000000, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORF32 g_XMFixX16Y16 = { { { -32768.0f, 0.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNormalizeX16Y16 = { { { 1.0f / 32767.0f, 1.0f / (32767.0f * 65536.0f), 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskX16Y16Z16W16 = { { { 0x0000FFFF, 0x0000FFFF, 0xFFFF0000, 0xFFFF0000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMFlipX16Y16Z16W16 = { { { 0x00008000, 0x00008000, 0x00000000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORF32 g_XMFixX16Y16Z16W16 = { { { -32768.0f, -32768.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNormalizeX16Y16Z16W16 = { { { 
1.0f / 32767.0f, 1.0f / 32767.0f, 1.0f / (32767.0f * 65536.0f), 1.0f / (32767.0f * 65536.0f) } } }; + XMGLOBALCONST XMVECTORF32 g_XMNoFraction = { { { 8388608.0f, 8388608.0f, 8388608.0f, 8388608.0f } } }; + XMGLOBALCONST XMVECTORI32 g_XMMaskByte = { { { 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegateX = { { { -1.0f, 1.0f, 1.0f, 1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegateY = { { { 1.0f, -1.0f, 1.0f, 1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegateZ = { { { 1.0f, 1.0f, -1.0f, 1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMNegateW = { { { 1.0f, 1.0f, 1.0f, -1.0f } } }; + XMGLOBALCONST XMVECTORU32 g_XMSelect0101 = { { { XM_SELECT_0, XM_SELECT_1, XM_SELECT_0, XM_SELECT_1 } } }; + XMGLOBALCONST XMVECTORU32 g_XMSelect1010 = { { { XM_SELECT_1, XM_SELECT_0, XM_SELECT_1, XM_SELECT_0 } } }; + XMGLOBALCONST XMVECTORI32 g_XMOneHalfMinusEpsilon = { { { 0x3EFFFFFD, 0x3EFFFFFD, 0x3EFFFFFD, 0x3EFFFFFD } } }; + XMGLOBALCONST XMVECTORU32 g_XMSelect1000 = { { { XM_SELECT_1, XM_SELECT_0, XM_SELECT_0, XM_SELECT_0 } } }; + XMGLOBALCONST XMVECTORU32 g_XMSelect1100 = { { { XM_SELECT_1, XM_SELECT_1, XM_SELECT_0, XM_SELECT_0 } } }; + XMGLOBALCONST XMVECTORU32 g_XMSelect1110 = { { { XM_SELECT_1, XM_SELECT_1, XM_SELECT_1, XM_SELECT_0 } } }; + XMGLOBALCONST XMVECTORU32 g_XMSelect1011 = { { { XM_SELECT_1, XM_SELECT_0, XM_SELECT_1, XM_SELECT_1 } } }; + XMGLOBALCONST XMVECTORF32 g_XMFixupY16 = { { { 1.0f, 1.0f / 65536.0f, 0.0f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMFixupY16W16 = { { { 1.0f, 1.0f, 1.0f / 65536.0f, 1.0f / 65536.0f } } }; + XMGLOBALCONST XMVECTORU32 g_XMFlipY = { { { 0, 0x80000000, 0, 0 } } }; + XMGLOBALCONST XMVECTORU32 g_XMFlipZ = { { { 0, 0, 0x80000000, 0 } } }; + XMGLOBALCONST XMVECTORU32 g_XMFlipW = { { { 0, 0, 0, 0x80000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMFlipYZ = { { { 0, 0x80000000, 0x80000000, 0 } } }; + XMGLOBALCONST XMVECTORU32 g_XMFlipZW = { { { 0, 0, 0x80000000, 0x80000000 } } }; + XMGLOBALCONST XMVECTORU32 
g_XMFlipYW = { { { 0, 0x80000000, 0, 0x80000000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMMaskDec4 = { { { 0x3FF, 0x3FF << 10, 0x3FF << 20, static_cast(0xC0000000) } } }; + XMGLOBALCONST XMVECTORI32 g_XMXorDec4 = { { { 0x200, 0x200 << 10, 0x200 << 20, 0 } } }; + XMGLOBALCONST XMVECTORF32 g_XMAddUDec4 = { { { 0, 0, 0, 32768.0f * 65536.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMAddDec4 = { { { -512.0f, -512.0f * 1024.0f, -512.0f * 1024.0f * 1024.0f, 0 } } }; + XMGLOBALCONST XMVECTORF32 g_XMMulDec4 = { { { 1.0f, 1.0f / 1024.0f, 1.0f / (1024.0f * 1024.0f), 1.0f / (1024.0f * 1024.0f * 1024.0f) } } }; + XMGLOBALCONST XMVECTORU32 g_XMMaskByte4 = { { { 0xFF, 0xFF00, 0xFF0000, 0xFF000000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMXorByte4 = { { { 0x80, 0x8000, 0x800000, 0x00000000 } } }; + XMGLOBALCONST XMVECTORF32 g_XMAddByte4 = { { { -128.0f, -128.0f * 256.0f, -128.0f * 65536.0f, 0 } } }; + XMGLOBALCONST XMVECTORF32 g_XMFixUnsigned = { { { 32768.0f * 65536.0f, 32768.0f * 65536.0f, 32768.0f * 65536.0f, 32768.0f * 65536.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMMaxInt = { { { 65536.0f * 32768.0f - 128.0f, 65536.0f * 32768.0f - 128.0f, 65536.0f * 32768.0f - 128.0f, 65536.0f * 32768.0f - 128.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMMaxUInt = { { { 65536.0f * 65536.0f - 256.0f, 65536.0f * 65536.0f - 256.0f, 65536.0f * 65536.0f - 256.0f, 65536.0f * 65536.0f - 256.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMUnsignedFix = { { { 32768.0f * 65536.0f, 32768.0f * 65536.0f, 32768.0f * 65536.0f, 32768.0f * 65536.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMsrgbScale = { { { 12.92f, 12.92f, 12.92f, 1.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMsrgbA = { { { 0.055f, 0.055f, 0.055f, 0.0f } } }; + XMGLOBALCONST XMVECTORF32 g_XMsrgbA1 = { { { 1.055f, 1.055f, 1.055f, 1.0f } } }; + XMGLOBALCONST XMVECTORI32 g_XMExponentBias = { { { 127, 127, 127, 127 } } }; + XMGLOBALCONST XMVECTORI32 g_XMSubnormalExponent = { { { -126, -126, -126, -126 } } }; + XMGLOBALCONST XMVECTORI32 g_XMNumTrailing = { { { 23, 23, 23, 
23 } } }; + XMGLOBALCONST XMVECTORI32 g_XMMinNormal = { { { 0x00800000, 0x00800000, 0x00800000, 0x00800000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegInfinity = { { { 0xFF800000, 0xFF800000, 0xFF800000, 0xFF800000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMNegQNaN = { { { 0xFFC00000, 0xFFC00000, 0xFFC00000, 0xFFC00000 } } }; + XMGLOBALCONST XMVECTORI32 g_XMBin128 = { { { 0x43000000, 0x43000000, 0x43000000, 0x43000000 } } }; + XMGLOBALCONST XMVECTORU32 g_XMBinNeg150 = { { { 0xC3160000, 0xC3160000, 0xC3160000, 0xC3160000 } } }; + XMGLOBALCONST XMVECTORI32 g_XM253 = { { { 253, 253, 253, 253 } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst1 = { { { -6.93147182e-1f, -6.93147182e-1f, -6.93147182e-1f, -6.93147182e-1f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst2 = { { { +2.40226462e-1f, +2.40226462e-1f, +2.40226462e-1f, +2.40226462e-1f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst3 = { { { -5.55036440e-2f, -5.55036440e-2f, -5.55036440e-2f, -5.55036440e-2f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst4 = { { { +9.61597636e-3f, +9.61597636e-3f, +9.61597636e-3f, +9.61597636e-3f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst5 = { { { -1.32823968e-3f, -1.32823968e-3f, -1.32823968e-3f, -1.32823968e-3f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst6 = { { { +1.47491097e-4f, +1.47491097e-4f, +1.47491097e-4f, +1.47491097e-4f } } }; + XMGLOBALCONST XMVECTORF32 g_XMExpEst7 = { { { -1.08635004e-5f, -1.08635004e-5f, -1.08635004e-5f, -1.08635004e-5f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst0 = { { { +1.442693f, +1.442693f, +1.442693f, +1.442693f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst1 = { { { -0.721242f, -0.721242f, -0.721242f, -0.721242f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst2 = { { { +0.479384f, +0.479384f, +0.479384f, +0.479384f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst3 = { { { -0.350295f, -0.350295f, -0.350295f, -0.350295f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst4 = { { { +0.248590f, +0.248590f, +0.248590f, +0.248590f } } }; + XMGLOBALCONST XMVECTORF32 
g_XMLogEst5 = { { { -0.145700f, -0.145700f, -0.145700f, -0.145700f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst6 = { { { +0.057148f, +0.057148f, +0.057148f, +0.057148f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLogEst7 = { { { -0.010578f, -0.010578f, -0.010578f, -0.010578f } } }; + XMGLOBALCONST XMVECTORF32 g_XMLgE = { { { +1.442695f, +1.442695f, +1.442695f, +1.442695f } } }; + XMGLOBALCONST XMVECTORF32 g_XMInvLgE = { { { +6.93147182e-1f, +6.93147182e-1f, +6.93147182e-1f, +6.93147182e-1f } } }; + XMGLOBALCONST XMVECTORF32 g_UByteMax = { { { 255.0f, 255.0f, 255.0f, 255.0f } } }; + XMGLOBALCONST XMVECTORF32 g_ByteMin = { { { -127.0f, -127.0f, -127.0f, -127.0f } } }; + XMGLOBALCONST XMVECTORF32 g_ByteMax = { { { 127.0f, 127.0f, 127.0f, 127.0f } } }; + XMGLOBALCONST XMVECTORF32 g_ShortMin = { { { -32767.0f, -32767.0f, -32767.0f, -32767.0f } } }; + XMGLOBALCONST XMVECTORF32 g_ShortMax = { { { 32767.0f, 32767.0f, 32767.0f, 32767.0f } } }; + XMGLOBALCONST XMVECTORF32 g_UShortMax = { { { 65535.0f, 65535.0f, 65535.0f, 65535.0f } } }; + + /**************************************************************************** + * + * Implementation + * + ****************************************************************************/ + +#pragma warning(push) +#pragma warning(disable:4068 4214 4204 4365 4616 4640 6001 6101) + // C4068/4616: ignore unknown pragmas + // C4214/4204: nonstandard extension used + // C4365/4640: Off by default noise + // C6001/6101: False positives + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 25000, "FXMVECTOR is 16 bytes") +#pragma prefast(disable : 26495, "Union initialization confuses /analyze") +#endif + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wundefined-reinterpret-cast" +#endif + +//------------------------------------------------------------------------------ + + inline XMVECTOR XM_CALLCONV XMVectorSetBinaryConstant(uint32_t C0, uint32_t C1, uint32_t C2, uint32_t C3) noexcept + { +#if 
defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = (0 - (C0 & 1)) & 0x3F800000; + vResult.u[1] = (0 - (C1 & 1)) & 0x3F800000; + vResult.u[2] = (0 - (C2 & 1)) & 0x3F800000; + vResult.u[3] = (0 - (C3 & 1)) & 0x3F800000; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = (0 - (C0 & 1)) & 0x3F800000; + vResult.u[1] = (0 - (C1 & 1)) & 0x3F800000; + vResult.u[2] = (0 - (C2 & 1)) & 0x3F800000; + vResult.u[3] = (0 - (C3 & 1)) & 0x3F800000; + return vResult.v; +#else // XM_SSE_INTRINSICS_ + static const XMVECTORU32 g_vMask1 = { { { 1, 1, 1, 1 } } }; + // Move the parms to a vector + __m128i vTemp = _mm_set_epi32(static_cast(C3), static_cast(C2), static_cast(C1), static_cast(C0)); + // Mask off the low bits + vTemp = _mm_and_si128(vTemp, g_vMask1); + // 0xFFFFFFFF on true bits + vTemp = _mm_cmpeq_epi32(vTemp, g_vMask1); + // 0xFFFFFFFF -> 1.0f, 0x00000000 -> 0.0f + vTemp = _mm_and_si128(vTemp, g_XMOne); + return _mm_castsi128_ps(vTemp); +#endif + } + + //------------------------------------------------------------------------------ + + inline XMVECTOR XM_CALLCONV XMVectorSplatConstant(int32_t IntConstant, uint32_t DivExponent) noexcept + { + assert(IntConstant >= -16 && IntConstant <= 15); + assert(DivExponent < 32); +#if defined(_XM_NO_INTRINSICS_) + + using DirectX::XMConvertVectorIntToFloat; + + XMVECTORI32 V = { { { IntConstant, IntConstant, IntConstant, IntConstant } } }; + return XMConvertVectorIntToFloat(V.v, DivExponent); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Splat the int + int32x4_t vScale = vdupq_n_s32(IntConstant); + // Convert to a float + XMVECTOR vResult = vcvtq_f32_s32(vScale); + // Convert DivExponent into 1.0f/(1<(&vScale)[0]); + return vResult; +#else // XM_SSE_INTRINSICS_ + // Splat the int + __m128i vScale = _mm_set1_epi32(IntConstant); + // Convert to a float + XMVECTOR vResult = _mm_cvtepi32_ps(vScale); + // Convert DivExponent into 1.0f/(1<(uScale)); + // Multiply by the 
reciprocal (Perform a right shift by DivExponent) + vResult = _mm_mul_ps(vResult, _mm_castsi128_ps(vScale)); + return vResult; +#endif + } + + //------------------------------------------------------------------------------ + + inline XMVECTOR XM_CALLCONV XMVectorSplatConstantInt(int32_t IntConstant) noexcept + { + assert(IntConstant >= -16 && IntConstant <= 15); +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORI32 V = { { { IntConstant, IntConstant, IntConstant, IntConstant } } }; + return V.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t V = vdupq_n_s32(IntConstant); + return reinterpret_cast(&V)[0]; +#else // XM_SSE_INTRINSICS_ + __m128i V = _mm_set1_epi32(IntConstant); + return _mm_castsi128_ps(V); +#endif + } + +#include "directxmath/directxmathconvert.inl" +#include "directxmath/directxmathvector.inl" +#include "directxmath/directxmathmatrix.inl" +#include "directxmath/directxmathmisc.inl" + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +#pragma warning(pop) + +} // namespace DirectX + +using namespace DirectX; + +#pragma GCC diagnostic pop diff --git a/include/directxmath/directxmathconvert.inl b/include/directxmath/directxmathconvert.inl new file mode 100644 index 0000000..dbf153c --- /dev/null +++ b/include/directxmath/directxmathconvert.inl @@ -0,0 +1,2187 @@ +//------------------------------------------------------------------------------------- +// DirectXMathConvert.inl -- SIMD C++ Math library +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+// +// http://go.microsoft.com/fwlink/?LinkID=615560 +//------------------------------------------------------------------------------------- + +#pragma once + +/**************************************************************************** + * + * Data conversion + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + +#pragma warning(push) +#pragma warning(disable:4701) +// C4701: false positives + +inline XMVECTOR XM_CALLCONV XMConvertVectorIntToFloat +( + FXMVECTOR VInt, + uint32_t DivExponent +) noexcept +{ + assert(DivExponent < 32); +#if defined(_XM_NO_INTRINSICS_) + float fScale = 1.0f / static_cast(1U << DivExponent); + uint32_t ElementIndex = 0; + XMVECTOR Result; + do { + auto iTemp = static_cast(VInt.vector4_u32[ElementIndex]); + Result.vector4_f32[ElementIndex] = static_cast(iTemp)* fScale; + } while (++ElementIndex < 4); + return Result; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fScale = 1.0f / (float)(1U << DivExponent); + float32x4_t vResult = vcvtq_f32_s32(VInt); + return vmulq_n_f32(vResult, fScale); +#else // _XM_SSE_INTRINSICS_ + // Convert to floats + XMVECTOR vResult = _mm_cvtepi32_ps(_mm_castps_si128(VInt)); + // Convert DivExponent into 1.0f/(1<(uScale)); + vResult = _mm_mul_ps(vResult, _mm_castsi128_ps(vScale)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMConvertVectorFloatToInt +( + FXMVECTOR VFloat, + uint32_t MulExponent +) noexcept +{ + assert(MulExponent < 32); +#if defined(_XM_NO_INTRINSICS_) + // Get the scalar factor. 
+ auto fScale = static_cast(1U << MulExponent); + uint32_t ElementIndex = 0; + XMVECTOR Result; + do { + int32_t iResult; + float fTemp = VFloat.vector4_f32[ElementIndex] * fScale; + if (fTemp <= -(65536.0f * 32768.0f)) + { + iResult = (-0x7FFFFFFF) - 1; + } + else if (fTemp > (65536.0f * 32768.0f) - 128.0f) + { + iResult = 0x7FFFFFFF; + } + else { + iResult = static_cast(fTemp); + } + Result.vector4_u32[ElementIndex] = static_cast(iResult); + } while (++ElementIndex < 4); + return Result; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vResult = vmulq_n_f32(VFloat, (float)(1U << MulExponent)); + // In case of positive overflow, detect it + uint32x4_t vOverflow = vcgtq_f32(vResult, g_XMMaxInt); + // Float to int conversion + int32x4_t vResulti = vcvtq_s32_f32(vResult); + // If there was positive overflow, set to 0x7FFFFFFF + vResult = vandq_u32(vOverflow, g_XMAbsMask); + vOverflow = vbicq_u32(vResulti, vOverflow); + vOverflow = vorrq_u32(vOverflow, vResult); + return vOverflow; +#else // _XM_SSE_INTRINSICS_ + XMVECTOR vResult = _mm_set_ps1(static_cast(1U << MulExponent)); + vResult = _mm_mul_ps(vResult, VFloat); + // In case of positive overflow, detect it + XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxInt); + // Float to int conversion + __m128i vResulti = _mm_cvttps_epi32(vResult); + // If there was positive overflow, set to 0x7FFFFFFF + vResult = _mm_and_ps(vOverflow, g_XMAbsMask); + vOverflow = _mm_andnot_ps(vOverflow, _mm_castsi128_ps(vResulti)); + vOverflow = _mm_or_ps(vOverflow, vResult); + return vOverflow; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMConvertVectorUIntToFloat +( + FXMVECTOR VUInt, + uint32_t DivExponent +) noexcept +{ + assert(DivExponent < 32); +#if defined(_XM_NO_INTRINSICS_) + float fScale = 1.0f / static_cast(1U << DivExponent); + uint32_t ElementIndex = 0; + XMVECTOR Result; + do { + Result.vector4_f32[ElementIndex] = 
static_cast(VUInt.vector4_u32[ElementIndex])* fScale; + } while (++ElementIndex < 4); + return Result; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fScale = 1.0f / (float)(1U << DivExponent); + float32x4_t vResult = vcvtq_f32_u32(VUInt); + return vmulq_n_f32(vResult, fScale); +#else // _XM_SSE_INTRINSICS_ + // For the values that are higher than 0x7FFFFFFF, a fixup is needed + // Determine which ones need the fix. + XMVECTOR vMask = _mm_and_ps(VUInt, g_XMNegativeZero); + // Force all values positive + XMVECTOR vResult = _mm_xor_ps(VUInt, vMask); + // Convert to floats + vResult = _mm_cvtepi32_ps(_mm_castps_si128(vResult)); + // Convert 0x80000000 -> 0xFFFFFFFF + __m128i iMask = _mm_srai_epi32(_mm_castps_si128(vMask), 31); + // For only the ones that are too big, add the fixup + vMask = _mm_and_ps(_mm_castsi128_ps(iMask), g_XMFixUnsigned); + vResult = _mm_add_ps(vResult, vMask); + // Convert DivExponent into 1.0f/(1<(uScale)); + vResult = _mm_mul_ps(vResult, _mm_castsi128_ps(iMask)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMConvertVectorFloatToUInt +( + FXMVECTOR VFloat, + uint32_t MulExponent +) noexcept +{ + assert(MulExponent < 32); +#if defined(_XM_NO_INTRINSICS_) + // Get the scalar factor. 
+ auto fScale = static_cast(1U << MulExponent); + uint32_t ElementIndex = 0; + XMVECTOR Result; + do { + uint32_t uResult; + float fTemp = VFloat.vector4_f32[ElementIndex] * fScale; + if (fTemp <= 0.0f) + { + uResult = 0; + } + else if (fTemp >= (65536.0f * 65536.0f)) + { + uResult = 0xFFFFFFFFU; + } + else { + uResult = static_cast(fTemp); + } + Result.vector4_u32[ElementIndex] = uResult; + } while (++ElementIndex < 4); + return Result; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vResult = vmulq_n_f32(VFloat, (float)(1U << MulExponent)); + // In case of overflow, detect it + uint32x4_t vOverflow = vcgtq_f32(vResult, g_XMMaxUInt); + // Float to int conversion + uint32x4_t vResulti = vcvtq_u32_f32(vResult); + // If there was overflow, set to 0xFFFFFFFFU + vResult = vbicq_u32(vResulti, vOverflow); + vOverflow = vorrq_u32(vOverflow, vResult); + return vOverflow; +#else // _XM_SSE_INTRINSICS_ + XMVECTOR vResult = _mm_set_ps1(static_cast(1U << MulExponent)); + vResult = _mm_mul_ps(vResult, VFloat); + // Clamp to >=0 + vResult = _mm_max_ps(vResult, g_XMZero); + // Any numbers that are too big, set to 0xFFFFFFFFU + XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxUInt); + XMVECTOR vValue = g_XMUnsignedFix; + // Too large for a signed integer? 
+ XMVECTOR vMask = _mm_cmpge_ps(vResult, vValue); + // Zero for number's lower than 0x80000000, 32768.0f*65536.0f otherwise + vValue = _mm_and_ps(vValue, vMask); + // Perform fixup only on numbers too large (Keeps low bit precision) + vResult = _mm_sub_ps(vResult, vValue); + __m128i vResulti = _mm_cvttps_epi32(vResult); + // Convert from signed to unsigned pnly if greater than 0x80000000 + vMask = _mm_and_ps(vMask, g_XMNegativeZero); + vResult = _mm_xor_ps(_mm_castsi128_ps(vResulti), vMask); + // On those that are too large, set to 0xFFFFFFFF + vResult = _mm_or_ps(vResult, vOverflow); + return vResult; +#endif +} + +#pragma warning(pop) + +/**************************************************************************** + * + * Vector and matrix load operations + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt(const uint32_t* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = *pSource; + V.vector4_u32[1] = 0; + V.vector4_u32[2] = 0; + V.vector4_u32[3] = 0; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t zero = vdupq_n_u32(0); + return vld1q_lane_u32(pSource, zero, 0); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_load_ss(reinterpret_cast(pSource)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat(const float* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = *pSource; + V.vector4_f32[1] = 0.f; + V.vector4_f32[2] = 0.f; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t zero = vdupq_n_f32(0); + return vld1q_lane_f32(pSource, zero, 0); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_load_ss(pSource); +#endif 
+} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt2(const uint32_t* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = 0; + V.vector4_u32[3] = 0; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t x = vld1_u32(pSource); + uint32x2_t zero = vdup_n_u32(0); + return vcombine_u32(x, zero); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt2A(const uint32_t* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = 0; + V.vector4_u32[3] = 0; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + uint32x2_t x = vld1_u32_ex(pSource, 64); +#else + uint32x2_t x = vld1_u32(pSource); +#endif + uint32x2_t zero = vdup_n_u32(0); + return vcombine_u32(x, zero); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat2(const XMFLOAT2* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = 0.f; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t x = vld1_f32(reinterpret_cast(pSource)); + float32x2_t zero = vdup_n_f32(0); + return vcombine_f32(x, zero); +#elif defined(_XM_SSE_INTRINSICS_) + return 
_mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat2A(const XMFLOAT2A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = 0.f; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + float32x2_t x = vld1_f32_ex(reinterpret_cast(pSource), 64); +#else + float32x2_t x = vld1_f32(reinterpret_cast(pSource)); +#endif + float32x2_t zero = vdup_n_f32(0); + return vcombine_f32(x, zero); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadSInt2(const XMINT2* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = 0.f; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x2_t x = vld1_s32(reinterpret_cast(pSource)); + float32x2_t v = vcvt_f32_s32(x); + float32x2_t zero = vdup_n_f32(0); + return vcombine_f32(v, zero); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 V = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + return _mm_cvtepi32_ps(_mm_castps_si128(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadUInt2(const XMUINT2* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = 
0.f; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t x = vld1_u32(reinterpret_cast(pSource)); + float32x2_t v = vcvt_f32_u32(x); + float32x2_t zero = vdup_n_f32(0); + return vcombine_f32(v, zero); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 V = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + // For the values that are higher than 0x7FFFFFFF, a fixup is needed + // Determine which ones need the fix. + XMVECTOR vMask = _mm_and_ps(V, g_XMNegativeZero); + // Force all values positive + XMVECTOR vResult = _mm_xor_ps(V, vMask); + // Convert to floats + vResult = _mm_cvtepi32_ps(_mm_castps_si128(vResult)); + // Convert 0x80000000 -> 0xFFFFFFFF + __m128i iMask = _mm_srai_epi32(_mm_castps_si128(vMask), 31); + // For only the ones that are too big, add the fixup + vMask = _mm_and_ps(_mm_castsi128_ps(iMask), g_XMFixUnsigned); + vResult = _mm_add_ps(vResult, vMask); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt3(const uint32_t* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = pSource[2]; + V.vector4_u32[3] = 0; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t x = vld1_u32(pSource); + uint32x2_t zero = vdup_n_u32(0); + uint32x2_t y = vld1_lane_u32(pSource + 2, zero, 0); + return vcombine_u32(x, y); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(pSource + 2)); + return _mm_insert_ps(xy, z, 0x20); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(pSource + 2)); + return _mm_movelh_ps(xy, z); +#endif +} + 
+//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt3A(const uint32_t* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = pSource[2]; + V.vector4_u32[3] = 0; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Reads an extra integer which is zero'd +#ifdef _MSC_VER + uint32x4_t V = vld1q_u32_ex(pSource, 128); +#else + uint32x4_t V = vld1q_u32(pSource); +#endif + return vsetq_lane_u32(0, V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(pSource + 2)); + return _mm_insert_ps(xy, z, 0x20); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(pSource + 2)); + return _mm_movelh_ps(xy, z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat3(const XMFLOAT3* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = pSource->z; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t x = vld1_f32(reinterpret_cast(pSource)); + float32x2_t zero = vdup_n_f32(0); + float32x2_t y = vld1_lane_f32(reinterpret_cast(pSource) + 2, zero, 0); + return vcombine_f32(x, y); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(&pSource->z); + return _mm_insert_ps(xy, z, 0x20); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = 
_mm_load_ss(&pSource->z); + return _mm_movelh_ps(xy, z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat3A(const XMFLOAT3A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = pSource->z; + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Reads an extra float which is zero'd +#ifdef _MSC_VER + float32x4_t V = vld1q_f32_ex(reinterpret_cast(pSource), 128); +#else + float32x4_t V = vld1q_f32(reinterpret_cast(pSource)); +#endif + return vsetq_lane_f32(0, V, 3); +#elif defined(_XM_SSE_INTRINSICS_) + // Reads an extra float which is zero'd + __m128 V = _mm_load_ps(&pSource->x); + return _mm_and_ps(V, g_XMMask3); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadSInt3(const XMINT3* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = static_cast(pSource->z); + V.vector4_f32[3] = 0.f; + return V; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x2_t x = vld1_s32(reinterpret_cast(pSource)); + int32x2_t zero = vdup_n_s32(0); + int32x2_t y = vld1_lane_s32(reinterpret_cast(pSource) + 2, zero, 0); + int32x4_t v = vcombine_s32(x, y); + return vcvtq_f32_s32(v); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(&pSource->z)); + __m128 V = _mm_movelh_ps(xy, z); + return _mm_cvtepi32_ps(_mm_castps_si128(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ 
+inline XMVECTOR XM_CALLCONV XMLoadUInt3(const XMUINT3* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = static_cast(pSource->z); + V.vector4_f32[3] = 0.f; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t x = vld1_u32(reinterpret_cast(pSource)); + uint32x2_t zero = vdup_n_u32(0); + uint32x2_t y = vld1_lane_u32(reinterpret_cast(pSource) + 2, zero, 0); + uint32x4_t v = vcombine_u32(x, y); + return vcvtq_f32_u32(v); +#elif defined(_XM_SSE_INTRINSICS_) + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pSource))); + __m128 z = _mm_load_ss(reinterpret_cast(&pSource->z)); + __m128 V = _mm_movelh_ps(xy, z); + // For the values that are higher than 0x7FFFFFFF, a fixup is needed + // Determine which ones need the fix. + XMVECTOR vMask = _mm_and_ps(V, g_XMNegativeZero); + // Force all values positive + XMVECTOR vResult = _mm_xor_ps(V, vMask); + // Convert to floats + vResult = _mm_cvtepi32_ps(_mm_castps_si128(vResult)); + // Convert 0x80000000 -> 0xFFFFFFFF + __m128i iMask = _mm_srai_epi32(_mm_castps_si128(vMask), 31); + // For only the ones that are too big, add the fixup + vMask = _mm_and_ps(_mm_castsi128_ps(iMask), g_XMFixUnsigned); + vResult = _mm_add_ps(vResult, vMask); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt4(const uint32_t* pSource) noexcept +{ + assert(pSource); + +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = pSource[2]; + V.vector4_u32[3] = pSource[3]; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_u32(pSource); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_loadu_si128(reinterpret_cast(pSource)); + return _mm_castsi128_ps(V); +#endif +} + 
+//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadInt4A(const uint32_t* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_u32[0] = pSource[0]; + V.vector4_u32[1] = pSource[1]; + V.vector4_u32[2] = pSource[2]; + V.vector4_u32[3] = pSource[3]; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + return vld1q_u32_ex(pSource, 128); +#else + return vld1q_u32(pSource); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_load_si128(reinterpret_cast(pSource)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat4(const XMFLOAT4* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = pSource->z; + V.vector4_f32[3] = pSource->w; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_f32(reinterpret_cast(pSource)); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_loadu_ps(&pSource->x); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadFloat4A(const XMFLOAT4A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = pSource->x; + V.vector4_f32[1] = pSource->y; + V.vector4_f32[2] = pSource->z; + V.vector4_f32[3] = pSource->w; + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + return vld1q_f32_ex(reinterpret_cast(pSource), 128); +#else + return vld1q_f32(reinterpret_cast(pSource)); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_load_ps(&pSource->x); +#endif +} + 
+//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadSInt4(const XMINT4* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = static_cast(pSource->z); + V.vector4_f32[3] = static_cast(pSource->w); + return V; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t v = vld1q_s32(reinterpret_cast(pSource)); + return vcvtq_f32_s32(v); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_loadu_si128(reinterpret_cast(pSource)); + return _mm_cvtepi32_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMLoadUInt4(const XMUINT4* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR V; + V.vector4_f32[0] = static_cast(pSource->x); + V.vector4_f32[1] = static_cast(pSource->y); + V.vector4_f32[2] = static_cast(pSource->z); + V.vector4_f32[3] = static_cast(pSource->w); + return V; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t v = vld1q_u32(reinterpret_cast(pSource)); + return vcvtq_f32_u32(v); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_loadu_si128(reinterpret_cast(pSource)); + // For the values that are higher than 0x7FFFFFFF, a fixup is needed + // Determine which ones need the fix. 
+ XMVECTOR vMask = _mm_and_ps(_mm_castsi128_ps(V), g_XMNegativeZero); + // Force all values positive + XMVECTOR vResult = _mm_xor_ps(_mm_castsi128_ps(V), vMask); + // Convert to floats + vResult = _mm_cvtepi32_ps(_mm_castps_si128(vResult)); + // Convert 0x80000000 -> 0xFFFFFFFF + __m128i iMask = _mm_srai_epi32(_mm_castps_si128(vMask), 31); + // For only the ones that are too big, add the fixup + vMask = _mm_and_ps(_mm_castsi128_ps(iMask), g_XMFixUnsigned); + vResult = _mm_add_ps(vResult, vMask); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat3x3(const XMFLOAT3X3* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[0][1]; + M.r[0].vector4_f32[2] = pSource->m[0][2]; + M.r[0].vector4_f32[3] = 0.0f; + + M.r[1].vector4_f32[0] = pSource->m[1][0]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[1][2]; + M.r[1].vector4_f32[3] = 0.0f; + + M.r[2].vector4_f32[0] = pSource->m[2][0]; + M.r[2].vector4_f32[1] = pSource->m[2][1]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = 0.0f; + M.r[3].vector4_f32[0] = 0.0f; + M.r[3].vector4_f32[1] = 0.0f; + M.r[3].vector4_f32[2] = 0.0f; + M.r[3].vector4_f32[3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t v0 = vld1q_f32(&pSource->m[0][0]); + float32x4_t v1 = vld1q_f32(&pSource->m[1][1]); + float32x2_t v2 = vcreate_f32(static_cast(*reinterpret_cast(&pSource->m[2][2]))); + float32x4_t T = vextq_f32(v0, v1, 3); + + XMMATRIX M; + M.r[0] = vandq_u32(v0, g_XMMask3); + M.r[1] = vandq_u32(T, g_XMMask3); + M.r[2] = vcombine_f32(vget_high_f32(v1), v2); + M.r[3] = g_XMIdentityR3; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 Z = _mm_setzero_ps(); + + __m128 V1 = _mm_loadu_ps(&pSource->m[0][0]); + __m128 V2 = 
_mm_loadu_ps(&pSource->m[1][1]); + __m128 V3 = _mm_load_ss(&pSource->m[2][2]); + + __m128 T1 = _mm_unpackhi_ps(V1, Z); + __m128 T2 = _mm_unpacklo_ps(V2, Z); + __m128 T3 = _mm_shuffle_ps(V3, T2, _MM_SHUFFLE(0, 1, 0, 0)); + __m128 T4 = _mm_movehl_ps(T2, T3); + __m128 T5 = _mm_movehl_ps(Z, T1); + + XMMATRIX M; + M.r[0] = _mm_movelh_ps(V1, T1); + M.r[1] = _mm_add_ps(T4, T5); + M.r[2] = _mm_shuffle_ps(V2, V3, _MM_SHUFFLE(1, 0, 3, 2)); + M.r[3] = g_XMIdentityR3; + return M; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat4x3(const XMFLOAT4X3* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[0][1]; + M.r[0].vector4_f32[2] = pSource->m[0][2]; + M.r[0].vector4_f32[3] = 0.0f; + + M.r[1].vector4_f32[0] = pSource->m[1][0]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[1][2]; + M.r[1].vector4_f32[3] = 0.0f; + + M.r[2].vector4_f32[0] = pSource->m[2][0]; + M.r[2].vector4_f32[1] = pSource->m[2][1]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = 0.0f; + + M.r[3].vector4_f32[0] = pSource->m[3][0]; + M.r[3].vector4_f32[1] = pSource->m[3][1]; + M.r[3].vector4_f32[2] = pSource->m[3][2]; + M.r[3].vector4_f32[3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t v0 = vld1q_f32(&pSource->m[0][0]); + float32x4_t v1 = vld1q_f32(&pSource->m[1][1]); + float32x4_t v2 = vld1q_f32(&pSource->m[2][2]); + + float32x4_t T1 = vextq_f32(v0, v1, 3); + float32x4_t T2 = vcombine_f32(vget_high_f32(v1), vget_low_f32(v2)); + float32x4_t T3 = vextq_f32(v2, v2, 1); + + XMMATRIX M; + M.r[0] = vandq_u32(v0, g_XMMask3); + M.r[1] = vandq_u32(T1, g_XMMask3); + M.r[2] = vandq_u32(T2, g_XMMask3); + M.r[3] = vsetq_lane_f32(1.f, T3, 3); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + // Use unaligned load 
instructions to + // load the 12 floats + // vTemp1 = x1,y1,z1,x2 + XMVECTOR vTemp1 = _mm_loadu_ps(&pSource->m[0][0]); + // vTemp2 = y2,z2,x3,y3 + XMVECTOR vTemp2 = _mm_loadu_ps(&pSource->m[1][1]); + // vTemp4 = z3,x4,y4,z4 + XMVECTOR vTemp4 = _mm_loadu_ps(&pSource->m[2][2]); + // vTemp3 = x3,y3,z3,z3 + XMVECTOR vTemp3 = _mm_shuffle_ps(vTemp2, vTemp4, _MM_SHUFFLE(0, 0, 3, 2)); + // vTemp2 = y2,z2,x2,x2 + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp1, _MM_SHUFFLE(3, 3, 1, 0)); + // vTemp2 = x2,y2,z2,z2 + vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(1, 1, 0, 2)); + // vTemp1 = x1,y1,z1,0 + vTemp1 = _mm_and_ps(vTemp1, g_XMMask3); + // vTemp2 = x2,y2,z2,0 + vTemp2 = _mm_and_ps(vTemp2, g_XMMask3); + // vTemp3 = x3,y3,z3,0 + vTemp3 = _mm_and_ps(vTemp3, g_XMMask3); + // vTemp4i = x4,y4,z4,0 + __m128i vTemp4i = _mm_srli_si128(_mm_castps_si128(vTemp4), 32 / 8); + // vTemp4i = x4,y4,z4,1.0f + vTemp4i = _mm_or_si128(vTemp4i, g_XMIdentityR3); + XMMATRIX M(vTemp1, + vTemp2, + vTemp3, + _mm_castsi128_ps(vTemp4i)); + return M; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat4x3A(const XMFLOAT4X3A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[0][1]; + M.r[0].vector4_f32[2] = pSource->m[0][2]; + M.r[0].vector4_f32[3] = 0.0f; + + M.r[1].vector4_f32[0] = pSource->m[1][0]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[1][2]; + M.r[1].vector4_f32[3] = 0.0f; + + M.r[2].vector4_f32[0] = pSource->m[2][0]; + M.r[2].vector4_f32[1] = pSource->m[2][1]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = 0.0f; + + M.r[3].vector4_f32[0] = pSource->m[3][0]; + M.r[3].vector4_f32[1] = pSource->m[3][1]; + M.r[3].vector4_f32[2] = pSource->m[3][2]; + M.r[3].vector4_f32[3] = 
1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + float32x4_t v0 = vld1q_f32_ex(&pSource->m[0][0], 128); + float32x4_t v1 = vld1q_f32_ex(&pSource->m[1][1], 128); + float32x4_t v2 = vld1q_f32_ex(&pSource->m[2][2], 128); +#else + float32x4_t v0 = vld1q_f32(&pSource->m[0][0]); + float32x4_t v1 = vld1q_f32(&pSource->m[1][1]); + float32x4_t v2 = vld1q_f32(&pSource->m[2][2]); +#endif + + float32x4_t T1 = vextq_f32(v0, v1, 3); + float32x4_t T2 = vcombine_f32(vget_high_f32(v1), vget_low_f32(v2)); + float32x4_t T3 = vextq_f32(v2, v2, 1); + + XMMATRIX M; + M.r[0] = vandq_u32(v0, g_XMMask3); + M.r[1] = vandq_u32(T1, g_XMMask3); + M.r[2] = vandq_u32(T2, g_XMMask3); + M.r[3] = vsetq_lane_f32(1.f, T3, 3); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + // Use aligned load instructions to + // load the 12 floats + // vTemp1 = x1,y1,z1,x2 + XMVECTOR vTemp1 = _mm_load_ps(&pSource->m[0][0]); + // vTemp2 = y2,z2,x3,y3 + XMVECTOR vTemp2 = _mm_load_ps(&pSource->m[1][1]); + // vTemp4 = z3,x4,y4,z4 + XMVECTOR vTemp4 = _mm_load_ps(&pSource->m[2][2]); + // vTemp3 = x3,y3,z3,z3 + XMVECTOR vTemp3 = _mm_shuffle_ps(vTemp2, vTemp4, _MM_SHUFFLE(0, 0, 3, 2)); + // vTemp2 = y2,z2,x2,x2 + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp1, _MM_SHUFFLE(3, 3, 1, 0)); + // vTemp2 = x2,y2,z2,z2 + vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(1, 1, 0, 2)); + // vTemp1 = x1,y1,z1,0 + vTemp1 = _mm_and_ps(vTemp1, g_XMMask3); + // vTemp2 = x2,y2,z2,0 + vTemp2 = _mm_and_ps(vTemp2, g_XMMask3); + // vTemp3 = x3,y3,z3,0 + vTemp3 = _mm_and_ps(vTemp3, g_XMMask3); + // vTemp4i = x4,y4,z4,0 + __m128i vTemp4i = _mm_srli_si128(_mm_castps_si128(vTemp4), 32 / 8); + // vTemp4i = x4,y4,z4,1.0f + vTemp4i = _mm_or_si128(vTemp4i, g_XMIdentityR3); + XMMATRIX M(vTemp1, + vTemp2, + vTemp3, + _mm_castsi128_ps(vTemp4i)); + return M; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat3x4(const XMFLOAT3X4* 
pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[1][0]; + M.r[0].vector4_f32[2] = pSource->m[2][0]; + M.r[0].vector4_f32[3] = 0.0f; + + M.r[1].vector4_f32[0] = pSource->m[0][1]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[2][1]; + M.r[1].vector4_f32[3] = 0.0f; + + M.r[2].vector4_f32[0] = pSource->m[0][2]; + M.r[2].vector4_f32[1] = pSource->m[1][2]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = 0.0f; + + M.r[3].vector4_f32[0] = pSource->m[0][3]; + M.r[3].vector4_f32[1] = pSource->m[1][3]; + M.r[3].vector4_f32[2] = pSource->m[2][3]; + M.r[3].vector4_f32[3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2x4_t vTemp0 = vld4_f32(&pSource->_11); + float32x4_t vTemp1 = vld1q_f32(&pSource->_31); + + float32x2_t l = vget_low_f32(vTemp1); + float32x4_t T0 = vcombine_f32(vTemp0.val[0], l); + float32x2_t rl = vrev64_f32(l); + float32x4_t T1 = vcombine_f32(vTemp0.val[1], rl); + + float32x2_t h = vget_high_f32(vTemp1); + float32x4_t T2 = vcombine_f32(vTemp0.val[2], h); + float32x2_t rh = vrev64_f32(h); + float32x4_t T3 = vcombine_f32(vTemp0.val[3], rh); + + XMMATRIX M = {}; + M.r[0] = vandq_u32(T0, g_XMMask3); + M.r[1] = vandq_u32(T1, g_XMMask3); + M.r[2] = vandq_u32(T2, g_XMMask3); + M.r[3] = vsetq_lane_f32(1.f, T3, 3); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_loadu_ps(&pSource->_11); + M.r[1] = _mm_loadu_ps(&pSource->_21); + M.r[2] = _mm_loadu_ps(&pSource->_31); + M.r[3] = g_XMIdentityR3; + + // x.x,x.y,y.x,y.y + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + // x.z,x.w,y.z,y.w + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + // z.x,z.y,w.x,w.y + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + // z.z,z.w,w.z,w.w + XMVECTOR vTemp4 = 
_mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + XMMATRIX mResult; + + // x.x,y.x,z.x,w.x + mResult.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + // x.y,y.y,z.y,w.y + mResult.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + // x.z,y.z,z.z,w.z + mResult.r[2] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + // x.w,y.w,z.w,w.w + mResult.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1)); + return mResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat3x4A(const XMFLOAT3X4A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[1][0]; + M.r[0].vector4_f32[2] = pSource->m[2][0]; + M.r[0].vector4_f32[3] = 0.0f; + + M.r[1].vector4_f32[0] = pSource->m[0][1]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[2][1]; + M.r[1].vector4_f32[3] = 0.0f; + + M.r[2].vector4_f32[0] = pSource->m[0][2]; + M.r[2].vector4_f32[1] = pSource->m[1][2]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = 0.0f; + + M.r[3].vector4_f32[0] = pSource->m[0][3]; + M.r[3].vector4_f32[1] = pSource->m[1][3]; + M.r[3].vector4_f32[2] = pSource->m[2][3]; + M.r[3].vector4_f32[3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + float32x2x4_t vTemp0 = vld4_f32_ex(&pSource->_11, 128); + float32x4_t vTemp1 = vld1q_f32_ex(&pSource->_31, 128); +#else + float32x2x4_t vTemp0 = vld4_f32(&pSource->_11); + float32x4_t vTemp1 = vld1q_f32(&pSource->_31); +#endif + + float32x2_t l = vget_low_f32(vTemp1); + float32x4_t T0 = vcombine_f32(vTemp0.val[0], l); + float32x2_t rl = vrev64_f32(l); + float32x4_t T1 = vcombine_f32(vTemp0.val[1], rl); + + float32x2_t h = vget_high_f32(vTemp1); + float32x4_t 
T2 = vcombine_f32(vTemp0.val[2], h); + float32x2_t rh = vrev64_f32(h); + float32x4_t T3 = vcombine_f32(vTemp0.val[3], rh); + + XMMATRIX M = {}; + M.r[0] = vandq_u32(T0, g_XMMask3); + M.r[1] = vandq_u32(T1, g_XMMask3); + M.r[2] = vandq_u32(T2, g_XMMask3); + M.r[3] = vsetq_lane_f32(1.f, T3, 3); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_load_ps(&pSource->_11); + M.r[1] = _mm_load_ps(&pSource->_21); + M.r[2] = _mm_load_ps(&pSource->_31); + M.r[3] = g_XMIdentityR3; + + // x.x,x.y,y.x,y.y + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + // x.z,x.w,y.z,y.w + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + // z.x,z.y,w.x,w.y + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + // z.z,z.w,w.z,w.w + XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + XMMATRIX mResult; + + // x.x,y.x,z.x,w.x + mResult.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + // x.y,y.y,z.y,w.y + mResult.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + // x.z,y.z,z.z,w.z + mResult.r[2] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + // x.w,y.w,z.w,w.w + mResult.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1)); + return mResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat4x4(const XMFLOAT4X4* pSource) noexcept +{ + assert(pSource); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[0][1]; + M.r[0].vector4_f32[2] = pSource->m[0][2]; + M.r[0].vector4_f32[3] = pSource->m[0][3]; + + M.r[1].vector4_f32[0] = pSource->m[1][0]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[1][2]; + M.r[1].vector4_f32[3] = pSource->m[1][3]; + + M.r[2].vector4_f32[0] = pSource->m[2][0]; + 
M.r[2].vector4_f32[1] = pSource->m[2][1]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = pSource->m[2][3]; + + M.r[3].vector4_f32[0] = pSource->m[3][0]; + M.r[3].vector4_f32[1] = pSource->m[3][1]; + M.r[3].vector4_f32[2] = pSource->m[3][2]; + M.r[3].vector4_f32[3] = pSource->m[3][3]; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; + M.r[0] = vld1q_f32(reinterpret_cast(&pSource->_11)); + M.r[1] = vld1q_f32(reinterpret_cast(&pSource->_21)); + M.r[2] = vld1q_f32(reinterpret_cast(&pSource->_31)); + M.r[3] = vld1q_f32(reinterpret_cast(&pSource->_41)); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_loadu_ps(&pSource->_11); + M.r[1] = _mm_loadu_ps(&pSource->_21); + M.r[2] = _mm_loadu_ps(&pSource->_31); + M.r[3] = _mm_loadu_ps(&pSource->_41); + return M; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMLoadFloat4x4A(const XMFLOAT4X4A* pSource) noexcept +{ + assert(pSource); + assert((reinterpret_cast(pSource) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.r[0].vector4_f32[0] = pSource->m[0][0]; + M.r[0].vector4_f32[1] = pSource->m[0][1]; + M.r[0].vector4_f32[2] = pSource->m[0][2]; + M.r[0].vector4_f32[3] = pSource->m[0][3]; + + M.r[1].vector4_f32[0] = pSource->m[1][0]; + M.r[1].vector4_f32[1] = pSource->m[1][1]; + M.r[1].vector4_f32[2] = pSource->m[1][2]; + M.r[1].vector4_f32[3] = pSource->m[1][3]; + + M.r[2].vector4_f32[0] = pSource->m[2][0]; + M.r[2].vector4_f32[1] = pSource->m[2][1]; + M.r[2].vector4_f32[2] = pSource->m[2][2]; + M.r[2].vector4_f32[3] = pSource->m[2][3]; + + M.r[3].vector4_f32[0] = pSource->m[3][0]; + M.r[3].vector4_f32[1] = pSource->m[3][1]; + M.r[3].vector4_f32[2] = pSource->m[3][2]; + M.r[3].vector4_f32[3] = pSource->m[3][3]; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; +#ifdef _MSC_VER + M.r[0] = 
vld1q_f32_ex(reinterpret_cast(&pSource->_11), 128); + M.r[1] = vld1q_f32_ex(reinterpret_cast(&pSource->_21), 128); + M.r[2] = vld1q_f32_ex(reinterpret_cast(&pSource->_31), 128); + M.r[3] = vld1q_f32_ex(reinterpret_cast(&pSource->_41), 128); +#else + M.r[0] = vld1q_f32(reinterpret_cast(&pSource->_11)); + M.r[1] = vld1q_f32(reinterpret_cast(&pSource->_21)); + M.r[2] = vld1q_f32(reinterpret_cast(&pSource->_31)); + M.r[3] = vld1q_f32(reinterpret_cast(&pSource->_41)); +#endif + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_load_ps(&pSource->_11); + M.r[1] = _mm_load_ps(&pSource->_21); + M.r[2] = _mm_load_ps(&pSource->_31); + M.r[3] = _mm_load_ps(&pSource->_41); + return M; +#endif +} + +/**************************************************************************** + * + * Vector and matrix store operations + * + ****************************************************************************/ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + *pDestination = XMVectorGetIntX(V); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(pDestination, *reinterpret_cast(&V), 0); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ss(reinterpret_cast(pDestination), V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat +( + float* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + *pDestination = XMVectorGetX(V); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(pDestination, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ss(pDestination, V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt2 +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + 
assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t VL = vget_low_u32(V); + vst1_u32(pDestination, VL); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt2A +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t VL = vget_low_u32(V); +#ifdef _MSC_VER + vst1_u32_ex(pDestination, VL, 64); +#else + vst1_u32(pDestination, VL); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat2 +( + XMFLOAT2* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + vst1_f32(reinterpret_cast(pDestination), VL); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat2A +( + XMFLOAT2A* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = 
V.vector4_f32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); +#ifdef _MSC_VER + vst1_f32_ex(reinterpret_cast(pDestination), VL, 64); +#else + vst1_f32(reinterpret_cast(pDestination), VL); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreSInt2 +( + XMINT2* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x2_t v = vget_low_s32(V); + v = vcvt_s32_f32(v); + vst1_s32(reinterpret_cast(pDestination), v); +#elif defined(_XM_SSE_INTRINSICS_) + // In case of positive overflow, detect it + XMVECTOR vOverflow = _mm_cmpgt_ps(V, g_XMMaxInt); + // Float to int conversion + __m128i vResulti = _mm_cvttps_epi32(V); + // If there was positive overflow, set to 0x7FFFFFFF + XMVECTOR vResult = _mm_and_ps(vOverflow, g_XMAbsMask); + vOverflow = _mm_andnot_ps(vOverflow, _mm_castsi128_ps(vResulti)); + vOverflow = _mm_or_ps(vOverflow, vResult); + // Write two ints + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(vOverflow)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreUInt2 +( + XMUINT2* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t v = vget_low_f32(V); + uint32x2_t iv = vcvt_u32_f32(v); + vst1_u32(reinterpret_cast(pDestination), iv); +#elif defined(_XM_SSE_INTRINSICS_) + // Clamp to >=0 + XMVECTOR vResult = _mm_max_ps(V, 
g_XMZero); + // Any numbers that are too big, set to 0xFFFFFFFFU + XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxUInt); + XMVECTOR vValue = g_XMUnsignedFix; + // Too large for a signed integer? + XMVECTOR vMask = _mm_cmpge_ps(vResult, vValue); + // Zero for number's lower than 0x80000000, 32768.0f*65536.0f otherwise + vValue = _mm_and_ps(vValue, vMask); + // Perform fixup only on numbers too large (Keeps low bit precision) + vResult = _mm_sub_ps(vResult, vValue); + __m128i vResulti = _mm_cvttps_epi32(vResult); + // Convert from signed to unsigned pnly if greater than 0x80000000 + vMask = _mm_and_ps(vMask, g_XMNegativeZero); + vResult = _mm_xor_ps(_mm_castsi128_ps(vResulti), vMask); + // On those that are too large, set to 0xFFFFFFFF + vResult = _mm_or_ps(vResult, vOverflow); + // Write two uints + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(vResult)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt3 +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; + pDestination[2] = V.vector4_u32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t VL = vget_low_u32(V); + vst1_u32(pDestination, VL); + vst1q_lane_u32(pDestination + 2, *reinterpret_cast(&V), 2); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + __m128 z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(&pDestination[2]), z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt3A +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = 
V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; + pDestination[2] = V.vector4_u32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t VL = vget_low_u32(V); +#ifdef _MSC_VER + vst1_u32_ex(pDestination, VL, 64); +#else + vst1_u32(pDestination, VL); +#endif + vst1q_lane_u32(pDestination + 2, *reinterpret_cast(&V), 2); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + __m128 z = _mm_movehl_ps(V, V); + _mm_store_ss(reinterpret_cast(&pDestination[2]), z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat3 +( + XMFLOAT3* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; + pDestination->z = V.vector4_f32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + vst1_f32(reinterpret_cast(pDestination), VL); + vst1q_lane_f32(reinterpret_cast(pDestination) + 2, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + * reinterpret_cast(&pDestination->x) = _mm_extract_ps(V, 0); + *reinterpret_cast(&pDestination->y) = _mm_extract_ps(V, 1); + *reinterpret_cast(&pDestination->z) = _mm_extract_ps(V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + __m128 z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(&pDestination->z, z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat3A +( + XMFLOAT3A* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; + pDestination->z = V.vector4_f32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + 
float32x2_t VL = vget_low_f32(V); +#ifdef _MSC_VER + vst1_f32_ex(reinterpret_cast(pDestination), VL, 64); +#else + vst1_f32(reinterpret_cast(pDestination), VL); +#endif + vst1q_lane_f32(reinterpret_cast(pDestination) + 2, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + *reinterpret_cast(&pDestination->z) = _mm_extract_ps(V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(V)); + __m128 z = _mm_movehl_ps(V, V); + _mm_store_ss(&pDestination->z, z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreSInt3 +( + XMINT3* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); + pDestination->z = static_cast(V.vector4_f32[2]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t v = vcvtq_s32_f32(V); + int32x2_t vL = vget_low_s32(v); + vst1_s32(reinterpret_cast(pDestination), vL); + vst1q_lane_s32(reinterpret_cast(pDestination) + 2, v, 2); +#elif defined(_XM_SSE_INTRINSICS_) + // In case of positive overflow, detect it + XMVECTOR vOverflow = _mm_cmpgt_ps(V, g_XMMaxInt); + // Float to int conversion + __m128i vResulti = _mm_cvttps_epi32(V); + // If there was positive overflow, set to 0x7FFFFFFF + XMVECTOR vResult = _mm_and_ps(vOverflow, g_XMAbsMask); + vOverflow = _mm_andnot_ps(vOverflow, _mm_castsi128_ps(vResulti)); + vOverflow = _mm_or_ps(vOverflow, vResult); + // Write 3 uints + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(vOverflow)); + __m128 z = XM_PERMUTE_PS(vOverflow, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(&pDestination->z), z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV 
XMStoreUInt3 +( + XMUINT3* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); + pDestination->z = static_cast(V.vector4_f32[2]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t v = vcvtq_u32_f32(V); + uint32x2_t vL = vget_low_u32(v); + vst1_u32(reinterpret_cast(pDestination), vL); + vst1q_lane_u32(reinterpret_cast(pDestination) + 2, v, 2); +#elif defined(_XM_SSE_INTRINSICS_) + // Clamp to >=0 + XMVECTOR vResult = _mm_max_ps(V, g_XMZero); + // Any numbers that are too big, set to 0xFFFFFFFFU + XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxUInt); + XMVECTOR vValue = g_XMUnsignedFix; + // Too large for a signed integer? + XMVECTOR vMask = _mm_cmpge_ps(vResult, vValue); + // Zero for number's lower than 0x80000000, 32768.0f*65536.0f otherwise + vValue = _mm_and_ps(vValue, vMask); + // Perform fixup only on numbers too large (Keeps low bit precision) + vResult = _mm_sub_ps(vResult, vValue); + __m128i vResulti = _mm_cvttps_epi32(vResult); + // Convert from signed to unsigned pnly if greater than 0x80000000 + vMask = _mm_and_ps(vMask, g_XMNegativeZero); + vResult = _mm_xor_ps(_mm_castsi128_ps(vResulti), vMask); + // On those that are too large, set to 0xFFFFFFFF + vResult = _mm_or_ps(vResult, vOverflow); + // Write 3 uints + _mm_store_sd(reinterpret_cast(pDestination), _mm_castps_pd(vResult)); + __m128 z = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(&pDestination->z), z); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt4 +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; + pDestination[2] = V.vector4_u32[2]; + pDestination[3] = 
V.vector4_u32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_u32(pDestination, V); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_storeu_si128(reinterpret_cast<__m128i*>(pDestination), _mm_castps_si128(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreInt4A +( + uint32_t* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination[0] = V.vector4_u32[0]; + pDestination[1] = V.vector4_u32[1]; + pDestination[2] = V.vector4_u32[2]; + pDestination[3] = V.vector4_u32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + vst1q_u32_ex(pDestination, V, 128); +#else + vst1q_u32(pDestination, V); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_si128(reinterpret_cast<__m128i*>(pDestination), _mm_castps_si128(V)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat4 +( + XMFLOAT4* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; + pDestination->z = V.vector4_f32[2]; + pDestination->w = V.vector4_f32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_f32(reinterpret_cast(pDestination), V); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_storeu_ps(&pDestination->x, V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat4A +( + XMFLOAT4A* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = V.vector4_f32[0]; + pDestination->y = V.vector4_f32[1]; + pDestination->z = V.vector4_f32[2]; + pDestination->w = 
V.vector4_f32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + vst1q_f32_ex(reinterpret_cast(pDestination), V, 128); +#else + vst1q_f32(reinterpret_cast(pDestination), V); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ps(&pDestination->x, V); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreSInt4 +( + XMINT4* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); + pDestination->z = static_cast(V.vector4_f32[2]); + pDestination->w = static_cast(V.vector4_f32[3]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t v = vcvtq_s32_f32(V); + vst1q_s32(reinterpret_cast(pDestination), v); +#elif defined(_XM_SSE_INTRINSICS_) + // In case of positive overflow, detect it + XMVECTOR vOverflow = _mm_cmpgt_ps(V, g_XMMaxInt); + // Float to int conversion + __m128i vResulti = _mm_cvttps_epi32(V); + // If there was positive overflow, set to 0x7FFFFFFF + XMVECTOR vResult = _mm_and_ps(vOverflow, g_XMAbsMask); + vOverflow = _mm_andnot_ps(vOverflow, _mm_castsi128_ps(vResulti)); + vOverflow = _mm_or_ps(vOverflow, vResult); + _mm_storeu_si128(reinterpret_cast<__m128i*>(pDestination), _mm_castps_si128(vOverflow)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreUInt4 +( + XMUINT4* pDestination, + FXMVECTOR V +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + pDestination->x = static_cast(V.vector4_f32[0]); + pDestination->y = static_cast(V.vector4_f32[1]); + pDestination->z = static_cast(V.vector4_f32[2]); + pDestination->w = static_cast(V.vector4_f32[3]); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t v = vcvtq_u32_f32(V); + vst1q_u32(reinterpret_cast(pDestination), v); +#elif 
defined(_XM_SSE_INTRINSICS_) + // Clamp to >=0 + XMVECTOR vResult = _mm_max_ps(V, g_XMZero); + // Any numbers that are too big, set to 0xFFFFFFFFU + XMVECTOR vOverflow = _mm_cmpgt_ps(vResult, g_XMMaxUInt); + XMVECTOR vValue = g_XMUnsignedFix; + // Too large for a signed integer? + XMVECTOR vMask = _mm_cmpge_ps(vResult, vValue); + // Zero for number's lower than 0x80000000, 32768.0f*65536.0f otherwise + vValue = _mm_and_ps(vValue, vMask); + // Perform fixup only on numbers too large (Keeps low bit precision) + vResult = _mm_sub_ps(vResult, vValue); + __m128i vResulti = _mm_cvttps_epi32(vResult); + // Convert from signed to unsigned pnly if greater than 0x80000000 + vMask = _mm_and_ps(vMask, g_XMNegativeZero); + vResult = _mm_xor_ps(_mm_castsi128_ps(vResulti), vMask); + // On those that are too large, set to 0xFFFFFFFF + vResult = _mm_or_ps(vResult, vOverflow); + _mm_storeu_si128(reinterpret_cast<__m128i*>(pDestination), _mm_castps_si128(vResult)); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat3x3 +( + XMFLOAT3X3* pDestination, + FXMMATRIX M +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + + pDestination->m[0][0] = M.r[0].vector4_f32[0]; + pDestination->m[0][1] = M.r[0].vector4_f32[1]; + pDestination->m[0][2] = M.r[0].vector4_f32[2]; + + pDestination->m[1][0] = M.r[1].vector4_f32[0]; + pDestination->m[1][1] = M.r[1].vector4_f32[1]; + pDestination->m[1][2] = M.r[1].vector4_f32[2]; + + pDestination->m[2][0] = M.r[2].vector4_f32[0]; + pDestination->m[2][1] = M.r[2].vector4_f32[1]; + pDestination->m[2][2] = M.r[2].vector4_f32[2]; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t T1 = vextq_f32(M.r[0], M.r[1], 1); + float32x4_t T2 = vbslq_f32(g_XMMask3, M.r[0], T1); + vst1q_f32(&pDestination->m[0][0], T2); + + T1 = vextq_f32(M.r[1], M.r[1], 1); + T2 = vcombine_f32(vget_low_f32(T1), vget_low_f32(M.r[2])); + 
vst1q_f32(&pDestination->m[1][1], T2); + + vst1q_lane_f32(&pDestination->m[2][2], M.r[2], 2); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp1 = M.r[0]; + XMVECTOR vTemp2 = M.r[1]; + XMVECTOR vTemp3 = M.r[2]; + XMVECTOR vWork = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(0, 0, 2, 2)); + vTemp1 = _mm_shuffle_ps(vTemp1, vWork, _MM_SHUFFLE(2, 0, 1, 0)); + _mm_storeu_ps(&pDestination->m[0][0], vTemp1); + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp3, _MM_SHUFFLE(1, 0, 2, 1)); + _mm_storeu_ps(&pDestination->m[1][1], vTemp2); + vTemp3 = XM_PERMUTE_PS(vTemp3, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(&pDestination->m[2][2], vTemp3); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat4x3 +( + XMFLOAT4X3* pDestination, + FXMMATRIX M +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + + pDestination->m[0][0] = M.r[0].vector4_f32[0]; + pDestination->m[0][1] = M.r[0].vector4_f32[1]; + pDestination->m[0][2] = M.r[0].vector4_f32[2]; + + pDestination->m[1][0] = M.r[1].vector4_f32[0]; + pDestination->m[1][1] = M.r[1].vector4_f32[1]; + pDestination->m[1][2] = M.r[1].vector4_f32[2]; + + pDestination->m[2][0] = M.r[2].vector4_f32[0]; + pDestination->m[2][1] = M.r[2].vector4_f32[1]; + pDestination->m[2][2] = M.r[2].vector4_f32[2]; + + pDestination->m[3][0] = M.r[3].vector4_f32[0]; + pDestination->m[3][1] = M.r[3].vector4_f32[1]; + pDestination->m[3][2] = M.r[3].vector4_f32[2]; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t T1 = vextq_f32(M.r[0], M.r[1], 1); + float32x4_t T2 = vbslq_f32(g_XMMask3, M.r[0], T1); + vst1q_f32(&pDestination->m[0][0], T2); + + T1 = vextq_f32(M.r[1], M.r[1], 1); + T2 = vcombine_f32(vget_low_f32(T1), vget_low_f32(M.r[2])); + vst1q_f32(&pDestination->m[1][1], T2); + + T1 = vdupq_lane_f32(vget_high_f32(M.r[2]), 0); + T2 = vextq_f32(T1, M.r[3], 3); + vst1q_f32(&pDestination->m[2][2], T2); +#elif defined(_XM_SSE_INTRINSICS_) + 
XMVECTOR vTemp1 = M.r[0]; + XMVECTOR vTemp2 = M.r[1]; + XMVECTOR vTemp3 = M.r[2]; + XMVECTOR vTemp4 = M.r[3]; + XMVECTOR vTemp2x = _mm_shuffle_ps(vTemp2, vTemp3, _MM_SHUFFLE(1, 0, 2, 1)); + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp1, _MM_SHUFFLE(2, 2, 0, 0)); + vTemp1 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(0, 2, 1, 0)); + vTemp3 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(0, 0, 2, 2)); + vTemp3 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 1, 2, 0)); + _mm_storeu_ps(&pDestination->m[0][0], vTemp1); + _mm_storeu_ps(&pDestination->m[1][1], vTemp2x); + _mm_storeu_ps(&pDestination->m[2][2], vTemp3); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat4x3A +( + XMFLOAT4X3A* pDestination, + FXMMATRIX M +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + + pDestination->m[0][0] = M.r[0].vector4_f32[0]; + pDestination->m[0][1] = M.r[0].vector4_f32[1]; + pDestination->m[0][2] = M.r[0].vector4_f32[2]; + + pDestination->m[1][0] = M.r[1].vector4_f32[0]; + pDestination->m[1][1] = M.r[1].vector4_f32[1]; + pDestination->m[1][2] = M.r[1].vector4_f32[2]; + + pDestination->m[2][0] = M.r[2].vector4_f32[0]; + pDestination->m[2][1] = M.r[2].vector4_f32[1]; + pDestination->m[2][2] = M.r[2].vector4_f32[2]; + + pDestination->m[3][0] = M.r[3].vector4_f32[0]; + pDestination->m[3][1] = M.r[3].vector4_f32[1]; + pDestination->m[3][2] = M.r[3].vector4_f32[2]; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + float32x4_t T1 = vextq_f32(M.r[0], M.r[1], 1); + float32x4_t T2 = vbslq_f32(g_XMMask3, M.r[0], T1); + vst1q_f32_ex(&pDestination->m[0][0], T2, 128); + + T1 = vextq_f32(M.r[1], M.r[1], 1); + T2 = vcombine_f32(vget_low_f32(T1), vget_low_f32(M.r[2])); + vst1q_f32_ex(&pDestination->m[1][1], T2, 128); + + T1 = vdupq_lane_f32(vget_high_f32(M.r[2]), 0); + T2 = vextq_f32(T1, M.r[3], 3); + 
vst1q_f32_ex(&pDestination->m[2][2], T2, 128); +#else + float32x4_t T1 = vextq_f32(M.r[0], M.r[1], 1); + float32x4_t T2 = vbslq_f32(g_XMMask3, M.r[0], T1); + vst1q_f32(&pDestination->m[0][0], T2); + + T1 = vextq_f32(M.r[1], M.r[1], 1); + T2 = vcombine_f32(vget_low_f32(T1), vget_low_f32(M.r[2])); + vst1q_f32(&pDestination->m[1][1], T2); + + T1 = vdupq_lane_f32(vget_high_f32(M.r[2]), 0); + T2 = vextq_f32(T1, M.r[3], 3); + vst1q_f32(&pDestination->m[2][2], T2); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + // x1,y1,z1,w1 + XMVECTOR vTemp1 = M.r[0]; + // x2,y2,z2,w2 + XMVECTOR vTemp2 = M.r[1]; + // x3,y3,z3,w3 + XMVECTOR vTemp3 = M.r[2]; + // x4,y4,z4,w4 + XMVECTOR vTemp4 = M.r[3]; + // z1,z1,x2,y2 + XMVECTOR vTemp = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(1, 0, 2, 2)); + // y2,z2,x3,y3 (Final) + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp3, _MM_SHUFFLE(1, 0, 2, 1)); + // x1,y1,z1,x2 (Final) + vTemp1 = _mm_shuffle_ps(vTemp1, vTemp, _MM_SHUFFLE(2, 0, 1, 0)); + // z3,z3,x4,x4 + vTemp3 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(0, 0, 2, 2)); + // z3,x4,y4,z4 (Final) + vTemp3 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 1, 2, 0)); + // Store in 3 operations + _mm_store_ps(&pDestination->m[0][0], vTemp1); + _mm_store_ps(&pDestination->m[1][1], vTemp2); + _mm_store_ps(&pDestination->m[2][2], vTemp3); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat3x4 +( + XMFLOAT3X4* pDestination, + FXMMATRIX M +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + + pDestination->m[0][0] = M.r[0].vector4_f32[0]; + pDestination->m[0][1] = M.r[1].vector4_f32[0]; + pDestination->m[0][2] = M.r[2].vector4_f32[0]; + pDestination->m[0][3] = M.r[3].vector4_f32[0]; + + pDestination->m[1][0] = M.r[0].vector4_f32[1]; + pDestination->m[1][1] = M.r[1].vector4_f32[1]; + pDestination->m[1][2] = M.r[2].vector4_f32[1]; + pDestination->m[1][3] = M.r[3].vector4_f32[1]; + + 
pDestination->m[2][0] = M.r[0].vector4_f32[2]; + pDestination->m[2][1] = M.r[1].vector4_f32[2]; + pDestination->m[2][2] = M.r[2].vector4_f32[2]; + pDestination->m[2][3] = M.r[3].vector4_f32[2]; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4x2_t P0 = vzipq_f32(M.r[0], M.r[2]); + float32x4x2_t P1 = vzipq_f32(M.r[1], M.r[3]); + + float32x4x2_t T0 = vzipq_f32(P0.val[0], P1.val[0]); + float32x4x2_t T1 = vzipq_f32(P0.val[1], P1.val[1]); + + vst1q_f32(&pDestination->m[0][0], T0.val[0]); + vst1q_f32(&pDestination->m[1][0], T0.val[1]); + vst1q_f32(&pDestination->m[2][0], T1.val[0]); +#elif defined(_XM_SSE_INTRINSICS_) + // x.x,x.y,y.x,y.y + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + // x.z,x.w,y.z,y.w + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + // z.x,z.y,w.x,w.y + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + // z.z,z.w,w.z,w.w + XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + + // x.x,y.x,z.x,w.x + XMVECTOR r0 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + // x.y,y.y,z.y,w.y + XMVECTOR r1 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + // x.z,y.z,z.z,w.z + XMVECTOR r2 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + + _mm_storeu_ps(&pDestination->m[0][0], r0); + _mm_storeu_ps(&pDestination->m[1][0], r1); + _mm_storeu_ps(&pDestination->m[2][0], r2); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat3x4A +( + XMFLOAT3X4A* pDestination, + FXMMATRIX M +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + + pDestination->m[0][0] = M.r[0].vector4_f32[0]; + pDestination->m[0][1] = M.r[1].vector4_f32[0]; + pDestination->m[0][2] = M.r[2].vector4_f32[0]; + pDestination->m[0][3] = M.r[3].vector4_f32[0]; + + pDestination->m[1][0] = 
M.r[0].vector4_f32[1]; + pDestination->m[1][1] = M.r[1].vector4_f32[1]; + pDestination->m[1][2] = M.r[2].vector4_f32[1]; + pDestination->m[1][3] = M.r[3].vector4_f32[1]; + + pDestination->m[2][0] = M.r[0].vector4_f32[2]; + pDestination->m[2][1] = M.r[1].vector4_f32[2]; + pDestination->m[2][2] = M.r[2].vector4_f32[2]; + pDestination->m[2][3] = M.r[3].vector4_f32[2]; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4x2_t P0 = vzipq_f32(M.r[0], M.r[2]); + float32x4x2_t P1 = vzipq_f32(M.r[1], M.r[3]); + + float32x4x2_t T0 = vzipq_f32(P0.val[0], P1.val[0]); + float32x4x2_t T1 = vzipq_f32(P0.val[1], P1.val[1]); + +#ifdef _MSC_VER + vst1q_f32_ex(&pDestination->m[0][0], T0.val[0], 128); + vst1q_f32_ex(&pDestination->m[1][0], T0.val[1], 128); + vst1q_f32_ex(&pDestination->m[2][0], T1.val[0], 128); +#else + vst1q_f32(&pDestination->m[0][0], T0.val[0]); + vst1q_f32(&pDestination->m[1][0], T0.val[1]); + vst1q_f32(&pDestination->m[2][0], T1.val[0]); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + // x.x,x.y,y.x,y.y + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + // x.z,x.w,y.z,y.w + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + // z.x,z.y,w.x,w.y + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + // z.z,z.w,w.z,w.w + XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + + // x.x,y.x,z.x,w.x + XMVECTOR r0 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + // x.y,y.y,z.y,w.y + XMVECTOR r1 = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + // x.z,y.z,z.z,w.z + XMVECTOR r2 = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + + _mm_store_ps(&pDestination->m[0][0], r0); + _mm_store_ps(&pDestination->m[1][0], r1); + _mm_store_ps(&pDestination->m[2][0], r2); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat4x4 +( + XMFLOAT4X4* pDestination, 
+ FXMMATRIX M +) noexcept +{ + assert(pDestination); +#if defined(_XM_NO_INTRINSICS_) + + pDestination->m[0][0] = M.r[0].vector4_f32[0]; + pDestination->m[0][1] = M.r[0].vector4_f32[1]; + pDestination->m[0][2] = M.r[0].vector4_f32[2]; + pDestination->m[0][3] = M.r[0].vector4_f32[3]; + + pDestination->m[1][0] = M.r[1].vector4_f32[0]; + pDestination->m[1][1] = M.r[1].vector4_f32[1]; + pDestination->m[1][2] = M.r[1].vector4_f32[2]; + pDestination->m[1][3] = M.r[1].vector4_f32[3]; + + pDestination->m[2][0] = M.r[2].vector4_f32[0]; + pDestination->m[2][1] = M.r[2].vector4_f32[1]; + pDestination->m[2][2] = M.r[2].vector4_f32[2]; + pDestination->m[2][3] = M.r[2].vector4_f32[3]; + + pDestination->m[3][0] = M.r[3].vector4_f32[0]; + pDestination->m[3][1] = M.r[3].vector4_f32[1]; + pDestination->m[3][2] = M.r[3].vector4_f32[2]; + pDestination->m[3][3] = M.r[3].vector4_f32[3]; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_f32(reinterpret_cast(&pDestination->_11), M.r[0]); + vst1q_f32(reinterpret_cast(&pDestination->_21), M.r[1]); + vst1q_f32(reinterpret_cast(&pDestination->_31), M.r[2]); + vst1q_f32(reinterpret_cast(&pDestination->_41), M.r[3]); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_storeu_ps(&pDestination->_11, M.r[0]); + _mm_storeu_ps(&pDestination->_21, M.r[1]); + _mm_storeu_ps(&pDestination->_31, M.r[2]); + _mm_storeu_ps(&pDestination->_41, M.r[3]); +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMStoreFloat4x4A +( + XMFLOAT4X4A* pDestination, + FXMMATRIX M +) noexcept +{ + assert(pDestination); + assert((reinterpret_cast(pDestination) & 0xF) == 0); +#if defined(_XM_NO_INTRINSICS_) + + pDestination->m[0][0] = M.r[0].vector4_f32[0]; + pDestination->m[0][1] = M.r[0].vector4_f32[1]; + pDestination->m[0][2] = M.r[0].vector4_f32[2]; + pDestination->m[0][3] = M.r[0].vector4_f32[3]; + + pDestination->m[1][0] = M.r[1].vector4_f32[0]; + pDestination->m[1][1] = 
M.r[1].vector4_f32[1]; + pDestination->m[1][2] = M.r[1].vector4_f32[2]; + pDestination->m[1][3] = M.r[1].vector4_f32[3]; + + pDestination->m[2][0] = M.r[2].vector4_f32[0]; + pDestination->m[2][1] = M.r[2].vector4_f32[1]; + pDestination->m[2][2] = M.r[2].vector4_f32[2]; + pDestination->m[2][3] = M.r[2].vector4_f32[3]; + + pDestination->m[3][0] = M.r[3].vector4_f32[0]; + pDestination->m[3][1] = M.r[3].vector4_f32[1]; + pDestination->m[3][2] = M.r[3].vector4_f32[2]; + pDestination->m[3][3] = M.r[3].vector4_f32[3]; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#ifdef _MSC_VER + vst1q_f32_ex(reinterpret_cast(&pDestination->_11), M.r[0], 128); + vst1q_f32_ex(reinterpret_cast(&pDestination->_21), M.r[1], 128); + vst1q_f32_ex(reinterpret_cast(&pDestination->_31), M.r[2], 128); + vst1q_f32_ex(reinterpret_cast(&pDestination->_41), M.r[3], 128); +#else + vst1q_f32(reinterpret_cast(&pDestination->_11), M.r[0]); + vst1q_f32(reinterpret_cast(&pDestination->_21), M.r[1]); + vst1q_f32(reinterpret_cast(&pDestination->_31), M.r[2]); + vst1q_f32(reinterpret_cast(&pDestination->_41), M.r[3]); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ps(&pDestination->_11, M.r[0]); + _mm_store_ps(&pDestination->_21, M.r[1]); + _mm_store_ps(&pDestination->_31, M.r[2]); + _mm_store_ps(&pDestination->_41, M.r[3]); +#endif +} + diff --git a/include/directxmath/directxmathmatrix.inl b/include/directxmath/directxmathmatrix.inl new file mode 100644 index 0000000..606a5c6 --- /dev/null +++ b/include/directxmath/directxmathmatrix.inl @@ -0,0 +1,3413 @@ +//------------------------------------------------------------------------------------- +// DirectXMathMatrix.inl -- SIMD C++ Math library +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+// +// http://go.microsoft.com/fwlink/?LinkID=615560 +//------------------------------------------------------------------------------------- + +#pragma once + +/**************************************************************************** + * + * Matrix + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Comparison operations + //------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(push) +#pragma float_control(precise, on) +#endif + +// Return true if any entry in the matrix is NaN +inline bool XM_CALLCONV XMMatrixIsNaN(FXMMATRIX M) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + size_t i = 16; + auto pWork = reinterpret_cast(&M.m[0][0]); + do { + // Fetch value into integer unit + uint32_t uTest = pWork[0]; + // Remove sign + uTest &= 0x7FFFFFFFU; + // NaN is 0x7F800001 through 0x7FFFFFFF inclusive + uTest -= 0x7F800001U; + if (uTest < 0x007FFFFFU) + { + break; // NaN found + } + ++pWork; // Next entry + } while (--i); + return (i != 0); // i == 0 if nothing matched +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Load in registers + XMVECTOR vX = M.r[0]; + XMVECTOR vY = M.r[1]; + XMVECTOR vZ = M.r[2]; + XMVECTOR vW = M.r[3]; + // Test themselves to check for NaN + vX = vmvnq_u32(vceqq_f32(vX, vX)); + vY = vmvnq_u32(vceqq_f32(vY, vY)); + vZ = vmvnq_u32(vceqq_f32(vZ, vZ)); + vW = vmvnq_u32(vceqq_f32(vW, vW)); + // Or all the results + vX = vorrq_u32(vX, vZ); + vY = vorrq_u32(vY, vW); + vX = vorrq_u32(vX, vY); + // If any tested true, return true + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vX), vget_high_u8(vX)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = 
vget_lane_u32(vTemp2.val[1], 1); + return (r != 0); +#elif defined(_XM_SSE_INTRINSICS_) + // Load in registers + XMVECTOR vX = M.r[0]; + XMVECTOR vY = M.r[1]; + XMVECTOR vZ = M.r[2]; + XMVECTOR vW = M.r[3]; + // Test themselves to check for NaN + vX = _mm_cmpneq_ps(vX, vX); + vY = _mm_cmpneq_ps(vY, vY); + vZ = _mm_cmpneq_ps(vZ, vZ); + vW = _mm_cmpneq_ps(vW, vW); + // Or all the results + vX = _mm_or_ps(vX, vZ); + vY = _mm_or_ps(vY, vW); + vX = _mm_or_ps(vX, vY); + // If any tested true, return true + return (_mm_movemask_ps(vX) != 0); +#else +#endif +} + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(pop) +#endif + +//------------------------------------------------------------------------------ + +// Return true if any entry in the matrix is +/-INF +inline bool XM_CALLCONV XMMatrixIsInfinite(FXMMATRIX M) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + size_t i = 16; + auto pWork = reinterpret_cast(&M.m[0][0]); + do { + // Fetch value into integer unit + uint32_t uTest = pWork[0]; + // Remove sign + uTest &= 0x7FFFFFFFU; + // INF is 0x7F800000 + if (uTest == 0x7F800000U) + { + break; // INF found + } + ++pWork; // Next entry + } while (--i); + return (i != 0); // i == 0 if nothing matched +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Mask off the sign bits + XMVECTOR vTemp1 = vandq_u32(M.r[0], g_XMAbsMask); + XMVECTOR vTemp2 = vandq_u32(M.r[1], g_XMAbsMask); + XMVECTOR vTemp3 = vandq_u32(M.r[2], g_XMAbsMask); + XMVECTOR vTemp4 = vandq_u32(M.r[3], g_XMAbsMask); + // Compare to infinity + vTemp1 = vceqq_f32(vTemp1, g_XMInfinity); + vTemp2 = vceqq_f32(vTemp2, g_XMInfinity); + vTemp3 = vceqq_f32(vTemp3, g_XMInfinity); + vTemp4 = vceqq_f32(vTemp4, g_XMInfinity); + // Or the answers together + vTemp1 = vorrq_u32(vTemp1, vTemp2); + vTemp3 = vorrq_u32(vTemp3, vTemp4); + vTemp1 = vorrq_u32(vTemp1, vTemp3); + // If any are infinity, the signs are true. 
+ uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTemp1), vget_high_u8(vTemp1)); + uint16x4x2_t vTemp5 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp5.val[1], 1); + return (r != 0); +#elif defined(_XM_SSE_INTRINSICS_) + // Mask off the sign bits + XMVECTOR vTemp1 = _mm_and_ps(M.r[0], g_XMAbsMask); + XMVECTOR vTemp2 = _mm_and_ps(M.r[1], g_XMAbsMask); + XMVECTOR vTemp3 = _mm_and_ps(M.r[2], g_XMAbsMask); + XMVECTOR vTemp4 = _mm_and_ps(M.r[3], g_XMAbsMask); + // Compare to infinity + vTemp1 = _mm_cmpeq_ps(vTemp1, g_XMInfinity); + vTemp2 = _mm_cmpeq_ps(vTemp2, g_XMInfinity); + vTemp3 = _mm_cmpeq_ps(vTemp3, g_XMInfinity); + vTemp4 = _mm_cmpeq_ps(vTemp4, g_XMInfinity); + // Or the answers together + vTemp1 = _mm_or_ps(vTemp1, vTemp2); + vTemp3 = _mm_or_ps(vTemp3, vTemp4); + vTemp1 = _mm_or_ps(vTemp1, vTemp3); + // If any are infinity, the signs are true. + return (_mm_movemask_ps(vTemp1) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +// Return true if the XMMatrix is equal to identity +inline bool XM_CALLCONV XMMatrixIsIdentity(FXMMATRIX M) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + // Use the integer pipeline to reduce branching to a minimum + auto pWork = reinterpret_cast(&M.m[0][0]); + // Convert 1.0f to zero and or them together + uint32_t uOne = pWork[0] ^ 0x3F800000U; + // Or all the 0.0f entries together + uint32_t uZero = pWork[1]; + uZero |= pWork[2]; + uZero |= pWork[3]; + // 2nd row + uZero |= pWork[4]; + uOne |= pWork[5] ^ 0x3F800000U; + uZero |= pWork[6]; + uZero |= pWork[7]; + // 3rd row + uZero |= pWork[8]; + uZero |= pWork[9]; + uOne |= pWork[10] ^ 0x3F800000U; + uZero |= pWork[11]; + // 4th row + uZero |= pWork[12]; + uZero |= pWork[13]; + uZero |= pWork[14]; + uOne |= pWork[15] ^ 0x3F800000U; + // If all zero entries are zero, the uZero==0 + uZero &= 0x7FFFFFFF; // Allow -0.0f + // If all 1.0f entries are 1.0f, then uOne==0 + uOne |= uZero; + return (uOne == 0); +#elif 
defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vTemp1 = vceqq_f32(M.r[0], g_XMIdentityR0); + XMVECTOR vTemp2 = vceqq_f32(M.r[1], g_XMIdentityR1); + XMVECTOR vTemp3 = vceqq_f32(M.r[2], g_XMIdentityR2); + XMVECTOR vTemp4 = vceqq_f32(M.r[3], g_XMIdentityR3); + vTemp1 = vandq_u32(vTemp1, vTemp2); + vTemp3 = vandq_u32(vTemp3, vTemp4); + vTemp1 = vandq_u32(vTemp1, vTemp3); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTemp1), vget_high_u8(vTemp1)); + uint16x4x2_t vTemp5 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp5.val[1], 1); + return (r == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp1 = _mm_cmpeq_ps(M.r[0], g_XMIdentityR0); + XMVECTOR vTemp2 = _mm_cmpeq_ps(M.r[1], g_XMIdentityR1); + XMVECTOR vTemp3 = _mm_cmpeq_ps(M.r[2], g_XMIdentityR2); + XMVECTOR vTemp4 = _mm_cmpeq_ps(M.r[3], g_XMIdentityR3); + vTemp1 = _mm_and_ps(vTemp1, vTemp2); + vTemp3 = _mm_and_ps(vTemp3, vTemp4); + vTemp1 = _mm_and_ps(vTemp1, vTemp3); + return (_mm_movemask_ps(vTemp1) == 0x0f); +#endif +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ +// Perform a 4x4 matrix multiply by a 4x4 matrix +inline XMMATRIX XM_CALLCONV XMMatrixMultiply +( + FXMMATRIX M1, + CXMMATRIX M2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMMATRIX mResult; + // Cache the invariants in registers + float x = M1.m[0][0]; + float y = M1.m[0][1]; + float z = M1.m[0][2]; + float w = M1.m[0][3]; + // Perform the operation on the first row + mResult.m[0][0] = (M2.m[0][0] * x) + (M2.m[1][0] * y) + (M2.m[2][0] * z) + (M2.m[3][0] * w); + mResult.m[0][1] = (M2.m[0][1] * x) + (M2.m[1][1] * y) + (M2.m[2][1] * z) + (M2.m[3][1] * w); + mResult.m[0][2] = (M2.m[0][2] * x) + (M2.m[1][2] * y) + (M2.m[2][2] * z) + (M2.m[3][2] * w); + mResult.m[0][3] = (M2.m[0][3] * x) + 
(M2.m[1][3] * y) + (M2.m[2][3] * z) + (M2.m[3][3] * w); + // Repeat for all the other rows + x = M1.m[1][0]; + y = M1.m[1][1]; + z = M1.m[1][2]; + w = M1.m[1][3]; + mResult.m[1][0] = (M2.m[0][0] * x) + (M2.m[1][0] * y) + (M2.m[2][0] * z) + (M2.m[3][0] * w); + mResult.m[1][1] = (M2.m[0][1] * x) + (M2.m[1][1] * y) + (M2.m[2][1] * z) + (M2.m[3][1] * w); + mResult.m[1][2] = (M2.m[0][2] * x) + (M2.m[1][2] * y) + (M2.m[2][2] * z) + (M2.m[3][2] * w); + mResult.m[1][3] = (M2.m[0][3] * x) + (M2.m[1][3] * y) + (M2.m[2][3] * z) + (M2.m[3][3] * w); + x = M1.m[2][0]; + y = M1.m[2][1]; + z = M1.m[2][2]; + w = M1.m[2][3]; + mResult.m[2][0] = (M2.m[0][0] * x) + (M2.m[1][0] * y) + (M2.m[2][0] * z) + (M2.m[3][0] * w); + mResult.m[2][1] = (M2.m[0][1] * x) + (M2.m[1][1] * y) + (M2.m[2][1] * z) + (M2.m[3][1] * w); + mResult.m[2][2] = (M2.m[0][2] * x) + (M2.m[1][2] * y) + (M2.m[2][2] * z) + (M2.m[3][2] * w); + mResult.m[2][3] = (M2.m[0][3] * x) + (M2.m[1][3] * y) + (M2.m[2][3] * z) + (M2.m[3][3] * w); + x = M1.m[3][0]; + y = M1.m[3][1]; + z = M1.m[3][2]; + w = M1.m[3][3]; + mResult.m[3][0] = (M2.m[0][0] * x) + (M2.m[1][0] * y) + (M2.m[2][0] * z) + (M2.m[3][0] * w); + mResult.m[3][1] = (M2.m[0][1] * x) + (M2.m[1][1] * y) + (M2.m[2][1] * z) + (M2.m[3][1] * w); + mResult.m[3][2] = (M2.m[0][2] * x) + (M2.m[1][2] * y) + (M2.m[2][2] * z) + (M2.m[3][2] * w); + mResult.m[3][3] = (M2.m[0][3] * x) + (M2.m[1][3] * y) + (M2.m[2][3] * z) + (M2.m[3][3] * w); + return mResult; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX mResult; + float32x2_t VL = vget_low_f32(M1.r[0]); + float32x2_t VH = vget_high_f32(M1.r[0]); + // Perform the operation on the first row + XMVECTOR vX = vmulq_lane_f32(M2.r[0], VL, 0); + XMVECTOR vY = vmulq_lane_f32(M2.r[1], VL, 1); + XMVECTOR vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0); + XMVECTOR vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1); + mResult.r[0] = vaddq_f32(vZ, vW); + // Repeat for the other 3 rows + VL = vget_low_f32(M1.r[1]); + VH = vget_high_f32(M1.r[1]); + vX = 
vmulq_lane_f32(M2.r[0], VL, 0); + vY = vmulq_lane_f32(M2.r[1], VL, 1); + vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0); + vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1); + mResult.r[1] = vaddq_f32(vZ, vW); + VL = vget_low_f32(M1.r[2]); + VH = vget_high_f32(M1.r[2]); + vX = vmulq_lane_f32(M2.r[0], VL, 0); + vY = vmulq_lane_f32(M2.r[1], VL, 1); + vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0); + vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1); + mResult.r[2] = vaddq_f32(vZ, vW); + VL = vget_low_f32(M1.r[3]); + VH = vget_high_f32(M1.r[3]); + vX = vmulq_lane_f32(M2.r[0], VL, 0); + vY = vmulq_lane_f32(M2.r[1], VL, 1); + vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0); + vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1); + mResult.r[3] = vaddq_f32(vZ, vW); + return mResult; +#elif defined(_XM_AVX2_INTRINSICS_) + __m256 t0 = _mm256_castps128_ps256(M1.r[0]); + t0 = _mm256_insertf128_ps(t0, M1.r[1], 1); + __m256 t1 = _mm256_castps128_ps256(M1.r[2]); + t1 = _mm256_insertf128_ps(t1, M1.r[3], 1); + + __m256 u0 = _mm256_castps128_ps256(M2.r[0]); + u0 = _mm256_insertf128_ps(u0, M2.r[1], 1); + __m256 u1 = _mm256_castps128_ps256(M2.r[2]); + u1 = _mm256_insertf128_ps(u1, M2.r[3], 1); + + __m256 a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(0, 0, 0, 0)); + __m256 a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0)); + __m256 b0 = _mm256_permute2f128_ps(u0, u0, 0x00); + __m256 c0 = _mm256_mul_ps(a0, b0); + __m256 c1 = _mm256_mul_ps(a1, b0); + + a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(1, 1, 1, 1)); + a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1)); + b0 = _mm256_permute2f128_ps(u0, u0, 0x11); + __m256 c2 = _mm256_fmadd_ps(a0, b0, c0); + __m256 c3 = _mm256_fmadd_ps(a1, b0, c1); + + a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(2, 2, 2, 2)); + a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 b1 = _mm256_permute2f128_ps(u1, u1, 0x00); + __m256 c4 = _mm256_mul_ps(a0, b1); + __m256 c5 = _mm256_mul_ps(a1, b1); + + a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(3, 3, 3, 3)); + a1 = _mm256_shuffle_ps(t1, t1, 
_MM_SHUFFLE(3, 3, 3, 3)); + b1 = _mm256_permute2f128_ps(u1, u1, 0x11); + __m256 c6 = _mm256_fmadd_ps(a0, b1, c4); + __m256 c7 = _mm256_fmadd_ps(a1, b1, c5); + + t0 = _mm256_add_ps(c2, c6); + t1 = _mm256_add_ps(c3, c7); + + XMMATRIX mResult; + mResult.r[0] = _mm256_castps256_ps128(t0); + mResult.r[1] = _mm256_extractf128_ps(t0, 1); + mResult.r[2] = _mm256_castps256_ps128(t1); + mResult.r[3] = _mm256_extractf128_ps(t1, 1); + return mResult; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX mResult; + // Splat the component X,Y,Z then W +#if defined(_XM_AVX_INTRINSICS_) + XMVECTOR vX = _mm_broadcast_ss(reinterpret_cast(&M1.r[0]) + 0); + XMVECTOR vY = _mm_broadcast_ss(reinterpret_cast(&M1.r[0]) + 1); + XMVECTOR vZ = _mm_broadcast_ss(reinterpret_cast(&M1.r[0]) + 2); + XMVECTOR vW = _mm_broadcast_ss(reinterpret_cast(&M1.r[0]) + 3); +#else + // Use vW to hold the original row + XMVECTOR vW = M1.r[0]; + XMVECTOR vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0)); + XMVECTOR vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2)); + vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3)); +#endif + // Perform the operation on the first row + vX = _mm_mul_ps(vX, M2.r[0]); + vY = _mm_mul_ps(vY, M2.r[1]); + vZ = _mm_mul_ps(vZ, M2.r[2]); + vW = _mm_mul_ps(vW, M2.r[3]); + // Perform a binary add to reduce cumulative errors + vX = _mm_add_ps(vX, vZ); + vY = _mm_add_ps(vY, vW); + vX = _mm_add_ps(vX, vY); + mResult.r[0] = vX; + // Repeat for the other 3 rows +#if defined(_XM_AVX_INTRINSICS_) + vX = _mm_broadcast_ss(reinterpret_cast(&M1.r[1]) + 0); + vY = _mm_broadcast_ss(reinterpret_cast(&M1.r[1]) + 1); + vZ = _mm_broadcast_ss(reinterpret_cast(&M1.r[1]) + 2); + vW = _mm_broadcast_ss(reinterpret_cast(&M1.r[1]) + 3); +#else + vW = M1.r[1]; + vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0)); + vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1)); + vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2)); + vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3)); 
+#endif + vX = _mm_mul_ps(vX, M2.r[0]); + vY = _mm_mul_ps(vY, M2.r[1]); + vZ = _mm_mul_ps(vZ, M2.r[2]); + vW = _mm_mul_ps(vW, M2.r[3]); + vX = _mm_add_ps(vX, vZ); + vY = _mm_add_ps(vY, vW); + vX = _mm_add_ps(vX, vY); + mResult.r[1] = vX; +#if defined(_XM_AVX_INTRINSICS_) + vX = _mm_broadcast_ss(reinterpret_cast(&M1.r[2]) + 0); + vY = _mm_broadcast_ss(reinterpret_cast(&M1.r[2]) + 1); + vZ = _mm_broadcast_ss(reinterpret_cast(&M1.r[2]) + 2); + vW = _mm_broadcast_ss(reinterpret_cast(&M1.r[2]) + 3); +#else + vW = M1.r[2]; + vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0)); + vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1)); + vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2)); + vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3)); +#endif + vX = _mm_mul_ps(vX, M2.r[0]); + vY = _mm_mul_ps(vY, M2.r[1]); + vZ = _mm_mul_ps(vZ, M2.r[2]); + vW = _mm_mul_ps(vW, M2.r[3]); + vX = _mm_add_ps(vX, vZ); + vY = _mm_add_ps(vY, vW); + vX = _mm_add_ps(vX, vY); + mResult.r[2] = vX; +#if defined(_XM_AVX_INTRINSICS_) + vX = _mm_broadcast_ss(reinterpret_cast(&M1.r[3]) + 0); + vY = _mm_broadcast_ss(reinterpret_cast(&M1.r[3]) + 1); + vZ = _mm_broadcast_ss(reinterpret_cast(&M1.r[3]) + 2); + vW = _mm_broadcast_ss(reinterpret_cast(&M1.r[3]) + 3); +#else + vW = M1.r[3]; + vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0)); + vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1)); + vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2)); + vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3)); +#endif + vX = _mm_mul_ps(vX, M2.r[0]); + vY = _mm_mul_ps(vY, M2.r[1]); + vZ = _mm_mul_ps(vZ, M2.r[2]); + vW = _mm_mul_ps(vW, M2.r[3]); + vX = _mm_add_ps(vX, vZ); + vY = _mm_add_ps(vY, vW); + vX = _mm_add_ps(vX, vY); + mResult.r[3] = vX; + return mResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixMultiplyTranspose +( + FXMMATRIX M1, + CXMMATRIX M2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMMATRIX mResult; + // Cache the invariants in 
registers + float x = M2.m[0][0]; + float y = M2.m[1][0]; + float z = M2.m[2][0]; + float w = M2.m[3][0]; + // Perform the operation on the first row + mResult.m[0][0] = (M1.m[0][0] * x) + (M1.m[0][1] * y) + (M1.m[0][2] * z) + (M1.m[0][3] * w); + mResult.m[0][1] = (M1.m[1][0] * x) + (M1.m[1][1] * y) + (M1.m[1][2] * z) + (M1.m[1][3] * w); + mResult.m[0][2] = (M1.m[2][0] * x) + (M1.m[2][1] * y) + (M1.m[2][2] * z) + (M1.m[2][3] * w); + mResult.m[0][3] = (M1.m[3][0] * x) + (M1.m[3][1] * y) + (M1.m[3][2] * z) + (M1.m[3][3] * w); + // Repeat for all the other rows + x = M2.m[0][1]; + y = M2.m[1][1]; + z = M2.m[2][1]; + w = M2.m[3][1]; + mResult.m[1][0] = (M1.m[0][0] * x) + (M1.m[0][1] * y) + (M1.m[0][2] * z) + (M1.m[0][3] * w); + mResult.m[1][1] = (M1.m[1][0] * x) + (M1.m[1][1] * y) + (M1.m[1][2] * z) + (M1.m[1][3] * w); + mResult.m[1][2] = (M1.m[2][0] * x) + (M1.m[2][1] * y) + (M1.m[2][2] * z) + (M1.m[2][3] * w); + mResult.m[1][3] = (M1.m[3][0] * x) + (M1.m[3][1] * y) + (M1.m[3][2] * z) + (M1.m[3][3] * w); + x = M2.m[0][2]; + y = M2.m[1][2]; + z = M2.m[2][2]; + w = M2.m[3][2]; + mResult.m[2][0] = (M1.m[0][0] * x) + (M1.m[0][1] * y) + (M1.m[0][2] * z) + (M1.m[0][3] * w); + mResult.m[2][1] = (M1.m[1][0] * x) + (M1.m[1][1] * y) + (M1.m[1][2] * z) + (M1.m[1][3] * w); + mResult.m[2][2] = (M1.m[2][0] * x) + (M1.m[2][1] * y) + (M1.m[2][2] * z) + (M1.m[2][3] * w); + mResult.m[2][3] = (M1.m[3][0] * x) + (M1.m[3][1] * y) + (M1.m[3][2] * z) + (M1.m[3][3] * w); + x = M2.m[0][3]; + y = M2.m[1][3]; + z = M2.m[2][3]; + w = M2.m[3][3]; + mResult.m[3][0] = (M1.m[0][0] * x) + (M1.m[0][1] * y) + (M1.m[0][2] * z) + (M1.m[0][3] * w); + mResult.m[3][1] = (M1.m[1][0] * x) + (M1.m[1][1] * y) + (M1.m[1][2] * z) + (M1.m[1][3] * w); + mResult.m[3][2] = (M1.m[2][0] * x) + (M1.m[2][1] * y) + (M1.m[2][2] * z) + (M1.m[2][3] * w); + mResult.m[3][3] = (M1.m[3][0] * x) + (M1.m[3][1] * y) + (M1.m[3][2] * z) + (M1.m[3][3] * w); + return mResult; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = 
vget_low_f32(M1.r[0]); + float32x2_t VH = vget_high_f32(M1.r[0]); + // Perform the operation on the first row + XMVECTOR vX = vmulq_lane_f32(M2.r[0], VL, 0); + XMVECTOR vY = vmulq_lane_f32(M2.r[1], VL, 1); + XMVECTOR vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0); + XMVECTOR vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1); + float32x4_t r0 = vaddq_f32(vZ, vW); + // Repeat for the other 3 rows + VL = vget_low_f32(M1.r[1]); + VH = vget_high_f32(M1.r[1]); + vX = vmulq_lane_f32(M2.r[0], VL, 0); + vY = vmulq_lane_f32(M2.r[1], VL, 1); + vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0); + vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1); + float32x4_t r1 = vaddq_f32(vZ, vW); + VL = vget_low_f32(M1.r[2]); + VH = vget_high_f32(M1.r[2]); + vX = vmulq_lane_f32(M2.r[0], VL, 0); + vY = vmulq_lane_f32(M2.r[1], VL, 1); + vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0); + vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1); + float32x4_t r2 = vaddq_f32(vZ, vW); + VL = vget_low_f32(M1.r[3]); + VH = vget_high_f32(M1.r[3]); + vX = vmulq_lane_f32(M2.r[0], VL, 0); + vY = vmulq_lane_f32(M2.r[1], VL, 1); + vZ = vmlaq_lane_f32(vX, M2.r[2], VH, 0); + vW = vmlaq_lane_f32(vY, M2.r[3], VH, 1); + float32x4_t r3 = vaddq_f32(vZ, vW); + + // Transpose result + float32x4x2_t P0 = vzipq_f32(r0, r2); + float32x4x2_t P1 = vzipq_f32(r1, r3); + + float32x4x2_t T0 = vzipq_f32(P0.val[0], P1.val[0]); + float32x4x2_t T1 = vzipq_f32(P0.val[1], P1.val[1]); + + XMMATRIX mResult; + mResult.r[0] = T0.val[0]; + mResult.r[1] = T0.val[1]; + mResult.r[2] = T1.val[0]; + mResult.r[3] = T1.val[1]; + return mResult; +#elif defined(_XM_AVX2_INTRINSICS_) + __m256 t0 = _mm256_castps128_ps256(M1.r[0]); + t0 = _mm256_insertf128_ps(t0, M1.r[1], 1); + __m256 t1 = _mm256_castps128_ps256(M1.r[2]); + t1 = _mm256_insertf128_ps(t1, M1.r[3], 1); + + __m256 u0 = _mm256_castps128_ps256(M2.r[0]); + u0 = _mm256_insertf128_ps(u0, M2.r[1], 1); + __m256 u1 = _mm256_castps128_ps256(M2.r[2]); + u1 = _mm256_insertf128_ps(u1, M2.r[3], 1); + + __m256 a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(0, 
0, 0, 0)); + __m256 a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(0, 0, 0, 0)); + __m256 b0 = _mm256_permute2f128_ps(u0, u0, 0x00); + __m256 c0 = _mm256_mul_ps(a0, b0); + __m256 c1 = _mm256_mul_ps(a1, b0); + + a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(1, 1, 1, 1)); + a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(1, 1, 1, 1)); + b0 = _mm256_permute2f128_ps(u0, u0, 0x11); + __m256 c2 = _mm256_fmadd_ps(a0, b0, c0); + __m256 c3 = _mm256_fmadd_ps(a1, b0, c1); + + a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(2, 2, 2, 2)); + a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 b1 = _mm256_permute2f128_ps(u1, u1, 0x00); + __m256 c4 = _mm256_mul_ps(a0, b1); + __m256 c5 = _mm256_mul_ps(a1, b1); + + a0 = _mm256_shuffle_ps(t0, t0, _MM_SHUFFLE(3, 3, 3, 3)); + a1 = _mm256_shuffle_ps(t1, t1, _MM_SHUFFLE(3, 3, 3, 3)); + b1 = _mm256_permute2f128_ps(u1, u1, 0x11); + __m256 c6 = _mm256_fmadd_ps(a0, b1, c4); + __m256 c7 = _mm256_fmadd_ps(a1, b1, c5); + + t0 = _mm256_add_ps(c2, c6); + t1 = _mm256_add_ps(c3, c7); + + // Transpose result + __m256 vTemp = _mm256_unpacklo_ps(t0, t1); + __m256 vTemp2 = _mm256_unpackhi_ps(t0, t1); + __m256 vTemp3 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x20); + __m256 vTemp4 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x31); + vTemp = _mm256_unpacklo_ps(vTemp3, vTemp4); + vTemp2 = _mm256_unpackhi_ps(vTemp3, vTemp4); + t0 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x20); + t1 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x31); + + XMMATRIX mResult; + mResult.r[0] = _mm256_castps256_ps128(t0); + mResult.r[1] = _mm256_extractf128_ps(t0, 1); + mResult.r[2] = _mm256_castps256_ps128(t1); + mResult.r[3] = _mm256_extractf128_ps(t1, 1); + return mResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Splat the component X,Y,Z then W +#if defined(_XM_AVX_INTRINSICS_) + XMVECTOR vX = _mm_broadcast_ss(reinterpret_cast(&M1.r[0]) + 0); + XMVECTOR vY = _mm_broadcast_ss(reinterpret_cast(&M1.r[0]) + 1); + XMVECTOR vZ = _mm_broadcast_ss(reinterpret_cast(&M1.r[0]) + 2); + XMVECTOR vW = 
_mm_broadcast_ss(reinterpret_cast(&M1.r[0]) + 3); +#else + // Use vW to hold the original row + XMVECTOR vW = M1.r[0]; + XMVECTOR vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0)); + XMVECTOR vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2)); + vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3)); +#endif + // Perform the operation on the first row + vX = _mm_mul_ps(vX, M2.r[0]); + vY = _mm_mul_ps(vY, M2.r[1]); + vZ = _mm_mul_ps(vZ, M2.r[2]); + vW = _mm_mul_ps(vW, M2.r[3]); + // Perform a binary add to reduce cumulative errors + vX = _mm_add_ps(vX, vZ); + vY = _mm_add_ps(vY, vW); + vX = _mm_add_ps(vX, vY); + XMVECTOR r0 = vX; + // Repeat for the other 3 rows +#if defined(_XM_AVX_INTRINSICS_) + vX = _mm_broadcast_ss(reinterpret_cast(&M1.r[1]) + 0); + vY = _mm_broadcast_ss(reinterpret_cast(&M1.r[1]) + 1); + vZ = _mm_broadcast_ss(reinterpret_cast(&M1.r[1]) + 2); + vW = _mm_broadcast_ss(reinterpret_cast(&M1.r[1]) + 3); +#else + vW = M1.r[1]; + vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0)); + vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1)); + vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2)); + vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3)); +#endif + vX = _mm_mul_ps(vX, M2.r[0]); + vY = _mm_mul_ps(vY, M2.r[1]); + vZ = _mm_mul_ps(vZ, M2.r[2]); + vW = _mm_mul_ps(vW, M2.r[3]); + vX = _mm_add_ps(vX, vZ); + vY = _mm_add_ps(vY, vW); + vX = _mm_add_ps(vX, vY); + XMVECTOR r1 = vX; +#if defined(_XM_AVX_INTRINSICS_) + vX = _mm_broadcast_ss(reinterpret_cast(&M1.r[2]) + 0); + vY = _mm_broadcast_ss(reinterpret_cast(&M1.r[2]) + 1); + vZ = _mm_broadcast_ss(reinterpret_cast(&M1.r[2]) + 2); + vW = _mm_broadcast_ss(reinterpret_cast(&M1.r[2]) + 3); +#else + vW = M1.r[2]; + vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0)); + vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1)); + vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2)); + vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3)); +#endif + vX = _mm_mul_ps(vX, M2.r[0]); + vY = _mm_mul_ps(vY, 
M2.r[1]); + vZ = _mm_mul_ps(vZ, M2.r[2]); + vW = _mm_mul_ps(vW, M2.r[3]); + vX = _mm_add_ps(vX, vZ); + vY = _mm_add_ps(vY, vW); + vX = _mm_add_ps(vX, vY); + XMVECTOR r2 = vX; +#if defined(_XM_AVX_INTRINSICS_) + vX = _mm_broadcast_ss(reinterpret_cast(&M1.r[3]) + 0); + vY = _mm_broadcast_ss(reinterpret_cast(&M1.r[3]) + 1); + vZ = _mm_broadcast_ss(reinterpret_cast(&M1.r[3]) + 2); + vW = _mm_broadcast_ss(reinterpret_cast(&M1.r[3]) + 3); +#else + vW = M1.r[3]; + vX = XM_PERMUTE_PS(vW, _MM_SHUFFLE(0, 0, 0, 0)); + vY = XM_PERMUTE_PS(vW, _MM_SHUFFLE(1, 1, 1, 1)); + vZ = XM_PERMUTE_PS(vW, _MM_SHUFFLE(2, 2, 2, 2)); + vW = XM_PERMUTE_PS(vW, _MM_SHUFFLE(3, 3, 3, 3)); +#endif + vX = _mm_mul_ps(vX, M2.r[0]); + vY = _mm_mul_ps(vY, M2.r[1]); + vZ = _mm_mul_ps(vZ, M2.r[2]); + vW = _mm_mul_ps(vW, M2.r[3]); + vX = _mm_add_ps(vX, vZ); + vY = _mm_add_ps(vY, vW); + vX = _mm_add_ps(vX, vY); + XMVECTOR r3 = vX; + + // Transpose result + // x.x,x.y,y.x,y.y + XMVECTOR vTemp1 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(1, 0, 1, 0)); + // x.z,x.w,y.z,y.w + XMVECTOR vTemp3 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(3, 2, 3, 2)); + // z.x,z.y,w.x,w.y + XMVECTOR vTemp2 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(1, 0, 1, 0)); + // z.z,z.w,w.z,w.w + XMVECTOR vTemp4 = _mm_shuffle_ps(r2, r3, _MM_SHUFFLE(3, 2, 3, 2)); + + XMMATRIX mResult; + // x.x,y.x,z.x,w.x + mResult.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + // x.y,y.y,z.y,w.y + mResult.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + // x.z,y.z,z.z,w.z + mResult.r[2] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + // x.w,y.w,z.w,w.w + mResult.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1)); + return mResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixTranspose(FXMMATRIX M) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + // Original matrix: + // + // m00m01m02m03 + // m10m11m12m13 + // m20m21m22m23 + // 
m30m31m32m33 + + XMMATRIX P; + P.r[0] = XMVectorMergeXY(M.r[0], M.r[2]); // m00m20m01m21 + P.r[1] = XMVectorMergeXY(M.r[1], M.r[3]); // m10m30m11m31 + P.r[2] = XMVectorMergeZW(M.r[0], M.r[2]); // m02m22m03m23 + P.r[3] = XMVectorMergeZW(M.r[1], M.r[3]); // m12m32m13m33 + + XMMATRIX MT; + MT.r[0] = XMVectorMergeXY(P.r[0], P.r[1]); // m00m10m20m30 + MT.r[1] = XMVectorMergeZW(P.r[0], P.r[1]); // m01m11m21m31 + MT.r[2] = XMVectorMergeXY(P.r[2], P.r[3]); // m02m12m22m32 + MT.r[3] = XMVectorMergeZW(P.r[2], P.r[3]); // m03m13m23m33 + return MT; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4x2_t P0 = vzipq_f32(M.r[0], M.r[2]); + float32x4x2_t P1 = vzipq_f32(M.r[1], M.r[3]); + + float32x4x2_t T0 = vzipq_f32(P0.val[0], P1.val[0]); + float32x4x2_t T1 = vzipq_f32(P0.val[1], P1.val[1]); + + XMMATRIX mResult; + mResult.r[0] = T0.val[0]; + mResult.r[1] = T0.val[1]; + mResult.r[2] = T1.val[0]; + mResult.r[3] = T1.val[1]; + return mResult; +#elif defined(_XM_AVX2_INTRINSICS_) + __m256 t0 = _mm256_castps128_ps256(M.r[0]); + t0 = _mm256_insertf128_ps(t0, M.r[1], 1); + __m256 t1 = _mm256_castps128_ps256(M.r[2]); + t1 = _mm256_insertf128_ps(t1, M.r[3], 1); + + __m256 vTemp = _mm256_unpacklo_ps(t0, t1); + __m256 vTemp2 = _mm256_unpackhi_ps(t0, t1); + __m256 vTemp3 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x20); + __m256 vTemp4 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x31); + vTemp = _mm256_unpacklo_ps(vTemp3, vTemp4); + vTemp2 = _mm256_unpackhi_ps(vTemp3, vTemp4); + t0 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x20); + t1 = _mm256_permute2f128_ps(vTemp, vTemp2, 0x31); + + XMMATRIX mResult; + mResult.r[0] = _mm256_castps256_ps128(t0); + mResult.r[1] = _mm256_extractf128_ps(t0, 1); + mResult.r[2] = _mm256_castps256_ps128(t1); + mResult.r[3] = _mm256_extractf128_ps(t1, 1); + return mResult; +#elif defined(_XM_SSE_INTRINSICS_) + // x.x,x.y,y.x,y.y + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + // x.z,x.w,y.z,y.w + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], 
M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + // z.x,z.y,w.x,w.y + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + // z.z,z.w,w.z,w.w + XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + + XMMATRIX mResult; + // x.x,y.x,z.x,w.x + mResult.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + // x.y,y.y,z.y,w.y + mResult.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + // x.z,y.z,z.z,w.z + mResult.r[2] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + // x.w,y.w,z.w,w.w + mResult.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1)); + return mResult; +#endif +} + +//------------------------------------------------------------------------------ +// Return the inverse and the determinant of a 4x4 matrix +_Use_decl_annotations_ +inline XMMATRIX XM_CALLCONV XMMatrixInverse +( + XMVECTOR* pDeterminant, + FXMMATRIX M +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMMATRIX MT = XMMatrixTranspose(M); + + XMVECTOR V0[4], V1[4]; + V0[0] = XMVectorSwizzle(MT.r[2]); + V1[0] = XMVectorSwizzle(MT.r[3]); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = XMVectorSwizzle(MT.r[1]); + V0[2] = XMVectorPermute(MT.r[2], MT.r[0]); + V1[2] = XMVectorPermute(MT.r[3], MT.r[1]); + + XMVECTOR D0 = XMVectorMultiply(V0[0], V1[0]); + XMVECTOR D1 = XMVectorMultiply(V0[1], V1[1]); + XMVECTOR D2 = XMVectorMultiply(V0[2], V1[2]); + + V0[0] = XMVectorSwizzle(MT.r[2]); + V1[0] = XMVectorSwizzle(MT.r[3]); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = XMVectorSwizzle(MT.r[1]); + V0[2] = XMVectorPermute(MT.r[2], MT.r[0]); + V1[2] = XMVectorPermute(MT.r[3], MT.r[1]); + + D0 = XMVectorNegativeMultiplySubtract(V0[0], V1[0], D0); + D1 = XMVectorNegativeMultiplySubtract(V0[1], V1[1], D1); + D2 = XMVectorNegativeMultiplySubtract(V0[2], V1[2], D2); + + V0[0] = XMVectorSwizzle(MT.r[1]); + V1[0] = XMVectorPermute(D0, D2); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = 
XMVectorPermute(D0, D2); + V0[2] = XMVectorSwizzle(MT.r[3]); + V1[2] = XMVectorPermute(D1, D2); + V0[3] = XMVectorSwizzle(MT.r[2]); + V1[3] = XMVectorPermute(D1, D2); + + XMVECTOR C0 = XMVectorMultiply(V0[0], V1[0]); + XMVECTOR C2 = XMVectorMultiply(V0[1], V1[1]); + XMVECTOR C4 = XMVectorMultiply(V0[2], V1[2]); + XMVECTOR C6 = XMVectorMultiply(V0[3], V1[3]); + + V0[0] = XMVectorSwizzle(MT.r[1]); + V1[0] = XMVectorPermute(D0, D2); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = XMVectorPermute(D0, D2); + V0[2] = XMVectorSwizzle(MT.r[3]); + V1[2] = XMVectorPermute(D1, D2); + V0[3] = XMVectorSwizzle(MT.r[2]); + V1[3] = XMVectorPermute(D1, D2); + + C0 = XMVectorNegativeMultiplySubtract(V0[0], V1[0], C0); + C2 = XMVectorNegativeMultiplySubtract(V0[1], V1[1], C2); + C4 = XMVectorNegativeMultiplySubtract(V0[2], V1[2], C4); + C6 = XMVectorNegativeMultiplySubtract(V0[3], V1[3], C6); + + V0[0] = XMVectorSwizzle(MT.r[1]); + V1[0] = XMVectorPermute(D0, D2); + V0[1] = XMVectorSwizzle(MT.r[0]); + V1[1] = XMVectorPermute(D0, D2); + V0[2] = XMVectorSwizzle(MT.r[3]); + V1[2] = XMVectorPermute(D1, D2); + V0[3] = XMVectorSwizzle(MT.r[2]); + V1[3] = XMVectorPermute(D1, D2); + + XMVECTOR C1 = XMVectorNegativeMultiplySubtract(V0[0], V1[0], C0); + C0 = XMVectorMultiplyAdd(V0[0], V1[0], C0); + XMVECTOR C3 = XMVectorMultiplyAdd(V0[1], V1[1], C2); + C2 = XMVectorNegativeMultiplySubtract(V0[1], V1[1], C2); + XMVECTOR C5 = XMVectorNegativeMultiplySubtract(V0[2], V1[2], C4); + C4 = XMVectorMultiplyAdd(V0[2], V1[2], C4); + XMVECTOR C7 = XMVectorMultiplyAdd(V0[3], V1[3], C6); + C6 = XMVectorNegativeMultiplySubtract(V0[3], V1[3], C6); + + XMMATRIX R; + R.r[0] = XMVectorSelect(C0, C1, g_XMSelect0101.v); + R.r[1] = XMVectorSelect(C2, C3, g_XMSelect0101.v); + R.r[2] = XMVectorSelect(C4, C5, g_XMSelect0101.v); + R.r[3] = XMVectorSelect(C6, C7, g_XMSelect0101.v); + + XMVECTOR Determinant = XMVector4Dot(R.r[0], MT.r[0]); + + if (pDeterminant != nullptr) + *pDeterminant = Determinant; + + XMVECTOR 
Reciprocal = XMVectorReciprocal(Determinant); + + XMMATRIX Result; + Result.r[0] = XMVectorMultiply(R.r[0], Reciprocal); + Result.r[1] = XMVectorMultiply(R.r[1], Reciprocal); + Result.r[2] = XMVectorMultiply(R.r[2], Reciprocal); + Result.r[3] = XMVectorMultiply(R.r[3], Reciprocal); + return Result; + +#elif defined(_XM_SSE_INTRINSICS_) + // Transpose matrix + XMVECTOR vTemp1 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(1, 0, 1, 0)); + XMVECTOR vTemp3 = _mm_shuffle_ps(M.r[0], M.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + XMVECTOR vTemp2 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(1, 0, 1, 0)); + XMVECTOR vTemp4 = _mm_shuffle_ps(M.r[2], M.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + + XMMATRIX MT; + MT.r[0] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(2, 0, 2, 0)); + MT.r[1] = _mm_shuffle_ps(vTemp1, vTemp2, _MM_SHUFFLE(3, 1, 3, 1)); + MT.r[2] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(2, 0, 2, 0)); + MT.r[3] = _mm_shuffle_ps(vTemp3, vTemp4, _MM_SHUFFLE(3, 1, 3, 1)); + + XMVECTOR V00 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(1, 1, 0, 0)); + XMVECTOR V10 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(3, 2, 3, 2)); + XMVECTOR V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(1, 1, 0, 0)); + XMVECTOR V11 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(3, 2, 3, 2)); + XMVECTOR V02 = _mm_shuffle_ps(MT.r[2], MT.r[0], _MM_SHUFFLE(2, 0, 2, 0)); + XMVECTOR V12 = _mm_shuffle_ps(MT.r[3], MT.r[1], _MM_SHUFFLE(3, 1, 3, 1)); + + XMVECTOR D0 = _mm_mul_ps(V00, V10); + XMVECTOR D1 = _mm_mul_ps(V01, V11); + XMVECTOR D2 = _mm_mul_ps(V02, V12); + + V00 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(3, 2, 3, 2)); + V10 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(1, 1, 0, 0)); + V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(3, 2, 3, 2)); + V11 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(1, 1, 0, 0)); + V02 = _mm_shuffle_ps(MT.r[2], MT.r[0], _MM_SHUFFLE(3, 1, 3, 1)); + V12 = _mm_shuffle_ps(MT.r[3], MT.r[1], _MM_SHUFFLE(2, 0, 2, 0)); + + D0 = XM_FNMADD_PS(V00, V10, D0); + D1 = XM_FNMADD_PS(V01, V11, D1); + D2 = XM_FNMADD_PS(V02, V12, D2); + // V11 = D0Y,D0W,D2Y,D2Y + V11 
= _mm_shuffle_ps(D0, D2, _MM_SHUFFLE(1, 1, 3, 1)); + V00 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(1, 0, 2, 1)); + V10 = _mm_shuffle_ps(V11, D0, _MM_SHUFFLE(0, 3, 0, 2)); + V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(0, 1, 0, 2)); + V11 = _mm_shuffle_ps(V11, D0, _MM_SHUFFLE(2, 1, 2, 1)); + // V13 = D1Y,D1W,D2W,D2W + XMVECTOR V13 = _mm_shuffle_ps(D1, D2, _MM_SHUFFLE(3, 3, 3, 1)); + V02 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(1, 0, 2, 1)); + V12 = _mm_shuffle_ps(V13, D1, _MM_SHUFFLE(0, 3, 0, 2)); + XMVECTOR V03 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(0, 1, 0, 2)); + V13 = _mm_shuffle_ps(V13, D1, _MM_SHUFFLE(2, 1, 2, 1)); + + XMVECTOR C0 = _mm_mul_ps(V00, V10); + XMVECTOR C2 = _mm_mul_ps(V01, V11); + XMVECTOR C4 = _mm_mul_ps(V02, V12); + XMVECTOR C6 = _mm_mul_ps(V03, V13); + + // V11 = D0X,D0Y,D2X,D2X + V11 = _mm_shuffle_ps(D0, D2, _MM_SHUFFLE(0, 0, 1, 0)); + V00 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(2, 1, 3, 2)); + V10 = _mm_shuffle_ps(D0, V11, _MM_SHUFFLE(2, 1, 0, 3)); + V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(1, 3, 2, 3)); + V11 = _mm_shuffle_ps(D0, V11, _MM_SHUFFLE(0, 2, 1, 2)); + // V13 = D1X,D1Y,D2Z,D2Z + V13 = _mm_shuffle_ps(D1, D2, _MM_SHUFFLE(2, 2, 1, 0)); + V02 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(2, 1, 3, 2)); + V12 = _mm_shuffle_ps(D1, V13, _MM_SHUFFLE(2, 1, 0, 3)); + V03 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(1, 3, 2, 3)); + V13 = _mm_shuffle_ps(D1, V13, _MM_SHUFFLE(0, 2, 1, 2)); + + C0 = XM_FNMADD_PS(V00, V10, C0); + C2 = XM_FNMADD_PS(V01, V11, C2); + C4 = XM_FNMADD_PS(V02, V12, C4); + C6 = XM_FNMADD_PS(V03, V13, C6); + + V00 = XM_PERMUTE_PS(MT.r[1], _MM_SHUFFLE(0, 3, 0, 3)); + // V10 = D0Z,D0Z,D2X,D2Y + V10 = _mm_shuffle_ps(D0, D2, _MM_SHUFFLE(1, 0, 2, 2)); + V10 = XM_PERMUTE_PS(V10, _MM_SHUFFLE(0, 2, 3, 0)); + V01 = XM_PERMUTE_PS(MT.r[0], _MM_SHUFFLE(2, 0, 3, 1)); + // V11 = D0X,D0W,D2X,D2Y + V11 = _mm_shuffle_ps(D0, D2, _MM_SHUFFLE(1, 0, 3, 0)); + V11 = XM_PERMUTE_PS(V11, _MM_SHUFFLE(2, 1, 0, 3)); + V02 = XM_PERMUTE_PS(MT.r[3], _MM_SHUFFLE(0, 3, 0, 3)); + // V12 = 
D1Z,D1Z,D2Z,D2W + V12 = _mm_shuffle_ps(D1, D2, _MM_SHUFFLE(3, 2, 2, 2)); + V12 = XM_PERMUTE_PS(V12, _MM_SHUFFLE(0, 2, 3, 0)); + V03 = XM_PERMUTE_PS(MT.r[2], _MM_SHUFFLE(2, 0, 3, 1)); + // V13 = D1X,D1W,D2Z,D2W + V13 = _mm_shuffle_ps(D1, D2, _MM_SHUFFLE(3, 2, 3, 0)); + V13 = XM_PERMUTE_PS(V13, _MM_SHUFFLE(2, 1, 0, 3)); + + V00 = _mm_mul_ps(V00, V10); + V01 = _mm_mul_ps(V01, V11); + V02 = _mm_mul_ps(V02, V12); + V03 = _mm_mul_ps(V03, V13); + XMVECTOR C1 = _mm_sub_ps(C0, V00); + C0 = _mm_add_ps(C0, V00); + XMVECTOR C3 = _mm_add_ps(C2, V01); + C2 = _mm_sub_ps(C2, V01); + XMVECTOR C5 = _mm_sub_ps(C4, V02); + C4 = _mm_add_ps(C4, V02); + XMVECTOR C7 = _mm_add_ps(C6, V03); + C6 = _mm_sub_ps(C6, V03); + + C0 = _mm_shuffle_ps(C0, C1, _MM_SHUFFLE(3, 1, 2, 0)); + C2 = _mm_shuffle_ps(C2, C3, _MM_SHUFFLE(3, 1, 2, 0)); + C4 = _mm_shuffle_ps(C4, C5, _MM_SHUFFLE(3, 1, 2, 0)); + C6 = _mm_shuffle_ps(C6, C7, _MM_SHUFFLE(3, 1, 2, 0)); + C0 = XM_PERMUTE_PS(C0, _MM_SHUFFLE(3, 1, 2, 0)); + C2 = XM_PERMUTE_PS(C2, _MM_SHUFFLE(3, 1, 2, 0)); + C4 = XM_PERMUTE_PS(C4, _MM_SHUFFLE(3, 1, 2, 0)); + C6 = XM_PERMUTE_PS(C6, _MM_SHUFFLE(3, 1, 2, 0)); + // Get the determinant + XMVECTOR vTemp = XMVector4Dot(C0, MT.r[0]); + if (pDeterminant != nullptr) + *pDeterminant = vTemp; + vTemp = _mm_div_ps(g_XMOne, vTemp); + XMMATRIX mResult; + mResult.r[0] = _mm_mul_ps(C0, vTemp); + mResult.r[1] = _mm_mul_ps(C2, vTemp); + mResult.r[2] = _mm_mul_ps(C4, vTemp); + mResult.r[3] = _mm_mul_ps(C6, vTemp); + return mResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixVectorTensorProduct +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + XMMATRIX mResult; + mResult.r[0] = XMVectorMultiply(XMVectorSwizzle<0, 0, 0, 0>(V1), V2); + mResult.r[1] = XMVectorMultiply(XMVectorSwizzle<1, 1, 1, 1>(V1), V2); + mResult.r[2] = XMVectorMultiply(XMVectorSwizzle<2, 2, 2, 2>(V1), V2); + mResult.r[3] = XMVectorMultiply(XMVectorSwizzle<3, 3, 3, 
3>(V1), V2); + return mResult; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMMatrixDeterminant(FXMMATRIX M) noexcept +{ + static const XMVECTORF32 Sign = { { { 1.0f, -1.0f, 1.0f, -1.0f } } }; + + XMVECTOR V0 = XMVectorSwizzle(M.r[2]); + XMVECTOR V1 = XMVectorSwizzle(M.r[3]); + XMVECTOR V2 = XMVectorSwizzle(M.r[2]); + XMVECTOR V3 = XMVectorSwizzle(M.r[3]); + XMVECTOR V4 = XMVectorSwizzle(M.r[2]); + XMVECTOR V5 = XMVectorSwizzle(M.r[3]); + + XMVECTOR P0 = XMVectorMultiply(V0, V1); + XMVECTOR P1 = XMVectorMultiply(V2, V3); + XMVECTOR P2 = XMVectorMultiply(V4, V5); + + V0 = XMVectorSwizzle(M.r[2]); + V1 = XMVectorSwizzle(M.r[3]); + V2 = XMVectorSwizzle(M.r[2]); + V3 = XMVectorSwizzle(M.r[3]); + V4 = XMVectorSwizzle(M.r[2]); + V5 = XMVectorSwizzle(M.r[3]); + + P0 = XMVectorNegativeMultiplySubtract(V0, V1, P0); + P1 = XMVectorNegativeMultiplySubtract(V2, V3, P1); + P2 = XMVectorNegativeMultiplySubtract(V4, V5, P2); + + V0 = XMVectorSwizzle(M.r[1]); + V1 = XMVectorSwizzle(M.r[1]); + V2 = XMVectorSwizzle(M.r[1]); + + XMVECTOR S = XMVectorMultiply(M.r[0], Sign.v); + XMVECTOR R = XMVectorMultiply(V0, P0); + R = XMVectorNegativeMultiplySubtract(V1, P1, R); + R = XMVectorMultiplyAdd(V2, P2, R); + + return XMVector4Dot(S, R); +} + +#define XM3RANKDECOMPOSE(a, b, c, x, y, z) \ + if((x) < (y)) \ + { \ + if((y) < (z)) \ + { \ + (a) = 2; \ + (b) = 1; \ + (c) = 0; \ + } \ + else \ + { \ + (a) = 1; \ + \ + if((x) < (z)) \ + { \ + (b) = 2; \ + (c) = 0; \ + } \ + else \ + { \ + (b) = 0; \ + (c) = 2; \ + } \ + } \ + } \ + else \ + { \ + if((x) < (z)) \ + { \ + (a) = 2; \ + (b) = 0; \ + (c) = 1; \ + } \ + else \ + { \ + (a) = 0; \ + \ + if((y) < (z)) \ + { \ + (b) = 2; \ + (c) = 1; \ + } \ + else \ + { \ + (b) = 1; \ + (c) = 2; \ + } \ + } \ + } + +#define XM3_DECOMP_EPSILON 0.0001f + +_Use_decl_annotations_ +inline bool XM_CALLCONV XMMatrixDecompose +( + XMVECTOR* outScale, + XMVECTOR* outRotQuat, + XMVECTOR* 
outTrans, + FXMMATRIX M +) noexcept +{ + static const XMVECTOR* pvCanonicalBasis[3] = { + &g_XMIdentityR0.v, + &g_XMIdentityR1.v, + &g_XMIdentityR2.v + }; + + assert(outScale != nullptr); + assert(outRotQuat != nullptr); + assert(outTrans != nullptr); + + // Get the translation + outTrans[0] = M.r[3]; + + XMVECTOR* ppvBasis[3]; + XMMATRIX matTemp; + ppvBasis[0] = &matTemp.r[0]; + ppvBasis[1] = &matTemp.r[1]; + ppvBasis[2] = &matTemp.r[2]; + + matTemp.r[0] = M.r[0]; + matTemp.r[1] = M.r[1]; + matTemp.r[2] = M.r[2]; + matTemp.r[3] = g_XMIdentityR3.v; + + auto pfScales = reinterpret_cast<float*>(outScale); + + size_t a, b, c; + XMVectorGetXPtr(&pfScales[0], XMVector3Length(ppvBasis[0][0])); + XMVectorGetXPtr(&pfScales[1], XMVector3Length(ppvBasis[1][0])); + XMVectorGetXPtr(&pfScales[2], XMVector3Length(ppvBasis[2][0])); + pfScales[3] = 0.f; + + XM3RANKDECOMPOSE(a, b, c, pfScales[0], pfScales[1], pfScales[2]) + + if (pfScales[a] < XM3_DECOMP_EPSILON) + { + ppvBasis[a][0] = pvCanonicalBasis[a][0]; + } + ppvBasis[a][0] = XMVector3Normalize(ppvBasis[a][0]); + + if (pfScales[b] < XM3_DECOMP_EPSILON) + { + size_t aa, bb, cc; + float fAbsX, fAbsY, fAbsZ; + + fAbsX = fabsf(XMVectorGetX(ppvBasis[a][0])); + fAbsY = fabsf(XMVectorGetY(ppvBasis[a][0])); + fAbsZ = fabsf(XMVectorGetZ(ppvBasis[a][0])); + + XM3RANKDECOMPOSE(aa, bb, cc, fAbsX, fAbsY, fAbsZ) + + ppvBasis[b][0] = XMVector3Cross(ppvBasis[a][0], pvCanonicalBasis[cc][0]); + } + + ppvBasis[b][0] = XMVector3Normalize(ppvBasis[b][0]); + + if (pfScales[c] < XM3_DECOMP_EPSILON) + { + ppvBasis[c][0] = XMVector3Cross(ppvBasis[a][0], ppvBasis[b][0]); + } + + ppvBasis[c][0] = XMVector3Normalize(ppvBasis[c][0]); + + float fDet = XMVectorGetX(XMMatrixDeterminant(matTemp)); + + // use Kramer's rule to check for handedness of coordinate system + if (fDet < 0.0f) + { + // switch coordinate system by negating the scale and inverting the basis vector on the x-axis + pfScales[a] = -pfScales[a]; + ppvBasis[a][0] = XMVectorNegate(ppvBasis[a][0]); + 
+ fDet = -fDet; + } + + fDet -= 1.0f; + fDet *= fDet; + + if (XM3_DECOMP_EPSILON < fDet) + { + // Non-SRT matrix encountered + return false; + } + + // generate the quaternion from the matrix + outRotQuat[0] = XMQuaternionRotationMatrix(matTemp); + return true; +} + +#undef XM3_DECOMP_EPSILON +#undef XM3RANKDECOMPOSE + +//------------------------------------------------------------------------------ +// Transformation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixIdentity() noexcept +{ + XMMATRIX M; + M.r[0] = g_XMIdentityR0.v; + M.r[1] = g_XMIdentityR1.v; + M.r[2] = g_XMIdentityR2.v; + M.r[3] = g_XMIdentityR3.v; + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixSet +( + float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33 +) noexcept +{ + XMMATRIX M; +#if defined(_XM_NO_INTRINSICS_) + M.m[0][0] = m00; M.m[0][1] = m01; M.m[0][2] = m02; M.m[0][3] = m03; + M.m[1][0] = m10; M.m[1][1] = m11; M.m[1][2] = m12; M.m[1][3] = m13; + M.m[2][0] = m20; M.m[2][1] = m21; M.m[2][2] = m22; M.m[2][3] = m23; + M.m[3][0] = m30; M.m[3][1] = m31; M.m[3][2] = m32; M.m[3][3] = m33; +#else + M.r[0] = XMVectorSet(m00, m01, m02, m03); + M.r[1] = XMVectorSet(m10, m11, m12, m13); + M.r[2] = XMVectorSet(m20, m21, m22, m23); + M.r[3] = XMVectorSet(m30, m31, m32, m33); +#endif + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixTranslation +( + float OffsetX, + float OffsetY, + float OffsetZ +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.m[0][0] = 1.0f; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; 
+ + M.m[1][0] = 0.0f; + M.m[1][1] = 1.0f; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = 1.0f; + M.m[2][3] = 0.0f; + + M.m[3][0] = OffsetX; + M.m[3][1] = OffsetY; + M.m[3][2] = OffsetZ; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_SSE_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; + M.r[0] = g_XMIdentityR0.v; + M.r[1] = g_XMIdentityR1.v; + M.r[2] = g_XMIdentityR2.v; + M.r[3] = XMVectorSet(OffsetX, OffsetY, OffsetZ, 1.f); + return M; +#endif +} + + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixTranslationFromVector(FXMVECTOR Offset) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.m[0][0] = 1.0f; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = 1.0f; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = 1.0f; + M.m[2][3] = 0.0f; + + M.m[3][0] = Offset.vector4_f32[0]; + M.m[3][1] = Offset.vector4_f32[1]; + M.m[3][2] = Offset.vector4_f32[2]; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_SSE_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; + M.r[0] = g_XMIdentityR0.v; + M.r[1] = g_XMIdentityR1.v; + M.r[2] = g_XMIdentityR2.v; + M.r[3] = XMVectorSelect(g_XMIdentityR3.v, Offset, g_XMSelect1110.v); + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixScaling +( + float ScaleX, + float ScaleY, + float ScaleZ +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.m[0][0] = ScaleX; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = ScaleY; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = ScaleZ; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = 0.0f; + M.m[3][3] = 1.0f; + return M; + 
+#elif defined(_XM_ARM_NEON_INTRINSICS_) + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(ScaleX, Zero, 0); + M.r[1] = vsetq_lane_f32(ScaleY, Zero, 1); + M.r[2] = vsetq_lane_f32(ScaleZ, Zero, 2); + M.r[3] = g_XMIdentityR3.v; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_set_ps(0, 0, 0, ScaleX); + M.r[1] = _mm_set_ps(0, 0, ScaleY, 0); + M.r[2] = _mm_set_ps(0, ScaleZ, 0, 0); + M.r[3] = g_XMIdentityR3.v; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixScalingFromVector(FXMVECTOR Scale) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMMATRIX M; + M.m[0][0] = Scale.vector4_f32[0]; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = Scale.vector4_f32[1]; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = Scale.vector4_f32[2]; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = 0.0f; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX M; + M.r[0] = vandq_u32(Scale, g_XMMaskX); + M.r[1] = vandq_u32(Scale, g_XMMaskY); + M.r[2] = vandq_u32(Scale, g_XMMaskZ); + M.r[3] = g_XMIdentityR3.v; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + M.r[0] = _mm_and_ps(Scale, g_XMMaskX); + M.r[1] = _mm_and_ps(Scale, g_XMMaskY); + M.r[2] = _mm_and_ps(Scale, g_XMMaskZ); + M.r[3] = g_XMIdentityR3.v; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationX(float Angle) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + XMMATRIX M; + M.m[0][0] = 1.0f; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = fCosAngle; + M.m[1][2] = fSinAngle; + M.m[1][3] = 
0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = -fSinAngle; + M.m[2][2] = fCosAngle; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = 0.0f; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + const XMVECTOR Zero = vdupq_n_f32(0); + + XMVECTOR T1 = vsetq_lane_f32(fCosAngle, Zero, 1); + T1 = vsetq_lane_f32(fSinAngle, T1, 2); + + XMVECTOR T2 = vsetq_lane_f32(-fSinAngle, Zero, 1); + T2 = vsetq_lane_f32(fCosAngle, T2, 2); + + XMMATRIX M; + M.r[0] = g_XMIdentityR0.v; + M.r[1] = T1; + M.r[2] = T2; + M.r[3] = g_XMIdentityR3.v; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + float SinAngle; + float CosAngle; + XMScalarSinCos(&SinAngle, &CosAngle, Angle); + + XMVECTOR vSin = _mm_set_ss(SinAngle); + XMVECTOR vCos = _mm_set_ss(CosAngle); + // x = 0,y = cos,z = sin, w = 0 + vCos = _mm_shuffle_ps(vCos, vSin, _MM_SHUFFLE(3, 0, 0, 3)); + XMMATRIX M; + M.r[0] = g_XMIdentityR0; + M.r[1] = vCos; + // x = 0,y = sin,z = cos, w = 0 + vCos = XM_PERMUTE_PS(vCos, _MM_SHUFFLE(3, 1, 2, 0)); + // x = 0,y = -sin,z = cos, w = 0 + vCos = _mm_mul_ps(vCos, g_XMNegateY); + M.r[2] = vCos; + M.r[3] = g_XMIdentityR3; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationY(float Angle) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + XMMATRIX M; + M.m[0][0] = fCosAngle; + M.m[0][1] = 0.0f; + M.m[0][2] = -fSinAngle; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = 1.0f; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = fSinAngle; + M.m[2][1] = 0.0f; + M.m[2][2] = fCosAngle; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = 0.0f; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fSinAngle; + float fCosAngle; + 
XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + const XMVECTOR Zero = vdupq_n_f32(0); + + XMVECTOR T0 = vsetq_lane_f32(fCosAngle, Zero, 0); + T0 = vsetq_lane_f32(-fSinAngle, T0, 2); + + XMVECTOR T2 = vsetq_lane_f32(fSinAngle, Zero, 0); + T2 = vsetq_lane_f32(fCosAngle, T2, 2); + + XMMATRIX M; + M.r[0] = T0; + M.r[1] = g_XMIdentityR1.v; + M.r[2] = T2; + M.r[3] = g_XMIdentityR3.v; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + float SinAngle; + float CosAngle; + XMScalarSinCos(&SinAngle, &CosAngle, Angle); + + XMVECTOR vSin = _mm_set_ss(SinAngle); + XMVECTOR vCos = _mm_set_ss(CosAngle); + // x = sin,y = 0,z = cos, w = 0 + vSin = _mm_shuffle_ps(vSin, vCos, _MM_SHUFFLE(3, 0, 3, 0)); + XMMATRIX M; + M.r[2] = vSin; + M.r[1] = g_XMIdentityR1; + // x = cos,y = 0,z = sin, w = 0 + vSin = XM_PERMUTE_PS(vSin, _MM_SHUFFLE(3, 0, 1, 2)); + // x = cos,y = 0,z = -sin, w = 0 + vSin = _mm_mul_ps(vSin, g_XMNegateZ); + M.r[0] = vSin; + M.r[3] = g_XMIdentityR3; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationZ(float Angle) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + XMMATRIX M; + M.m[0][0] = fCosAngle; + M.m[0][1] = fSinAngle; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = -fSinAngle; + M.m[1][1] = fCosAngle; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = 1.0f; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = 0.0f; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + const XMVECTOR Zero = vdupq_n_f32(0); + + XMVECTOR T0 = vsetq_lane_f32(fCosAngle, Zero, 0); + T0 = vsetq_lane_f32(fSinAngle, T0, 1); + + XMVECTOR T1 = vsetq_lane_f32(-fSinAngle, Zero, 0); + T1 = vsetq_lane_f32(fCosAngle, T1, 1); 
+ + XMMATRIX M; + M.r[0] = T0; + M.r[1] = T1; + M.r[2] = g_XMIdentityR2.v; + M.r[3] = g_XMIdentityR3.v; + return M; +#elif defined(_XM_SSE_INTRINSICS_) + float SinAngle; + float CosAngle; + XMScalarSinCos(&SinAngle, &CosAngle, Angle); + + XMVECTOR vSin = _mm_set_ss(SinAngle); + XMVECTOR vCos = _mm_set_ss(CosAngle); + // x = cos,y = sin,z = 0, w = 0 + vCos = _mm_unpacklo_ps(vCos, vSin); + XMMATRIX M; + M.r[0] = vCos; + // x = sin,y = cos,z = 0, w = 0 + vCos = XM_PERMUTE_PS(vCos, _MM_SHUFFLE(3, 2, 0, 1)); + // x = cos,y = -sin,z = 0, w = 0 + vCos = _mm_mul_ps(vCos, g_XMNegateX); + M.r[1] = vCos; + M.r[2] = g_XMIdentityR2; + M.r[3] = g_XMIdentityR3; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYaw +( + float Pitch, + float Yaw, + float Roll +) noexcept +{ + XMVECTOR Angles = XMVectorSet(Pitch, Yaw, Roll, 0.0f); + return XMMatrixRotationRollPitchYawFromVector(Angles); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYawFromVector +( + FXMVECTOR Angles // +) noexcept +{ + XMVECTOR Q = XMQuaternionRotationRollPitchYawFromVector(Angles); + return XMMatrixRotationQuaternion(Q); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationNormal +( + FXMVECTOR NormalAxis, + float Angle +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + XMVECTOR A = XMVectorSet(fSinAngle, fCosAngle, 1.0f - fCosAngle, 0.0f); + + XMVECTOR C2 = XMVectorSplatZ(A); + XMVECTOR C1 = XMVectorSplatY(A); + XMVECTOR C0 = XMVectorSplatX(A); + + XMVECTOR N0 = XMVectorSwizzle(NormalAxis); + XMVECTOR N1 = XMVectorSwizzle(NormalAxis); + + XMVECTOR V0 = XMVectorMultiply(C2, N0); + V0 = 
XMVectorMultiply(V0, N1); + + XMVECTOR R0 = XMVectorMultiply(C2, NormalAxis); + R0 = XMVectorMultiplyAdd(R0, NormalAxis, C1); + + XMVECTOR R1 = XMVectorMultiplyAdd(C0, NormalAxis, V0); + XMVECTOR R2 = XMVectorNegativeMultiplySubtract(C0, NormalAxis, V0); + + V0 = XMVectorSelect(A, R0, g_XMSelect1110.v); + XMVECTOR V1 = XMVectorPermute(R1, R2); + XMVECTOR V2 = XMVectorPermute(R1, R2); + + XMMATRIX M; + M.r[0] = XMVectorPermute(V0, V1); + M.r[1] = XMVectorPermute(V0, V1); + M.r[2] = XMVectorPermute(V0, V2); + M.r[3] = g_XMIdentityR3.v; + return M; + +#elif defined(_XM_SSE_INTRINSICS_) + float fSinAngle; + float fCosAngle; + XMScalarSinCos(&fSinAngle, &fCosAngle, Angle); + + XMVECTOR C2 = _mm_set_ps1(1.0f - fCosAngle); + XMVECTOR C1 = _mm_set_ps1(fCosAngle); + XMVECTOR C0 = _mm_set_ps1(fSinAngle); + + XMVECTOR N0 = XM_PERMUTE_PS(NormalAxis, _MM_SHUFFLE(3, 0, 2, 1)); + XMVECTOR N1 = XM_PERMUTE_PS(NormalAxis, _MM_SHUFFLE(3, 1, 0, 2)); + + XMVECTOR V0 = _mm_mul_ps(C2, N0); + V0 = _mm_mul_ps(V0, N1); + + XMVECTOR R0 = _mm_mul_ps(C2, NormalAxis); + R0 = _mm_mul_ps(R0, NormalAxis); + R0 = _mm_add_ps(R0, C1); + + XMVECTOR R1 = _mm_mul_ps(C0, NormalAxis); + R1 = _mm_add_ps(R1, V0); + XMVECTOR R2 = _mm_mul_ps(C0, NormalAxis); + R2 = _mm_sub_ps(V0, R2); + + V0 = _mm_and_ps(R0, g_XMMask3); + XMVECTOR V1 = _mm_shuffle_ps(R1, R2, _MM_SHUFFLE(2, 1, 2, 0)); + V1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 3, 2, 1)); + XMVECTOR V2 = _mm_shuffle_ps(R1, R2, _MM_SHUFFLE(0, 0, 1, 1)); + V2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 0, 2, 0)); + + R2 = _mm_shuffle_ps(V0, V1, _MM_SHUFFLE(1, 0, 3, 0)); + R2 = XM_PERMUTE_PS(R2, _MM_SHUFFLE(1, 3, 2, 0)); + + XMMATRIX M; + M.r[0] = R2; + + R2 = _mm_shuffle_ps(V0, V1, _MM_SHUFFLE(3, 2, 3, 1)); + R2 = XM_PERMUTE_PS(R2, _MM_SHUFFLE(1, 3, 0, 2)); + M.r[1] = R2; + + V2 = _mm_shuffle_ps(V2, V0, _MM_SHUFFLE(3, 2, 1, 0)); + M.r[2] = V2; + M.r[3] = g_XMIdentityR3.v; + return M; +#endif +} + 
+//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationAxis +( + FXMVECTOR Axis, + float Angle +) noexcept +{ + assert(!XMVector3Equal(Axis, XMVectorZero())); + assert(!XMVector3IsInfinite(Axis)); + + XMVECTOR Normal = XMVector3Normalize(Axis); + return XMMatrixRotationNormal(Normal, Angle); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixRotationQuaternion(FXMVECTOR Quaternion) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + static const XMVECTORF32 Constant1110 = { { { 1.0f, 1.0f, 1.0f, 0.0f } } }; + + XMVECTOR Q0 = XMVectorAdd(Quaternion, Quaternion); + XMVECTOR Q1 = XMVectorMultiply(Quaternion, Q0); + + XMVECTOR V0 = XMVectorPermute(Q1, Constant1110.v); + XMVECTOR V1 = XMVectorPermute(Q1, Constant1110.v); + XMVECTOR R0 = XMVectorSubtract(Constant1110, V0); + R0 = XMVectorSubtract(R0, V1); + + V0 = XMVectorSwizzle(Quaternion); + V1 = XMVectorSwizzle(Q0); + V0 = XMVectorMultiply(V0, V1); + + V1 = XMVectorSplatW(Quaternion); + XMVECTOR V2 = XMVectorSwizzle(Q0); + V1 = XMVectorMultiply(V1, V2); + + XMVECTOR R1 = XMVectorAdd(V0, V1); + XMVECTOR R2 = XMVectorSubtract(V0, V1); + + V0 = XMVectorPermute(R1, R2); + V1 = XMVectorPermute(R1, R2); + + XMMATRIX M; + M.r[0] = XMVectorPermute(R0, V0); + M.r[1] = XMVectorPermute(R0, V0); + M.r[2] = XMVectorPermute(R0, V1); + M.r[3] = g_XMIdentityR3.v; + return M; + +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 Constant1110 = { { { 1.0f, 1.0f, 1.0f, 0.0f } } }; + + XMVECTOR Q0 = _mm_add_ps(Quaternion, Quaternion); + XMVECTOR Q1 = _mm_mul_ps(Quaternion, Q0); + + XMVECTOR V0 = XM_PERMUTE_PS(Q1, _MM_SHUFFLE(3, 0, 0, 1)); + V0 = _mm_and_ps(V0, g_XMMask3); + XMVECTOR V1 = XM_PERMUTE_PS(Q1, _MM_SHUFFLE(3, 1, 2, 2)); + V1 = _mm_and_ps(V1, g_XMMask3); + XMVECTOR R0 = _mm_sub_ps(Constant1110, V0); + R0 = _mm_sub_ps(R0, V1); + + V0 = 
XM_PERMUTE_PS(Quaternion, _MM_SHUFFLE(3, 1, 0, 0)); + V1 = XM_PERMUTE_PS(Q0, _MM_SHUFFLE(3, 2, 1, 2)); + V0 = _mm_mul_ps(V0, V1); + + V1 = XM_PERMUTE_PS(Quaternion, _MM_SHUFFLE(3, 3, 3, 3)); + XMVECTOR V2 = XM_PERMUTE_PS(Q0, _MM_SHUFFLE(3, 0, 2, 1)); + V1 = _mm_mul_ps(V1, V2); + + XMVECTOR R1 = _mm_add_ps(V0, V1); + XMVECTOR R2 = _mm_sub_ps(V0, V1); + + V0 = _mm_shuffle_ps(R1, R2, _MM_SHUFFLE(1, 0, 2, 1)); + V0 = XM_PERMUTE_PS(V0, _MM_SHUFFLE(1, 3, 2, 0)); + V1 = _mm_shuffle_ps(R1, R2, _MM_SHUFFLE(2, 2, 0, 0)); + V1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 0, 2, 0)); + + Q1 = _mm_shuffle_ps(R0, V0, _MM_SHUFFLE(1, 0, 3, 0)); + Q1 = XM_PERMUTE_PS(Q1, _MM_SHUFFLE(1, 3, 2, 0)); + + XMMATRIX M; + M.r[0] = Q1; + + Q1 = _mm_shuffle_ps(R0, V0, _MM_SHUFFLE(3, 2, 3, 1)); + Q1 = XM_PERMUTE_PS(Q1, _MM_SHUFFLE(1, 3, 0, 2)); + M.r[1] = Q1; + + Q1 = _mm_shuffle_ps(V1, R0, _MM_SHUFFLE(3, 2, 1, 0)); + M.r[2] = Q1; + M.r[3] = g_XMIdentityR3; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixTransformation2D +( + FXMVECTOR ScalingOrigin, + float ScalingOrientation, + FXMVECTOR Scaling, + FXMVECTOR RotationOrigin, + float Rotation, + GXMVECTOR Translation +) noexcept +{ + // M = Inverse(MScalingOrigin) * Transpose(MScalingOrientation) * MScaling * MScalingOrientation * + // MScalingOrigin * Inverse(MRotationOrigin) * MRotation * MRotationOrigin * MTranslation; + + XMVECTOR VScalingOrigin = XMVectorSelect(g_XMSelect1100.v, ScalingOrigin, g_XMSelect1100.v); + XMVECTOR NegScalingOrigin = XMVectorNegate(VScalingOrigin); + + XMMATRIX MScalingOriginI = XMMatrixTranslationFromVector(NegScalingOrigin); + XMMATRIX MScalingOrientation = XMMatrixRotationZ(ScalingOrientation); + XMMATRIX MScalingOrientationT = XMMatrixTranspose(MScalingOrientation); + XMVECTOR VScaling = XMVectorSelect(g_XMOne.v, Scaling, g_XMSelect1100.v); + XMMATRIX MScaling = XMMatrixScalingFromVector(VScaling); + XMVECTOR VRotationOrigin 
= XMVectorSelect(g_XMSelect1100.v, RotationOrigin, g_XMSelect1100.v); + XMMATRIX MRotation = XMMatrixRotationZ(Rotation); + XMVECTOR VTranslation = XMVectorSelect(g_XMSelect1100.v, Translation, g_XMSelect1100.v); + + XMMATRIX M = XMMatrixMultiply(MScalingOriginI, MScalingOrientationT); + M = XMMatrixMultiply(M, MScaling); + M = XMMatrixMultiply(M, MScalingOrientation); + M.r[3] = XMVectorAdd(M.r[3], VScalingOrigin); + M.r[3] = XMVectorSubtract(M.r[3], VRotationOrigin); + M = XMMatrixMultiply(M, MRotation); + M.r[3] = XMVectorAdd(M.r[3], VRotationOrigin); + M.r[3] = XMVectorAdd(M.r[3], VTranslation); + + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixTransformation +( + FXMVECTOR ScalingOrigin, + FXMVECTOR ScalingOrientationQuaternion, + FXMVECTOR Scaling, + GXMVECTOR RotationOrigin, + HXMVECTOR RotationQuaternion, + HXMVECTOR Translation +) noexcept +{ + // M = Inverse(MScalingOrigin) * Transpose(MScalingOrientation) * MScaling * MScalingOrientation * + // MScalingOrigin * Inverse(MRotationOrigin) * MRotation * MRotationOrigin * MTranslation; + + XMVECTOR VScalingOrigin = XMVectorSelect(g_XMSelect1110.v, ScalingOrigin, g_XMSelect1110.v); + XMVECTOR NegScalingOrigin = XMVectorNegate(ScalingOrigin); + + XMMATRIX MScalingOriginI = XMMatrixTranslationFromVector(NegScalingOrigin); + XMMATRIX MScalingOrientation = XMMatrixRotationQuaternion(ScalingOrientationQuaternion); + XMMATRIX MScalingOrientationT = XMMatrixTranspose(MScalingOrientation); + XMMATRIX MScaling = XMMatrixScalingFromVector(Scaling); + XMVECTOR VRotationOrigin = XMVectorSelect(g_XMSelect1110.v, RotationOrigin, g_XMSelect1110.v); + XMMATRIX MRotation = XMMatrixRotationQuaternion(RotationQuaternion); + XMVECTOR VTranslation = XMVectorSelect(g_XMSelect1110.v, Translation, g_XMSelect1110.v); + + XMMATRIX M; + M = XMMatrixMultiply(MScalingOriginI, MScalingOrientationT); + M = XMMatrixMultiply(M, MScaling); + M = 
XMMatrixMultiply(M, MScalingOrientation); + M.r[3] = XMVectorAdd(M.r[3], VScalingOrigin); + M.r[3] = XMVectorSubtract(M.r[3], VRotationOrigin); + M = XMMatrixMultiply(M, MRotation); + M.r[3] = XMVectorAdd(M.r[3], VRotationOrigin); + M.r[3] = XMVectorAdd(M.r[3], VTranslation); + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixAffineTransformation2D +( + FXMVECTOR Scaling, + FXMVECTOR RotationOrigin, + float Rotation, + FXMVECTOR Translation +) noexcept +{ + // M = MScaling * Inverse(MRotationOrigin) * MRotation * MRotationOrigin * MTranslation; + + XMVECTOR VScaling = XMVectorSelect(g_XMOne.v, Scaling, g_XMSelect1100.v); + XMMATRIX MScaling = XMMatrixScalingFromVector(VScaling); + XMVECTOR VRotationOrigin = XMVectorSelect(g_XMSelect1100.v, RotationOrigin, g_XMSelect1100.v); + XMMATRIX MRotation = XMMatrixRotationZ(Rotation); + XMVECTOR VTranslation = XMVectorSelect(g_XMSelect1100.v, Translation, g_XMSelect1100.v); + + XMMATRIX M; + M = MScaling; + M.r[3] = XMVectorSubtract(M.r[3], VRotationOrigin); + M = XMMatrixMultiply(M, MRotation); + M.r[3] = XMVectorAdd(M.r[3], VRotationOrigin); + M.r[3] = XMVectorAdd(M.r[3], VTranslation); + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixAffineTransformation +( + FXMVECTOR Scaling, + FXMVECTOR RotationOrigin, + FXMVECTOR RotationQuaternion, + GXMVECTOR Translation +) noexcept +{ + // M = MScaling * Inverse(MRotationOrigin) * MRotation * MRotationOrigin * MTranslation; + + XMMATRIX MScaling = XMMatrixScalingFromVector(Scaling); + XMVECTOR VRotationOrigin = XMVectorSelect(g_XMSelect1110.v, RotationOrigin, g_XMSelect1110.v); + XMMATRIX MRotation = XMMatrixRotationQuaternion(RotationQuaternion); + XMVECTOR VTranslation = XMVectorSelect(g_XMSelect1110.v, Translation, g_XMSelect1110.v); + + XMMATRIX M; + M = MScaling; + M.r[3] = XMVectorSubtract(M.r[3], 
VRotationOrigin); + M = XMMatrixMultiply(M, MRotation); + M.r[3] = XMVectorAdd(M.r[3], VRotationOrigin); + M.r[3] = XMVectorAdd(M.r[3], VTranslation); + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixReflect(FXMVECTOR ReflectionPlane) noexcept +{ + assert(!XMVector3Equal(ReflectionPlane, XMVectorZero())); + assert(!XMPlaneIsInfinite(ReflectionPlane)); + + static const XMVECTORF32 NegativeTwo = { { { -2.0f, -2.0f, -2.0f, 0.0f } } }; + + XMVECTOR P = XMPlaneNormalize(ReflectionPlane); + XMVECTOR S = XMVectorMultiply(P, NegativeTwo); + + XMVECTOR A = XMVectorSplatX(P); + XMVECTOR B = XMVectorSplatY(P); + XMVECTOR C = XMVectorSplatZ(P); + XMVECTOR D = XMVectorSplatW(P); + + XMMATRIX M; + M.r[0] = XMVectorMultiplyAdd(A, S, g_XMIdentityR0.v); + M.r[1] = XMVectorMultiplyAdd(B, S, g_XMIdentityR1.v); + M.r[2] = XMVectorMultiplyAdd(C, S, g_XMIdentityR2.v); + M.r[3] = XMVectorMultiplyAdd(D, S, g_XMIdentityR3.v); + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixShadow +( + FXMVECTOR ShadowPlane, + FXMVECTOR LightPosition +) noexcept +{ + static const XMVECTORU32 Select0001 = { { { XM_SELECT_0, XM_SELECT_0, XM_SELECT_0, XM_SELECT_1 } } }; + + assert(!XMVector3Equal(ShadowPlane, XMVectorZero())); + assert(!XMPlaneIsInfinite(ShadowPlane)); + + XMVECTOR P = XMPlaneNormalize(ShadowPlane); + XMVECTOR Dot = XMPlaneDot(P, LightPosition); + P = XMVectorNegate(P); + XMVECTOR D = XMVectorSplatW(P); + XMVECTOR C = XMVectorSplatZ(P); + XMVECTOR B = XMVectorSplatY(P); + XMVECTOR A = XMVectorSplatX(P); + Dot = XMVectorSelect(Select0001.v, Dot, Select0001.v); + + XMMATRIX M; + M.r[3] = XMVectorMultiplyAdd(D, LightPosition, Dot); + Dot = XMVectorRotateLeft(Dot, 1); + M.r[2] = XMVectorMultiplyAdd(C, LightPosition, Dot); + Dot = XMVectorRotateLeft(Dot, 1); + M.r[1] = XMVectorMultiplyAdd(B, LightPosition, Dot); + Dot = 
XMVectorRotateLeft(Dot, 1); + M.r[0] = XMVectorMultiplyAdd(A, LightPosition, Dot); + return M; +} + +//------------------------------------------------------------------------------ +// View and projection initialization operations +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixLookAtLH +( + FXMVECTOR EyePosition, + FXMVECTOR FocusPosition, + FXMVECTOR UpDirection +) noexcept +{ + XMVECTOR EyeDirection = XMVectorSubtract(FocusPosition, EyePosition); + return XMMatrixLookToLH(EyePosition, EyeDirection, UpDirection); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixLookAtRH +( + FXMVECTOR EyePosition, + FXMVECTOR FocusPosition, + FXMVECTOR UpDirection +) noexcept +{ + XMVECTOR NegEyeDirection = XMVectorSubtract(EyePosition, FocusPosition); + return XMMatrixLookToLH(EyePosition, NegEyeDirection, UpDirection); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixLookToLH +( + FXMVECTOR EyePosition, + FXMVECTOR EyeDirection, + FXMVECTOR UpDirection +) noexcept +{ + assert(!XMVector3Equal(EyeDirection, XMVectorZero())); + assert(!XMVector3IsInfinite(EyeDirection)); + assert(!XMVector3Equal(UpDirection, XMVectorZero())); + assert(!XMVector3IsInfinite(UpDirection)); + + XMVECTOR R2 = XMVector3Normalize(EyeDirection); + + XMVECTOR R0 = XMVector3Cross(UpDirection, R2); + R0 = XMVector3Normalize(R0); + + XMVECTOR R1 = XMVector3Cross(R2, R0); + + XMVECTOR NegEyePosition = XMVectorNegate(EyePosition); + + XMVECTOR D0 = XMVector3Dot(R0, NegEyePosition); + XMVECTOR D1 = XMVector3Dot(R1, NegEyePosition); + XMVECTOR D2 = XMVector3Dot(R2, NegEyePosition); + + XMMATRIX M; + M.r[0] = XMVectorSelect(D0, R0, g_XMSelect1110.v); + M.r[1] = XMVectorSelect(D1, R1, g_XMSelect1110.v); + M.r[2] = XMVectorSelect(D2, R2, g_XMSelect1110.v); + M.r[3] = g_XMIdentityR3.v; + + M = 
XMMatrixTranspose(M); + + return M; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixLookToRH +( + FXMVECTOR EyePosition, + FXMVECTOR EyeDirection, + FXMVECTOR UpDirection +) noexcept +{ + XMVECTOR NegEyeDirection = XMVectorNegate(EyeDirection); + return XMMatrixLookToLH(EyePosition, NegEyeDirection, UpDirection); +} + +//------------------------------------------------------------------------------ + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable:28931, "PREfast noise: Esp:1266") +#endif + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveLH +( + float ViewWidth, + float ViewHeight, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(ViewWidth, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(ViewHeight, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = TwoNearZ / ViewWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = TwoNearZ / ViewHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (FarZ - NearZ); + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(TwoNearZ / ViewWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(TwoNearZ / ViewHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, g_XMIdentityR3.v, 2); + M.r[3] = vsetq_lane_f32(-fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (FarZ - NearZ); + // Note: This is 
recorded on the stack + XMVECTOR rMem = { + TwoNearZ / ViewWidth, + TwoNearZ / ViewHeight, + fRange, + -fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // TwoNearZ / ViewWidth,0,0,0 + M.r[0] = vTemp; + // 0,TwoNearZ / ViewHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,1.0f + vValues = _mm_shuffle_ps(vValues, g_XMIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,1.0f + vTemp = _mm_setzero_ps(); + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,-fRange * NearZ,0 + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveRH +( + float ViewWidth, + float ViewHeight, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(ViewWidth, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(ViewHeight, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = TwoNearZ / ViewWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = TwoNearZ / ViewHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = -1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (NearZ - FarZ); + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(TwoNearZ / ViewWidth, Zero, 0); + 
M.r[1] = vsetq_lane_f32(TwoNearZ / ViewHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, g_XMNegIdentityR3.v, 2); + M.r[3] = vsetq_lane_f32(fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float TwoNearZ = NearZ + NearZ; + float fRange = FarZ / (NearZ - FarZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + TwoNearZ / ViewWidth, + TwoNearZ / ViewHeight, + fRange, + fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // TwoNearZ / ViewWidth,0,0,0 + M.r[0] = vTemp; + // 0,TwoNearZ / ViewHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,-1.0f + vValues = _mm_shuffle_ps(vValues, g_XMNegIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,-1.0f + vTemp = _mm_setzero_ps(); + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,-fRange * NearZ,0 + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovLH +( + float FovAngleY, + float AspectRatio, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(FovAngleY, 0.0f, 0.00001f * 2.0f)); + assert(!XMScalarNearEqual(AspectRatio, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + + float Height = CosFov / SinFov; + float Width = Height / AspectRatio; + float fRange = FarZ / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = Width; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = Height; + M.m[1][2] = 0.0f; + 
M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + + float fRange = FarZ / (FarZ - NearZ); + float Height = CosFov / SinFov; + float Width = Height / AspectRatio; + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(Width, Zero, 0); + M.r[1] = vsetq_lane_f32(Height, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, g_XMIdentityR3.v, 2); + M.r[3] = vsetq_lane_f32(-fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + + float fRange = FarZ / (FarZ - NearZ); + // Note: This is recorded on the stack + float Height = CosFov / SinFov; + XMVECTOR rMem = { + Height / AspectRatio, + Height, + fRange, + -fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // CosFov / SinFov,0,0,0 + XMMATRIX M; + M.r[0] = vTemp; + // 0,Height / AspectRatio,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,1.0f + vTemp = _mm_setzero_ps(); + vValues = _mm_shuffle_ps(vValues, g_XMIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,1.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,-fRange * NearZ,0.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovRH +( + float FovAngleY, + float AspectRatio, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 
0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(FovAngleY, 0.0f, 0.00001f * 2.0f)); + assert(!XMScalarNearEqual(AspectRatio, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + + float Height = CosFov / SinFov; + float Width = Height / AspectRatio; + float fRange = FarZ / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = Width; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = Height; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = -1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + float fRange = FarZ / (NearZ - FarZ); + float Height = CosFov / SinFov; + float Width = Height / AspectRatio; + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(Width, Zero, 0); + M.r[1] = vsetq_lane_f32(Height, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, g_XMNegIdentityR3.v, 2); + M.r[3] = vsetq_lane_f32(fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + float SinFov; + float CosFov; + XMScalarSinCos(&SinFov, &CosFov, 0.5f * FovAngleY); + float fRange = FarZ / (NearZ - FarZ); + // Note: This is recorded on the stack + float Height = CosFov / SinFov; + XMVECTOR rMem = { + Height / AspectRatio, + Height, + fRange, + fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // CosFov / SinFov,0,0,0 + XMMATRIX M; + M.r[0] = vTemp; + // 0,Height / AspectRatio,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,-1.0f 
+ vTemp = _mm_setzero_ps(); + vValues = _mm_shuffle_ps(vValues, g_XMNegIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,-1.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,fRange * NearZ,0.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterLH +( + float ViewLeft, + float ViewRight, + float ViewBottom, + float ViewTop, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(ViewRight, ViewLeft, 0.00001f)); + assert(!XMScalarNearEqual(ViewTop, ViewBottom, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = TwoNearZ * ReciprocalWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = TwoNearZ * ReciprocalHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = -(ViewLeft + ViewRight) * ReciprocalWidth; + M.m[2][1] = -(ViewTop + ViewBottom) * ReciprocalHeight; + M.m[2][2] = fRange; + M.m[2][3] = 1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (FarZ - NearZ); + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(TwoNearZ * ReciprocalWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(TwoNearZ * ReciprocalHeight, Zero, 1); + M.r[2] = XMVectorSet(-(ViewLeft + 
ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + fRange, + 1.0f); + M.r[3] = vsetq_lane_f32(-fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (FarZ - NearZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + TwoNearZ * ReciprocalWidth, + TwoNearZ * ReciprocalHeight, + -fRange * NearZ, + 0 + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // TwoNearZ*ReciprocalWidth,0,0,0 + M.r[0] = vTemp; + // 0,TwoNearZ*ReciprocalHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // 0,0,fRange,1.0f + M.r[2] = XMVectorSet(-(ViewLeft + ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + fRange, + 1.0f); + // 0,0,-fRange * NearZ,0.0f + vValues = _mm_and_ps(vValues, g_XMMaskZ); + M.r[3] = vValues; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterRH +( + float ViewLeft, + float ViewRight, + float ViewBottom, + float ViewTop, + float NearZ, + float FarZ +) noexcept +{ + assert(NearZ > 0.f && FarZ > 0.f); + assert(!XMScalarNearEqual(ViewRight, ViewLeft, 0.00001f)); + assert(!XMScalarNearEqual(ViewTop, ViewBottom, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = TwoNearZ * ReciprocalWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] 
= TwoNearZ * ReciprocalHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = (ViewLeft + ViewRight) * ReciprocalWidth; + M.m[2][1] = (ViewTop + ViewBottom) * ReciprocalHeight; + M.m[2][2] = fRange; + M.m[2][3] = -1.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = fRange * NearZ; + M.m[3][3] = 0.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (NearZ - FarZ); + const XMVECTOR Zero = vdupq_n_f32(0); + + XMMATRIX M; + M.r[0] = vsetq_lane_f32(TwoNearZ * ReciprocalWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(TwoNearZ * ReciprocalHeight, Zero, 1); + M.r[2] = XMVectorSet((ViewLeft + ViewRight) * ReciprocalWidth, + (ViewTop + ViewBottom) * ReciprocalHeight, + fRange, + -1.0f); + M.r[3] = vsetq_lane_f32(fRange * NearZ, Zero, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float TwoNearZ = NearZ + NearZ; + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = FarZ / (NearZ - FarZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + TwoNearZ * ReciprocalWidth, + TwoNearZ * ReciprocalHeight, + fRange * NearZ, + 0 + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // TwoNearZ*ReciprocalWidth,0,0,0 + M.r[0] = vTemp; + // 0,TwoNearZ*ReciprocalHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // 0,0,fRange,1.0f + M.r[2] = XMVectorSet((ViewLeft + ViewRight) * ReciprocalWidth, + (ViewTop + ViewBottom) * ReciprocalHeight, + fRange, + -1.0f); + // 0,0,-fRange * NearZ,0.0f + vValues = _mm_and_ps(vValues, g_XMMaskZ); + M.r[3] = vValues; + return M; +#endif +} + 
+//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixOrthographicLH +( + float ViewWidth, + float ViewHeight, + float NearZ, + float FarZ +) noexcept +{ + assert(!XMScalarNearEqual(ViewWidth, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(ViewHeight, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float fRange = 1.0f / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = 2.0f / ViewWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = 2.0f / ViewHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fRange = 1.0f / (FarZ - NearZ); + + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(2.0f / ViewWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(2.0f / ViewHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, Zero, 2); + M.r[3] = vsetq_lane_f32(-fRange * NearZ, g_XMIdentityR3.v, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float fRange = 1.0f / (FarZ - NearZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + 2.0f / ViewWidth, + 2.0f / ViewHeight, + fRange, + -fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // 2.0f / ViewWidth,0,0,0 + M.r[0] = vTemp; + // 0,2.0f / ViewHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=-fRange * NearZ,0,1.0f + vTemp = _mm_setzero_ps(); + vValues = _mm_shuffle_ps(vValues, g_XMIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,0.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 0, 0, 0)); + 
M.r[2] = vTemp; + // 0,0,-fRange * NearZ,1.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixOrthographicRH +( + float ViewWidth, + float ViewHeight, + float NearZ, + float FarZ +) noexcept +{ + assert(!XMScalarNearEqual(ViewWidth, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(ViewHeight, 0.0f, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float fRange = 1.0f / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = 2.0f / ViewWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = 2.0f / ViewHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 0.0f; + + M.m[3][0] = 0.0f; + M.m[3][1] = 0.0f; + M.m[3][2] = fRange * NearZ; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float fRange = 1.0f / (NearZ - FarZ); + + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(2.0f / ViewWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(2.0f / ViewHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, Zero, 2); + M.r[3] = vsetq_lane_f32(fRange * NearZ, g_XMIdentityR3.v, 2); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float fRange = 1.0f / (NearZ - FarZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + 2.0f / ViewWidth, + 2.0f / ViewHeight, + fRange, + fRange * NearZ + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // 2.0f / ViewWidth,0,0,0 + M.r[0] = vTemp; + // 0,2.0f / ViewHeight,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + M.r[1] = vTemp; + // x=fRange,y=fRange * NearZ,0,1.0f + vTemp = _mm_setzero_ps(); + vValues = 
_mm_shuffle_ps(vValues, g_XMIdentityR3, _MM_SHUFFLE(3, 2, 3, 2)); + // 0,0,fRange,0.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(2, 0, 0, 0)); + M.r[2] = vTemp; + // 0,0,fRange * NearZ,1.0f + vTemp = _mm_shuffle_ps(vTemp, vValues, _MM_SHUFFLE(3, 1, 0, 0)); + M.r[3] = vTemp; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterLH +( + float ViewLeft, + float ViewRight, + float ViewBottom, + float ViewTop, + float NearZ, + float FarZ +) noexcept +{ + assert(!XMScalarNearEqual(ViewRight, ViewLeft, 0.00001f)); + assert(!XMScalarNearEqual(ViewTop, ViewBottom, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (FarZ - NearZ); + + XMMATRIX M; + M.m[0][0] = ReciprocalWidth + ReciprocalWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = ReciprocalHeight + ReciprocalHeight; + M.m[1][2] = 0.0f; + M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 0.0f; + + M.m[3][0] = -(ViewLeft + ViewRight) * ReciprocalWidth; + M.m[3][1] = -(ViewTop + ViewBottom) * ReciprocalHeight; + M.m[3][2] = -fRange * NearZ; + M.m[3][3] = 1.0f; + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (FarZ - NearZ); + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(ReciprocalWidth + ReciprocalWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(ReciprocalHeight + ReciprocalHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, Zero, 2); + M.r[3] = XMVectorSet(-(ViewLeft + ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + 
-fRange * NearZ, + 1.0f); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float fReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float fReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (FarZ - NearZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + fReciprocalWidth, + fReciprocalHeight, + fRange, + 1.0f + }; + XMVECTOR rMem2 = { + -(ViewLeft + ViewRight), + -(ViewTop + ViewBottom), + -NearZ, + 1.0f + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // fReciprocalWidth*2,0,0,0 + vTemp = _mm_add_ss(vTemp, vTemp); + M.r[0] = vTemp; + // 0,fReciprocalHeight*2,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + vTemp = _mm_add_ps(vTemp, vTemp); + M.r[1] = vTemp; + // 0,0,fRange,0.0f + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskZ); + M.r[2] = vTemp; + // -(ViewLeft + ViewRight)*fReciprocalWidth,-(ViewTop + ViewBottom)*fReciprocalHeight,fRange*-NearZ,1.0f + vValues = _mm_mul_ps(vValues, rMem2); + M.r[3] = vValues; + return M; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterRH +( + float ViewLeft, + float ViewRight, + float ViewBottom, + float ViewTop, + float NearZ, + float FarZ +) noexcept +{ + assert(!XMScalarNearEqual(ViewRight, ViewLeft, 0.00001f)); + assert(!XMScalarNearEqual(ViewTop, ViewBottom, 0.00001f)); + assert(!XMScalarNearEqual(FarZ, NearZ, 0.00001f)); + +#if defined(_XM_NO_INTRINSICS_) + + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (NearZ - FarZ); + + XMMATRIX M; + M.m[0][0] = ReciprocalWidth + ReciprocalWidth; + M.m[0][1] = 0.0f; + M.m[0][2] = 0.0f; + M.m[0][3] = 0.0f; + + M.m[1][0] = 0.0f; + M.m[1][1] = ReciprocalHeight + ReciprocalHeight; + M.m[1][2] = 0.0f; + 
M.m[1][3] = 0.0f; + + M.m[2][0] = 0.0f; + M.m[2][1] = 0.0f; + M.m[2][2] = fRange; + M.m[2][3] = 0.0f; + + M.r[3] = XMVectorSet(-(ViewLeft + ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + fRange * NearZ, + 1.0f); + return M; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float ReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float ReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (NearZ - FarZ); + const XMVECTOR Zero = vdupq_n_f32(0); + XMMATRIX M; + M.r[0] = vsetq_lane_f32(ReciprocalWidth + ReciprocalWidth, Zero, 0); + M.r[1] = vsetq_lane_f32(ReciprocalHeight + ReciprocalHeight, Zero, 1); + M.r[2] = vsetq_lane_f32(fRange, Zero, 2); + M.r[3] = XMVectorSet(-(ViewLeft + ViewRight) * ReciprocalWidth, + -(ViewTop + ViewBottom) * ReciprocalHeight, + fRange * NearZ, + 1.0f); + return M; +#elif defined(_XM_SSE_INTRINSICS_) + XMMATRIX M; + float fReciprocalWidth = 1.0f / (ViewRight - ViewLeft); + float fReciprocalHeight = 1.0f / (ViewTop - ViewBottom); + float fRange = 1.0f / (NearZ - FarZ); + // Note: This is recorded on the stack + XMVECTOR rMem = { + fReciprocalWidth, + fReciprocalHeight, + fRange, + 1.0f + }; + XMVECTOR rMem2 = { + -(ViewLeft + ViewRight), + -(ViewTop + ViewBottom), + NearZ, + 1.0f + }; + // Copy from memory to SSE register + XMVECTOR vValues = rMem; + XMVECTOR vTemp = _mm_setzero_ps(); + // Copy x only + vTemp = _mm_move_ss(vTemp, vValues); + // fReciprocalWidth*2,0,0,0 + vTemp = _mm_add_ss(vTemp, vTemp); + M.r[0] = vTemp; + // 0,fReciprocalHeight*2,0,0 + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskY); + vTemp = _mm_add_ps(vTemp, vTemp); + M.r[1] = vTemp; + // 0,0,fRange,0.0f + vTemp = vValues; + vTemp = _mm_and_ps(vTemp, g_XMMaskZ); + M.r[2] = vTemp; + // -(ViewLeft + ViewRight)*fReciprocalWidth,-(ViewTop + ViewBottom)*fReciprocalHeight,fRange*-NearZ,1.0f + vValues = _mm_mul_ps(vValues, rMem2); + M.r[3] = vValues; + return M; +#endif +} + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + 
+/**************************************************************************** + * + * XMMATRIX operators and methods + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + +inline XMMATRIX::XMMATRIX +( + float m00, float m01, float m02, float m03, + float m10, float m11, float m12, float m13, + float m20, float m21, float m22, float m23, + float m30, float m31, float m32, float m33 +) noexcept +{ + r[0] = XMVectorSet(m00, m01, m02, m03); + r[1] = XMVectorSet(m10, m11, m12, m13); + r[2] = XMVectorSet(m20, m21, m22, m23); + r[3] = XMVectorSet(m30, m31, m32, m33); +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMMATRIX::XMMATRIX(const float* pArray) noexcept +{ + assert(pArray != nullptr); + r[0] = XMLoadFloat4(reinterpret_cast(pArray)); + r[1] = XMLoadFloat4(reinterpret_cast(pArray + 4)); + r[2] = XMLoadFloat4(reinterpret_cast(pArray + 8)); + r[3] = XMLoadFloat4(reinterpret_cast(pArray + 12)); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XMMATRIX::operator- () const noexcept +{ + XMMATRIX R; + R.r[0] = XMVectorNegate(r[0]); + R.r[1] = XMVectorNegate(r[1]); + R.r[2] = XMVectorNegate(r[2]); + R.r[3] = XMVectorNegate(r[3]); + return R; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX& XM_CALLCONV XMMATRIX::operator+= (FXMMATRIX M) noexcept +{ + r[0] = XMVectorAdd(r[0], M.r[0]); + r[1] = XMVectorAdd(r[1], M.r[1]); + r[2] = XMVectorAdd(r[2], M.r[2]); + r[3] = XMVectorAdd(r[3], M.r[3]); + return *this; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX& XM_CALLCONV XMMATRIX::operator-= (FXMMATRIX M) noexcept +{ + r[0] = XMVectorSubtract(r[0], M.r[0]); + r[1] = XMVectorSubtract(r[1], M.r[1]); + r[2] = 
XMVectorSubtract(r[2], M.r[2]); + r[3] = XMVectorSubtract(r[3], M.r[3]); + return *this; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX& XM_CALLCONV XMMATRIX::operator*=(FXMMATRIX M) noexcept +{ + *this = XMMatrixMultiply(*this, M); + return *this; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX& XMMATRIX::operator*= (float S) noexcept +{ + r[0] = XMVectorScale(r[0], S); + r[1] = XMVectorScale(r[1], S); + r[2] = XMVectorScale(r[2], S); + r[3] = XMVectorScale(r[3], S); + return *this; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX& XMMATRIX::operator/= (float S) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR vS = XMVectorReplicate(S); + r[0] = XMVectorDivide(r[0], vS); + r[1] = XMVectorDivide(r[1], vS); + r[2] = XMVectorDivide(r[2], vS); + r[3] = XMVectorDivide(r[3], vS); + return *this; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + float32x4_t vS = vdupq_n_f32(S); + r[0] = vdivq_f32(r[0], vS); + r[1] = vdivq_f32(r[1], vS); + r[2] = vdivq_f32(r[2], vS); + r[3] = vdivq_f32(r[3], vS); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x2_t vS = vdup_n_f32(S); + float32x2_t R0 = vrecpe_f32(vS); + float32x2_t S0 = vrecps_f32(R0, vS); + R0 = vmul_f32(S0, R0); + S0 = vrecps_f32(R0, vS); + R0 = vmul_f32(S0, R0); + float32x4_t Reciprocal = vcombine_u32(R0, R0); + r[0] = vmulq_f32(r[0], Reciprocal); + r[1] = vmulq_f32(r[1], Reciprocal); + r[2] = vmulq_f32(r[2], Reciprocal); + r[3] = vmulq_f32(r[3], Reciprocal); +#endif + return *this; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 vS = _mm_set_ps1(S); + r[0] = _mm_div_ps(r[0], vS); + r[1] = _mm_div_ps(r[1], vS); + r[2] = _mm_div_ps(r[2], vS); + r[3] = _mm_div_ps(r[3], vS); + return *this; +#endif +} + 
+//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMATRIX::operator+ (FXMMATRIX M) const noexcept +{ + XMMATRIX R; + R.r[0] = XMVectorAdd(r[0], M.r[0]); + R.r[1] = XMVectorAdd(r[1], M.r[1]); + R.r[2] = XMVectorAdd(r[2], M.r[2]); + R.r[3] = XMVectorAdd(r[3], M.r[3]); + return R; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMATRIX::operator- (FXMMATRIX M) const noexcept +{ + XMMATRIX R; + R.r[0] = XMVectorSubtract(r[0], M.r[0]); + R.r[1] = XMVectorSubtract(r[1], M.r[1]); + R.r[2] = XMVectorSubtract(r[2], M.r[2]); + R.r[3] = XMVectorSubtract(r[3], M.r[3]); + return R; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV XMMATRIX::operator*(FXMMATRIX M) const noexcept +{ + return XMMatrixMultiply(*this, M); +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XMMATRIX::operator* (float S) const noexcept +{ + XMMATRIX R; + R.r[0] = XMVectorScale(r[0], S); + R.r[1] = XMVectorScale(r[1], S); + R.r[2] = XMVectorScale(r[2], S); + R.r[3] = XMVectorScale(r[3], S); + return R; +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XMMATRIX::operator/ (float S) const noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR vS = XMVectorReplicate(S); + XMMATRIX R; + R.r[0] = XMVectorDivide(r[0], vS); + R.r[1] = XMVectorDivide(r[1], vS); + R.r[2] = XMVectorDivide(r[2], vS); + R.r[3] = XMVectorDivide(r[3], vS); + return R; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + float32x4_t vS = vdupq_n_f32(S); + XMMATRIX R; + R.r[0] = vdivq_f32(r[0], vS); + R.r[1] = vdivq_f32(r[1], vS); + R.r[2] = vdivq_f32(r[2], vS); + R.r[3] = vdivq_f32(r[3], vS); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + 
float32x2_t vS = vdup_n_f32(S); + float32x2_t R0 = vrecpe_f32(vS); + float32x2_t S0 = vrecps_f32(R0, vS); + R0 = vmul_f32(S0, R0); + S0 = vrecps_f32(R0, vS); + R0 = vmul_f32(S0, R0); + float32x4_t Reciprocal = vcombine_u32(R0, R0); + XMMATRIX R; + R.r[0] = vmulq_f32(r[0], Reciprocal); + R.r[1] = vmulq_f32(r[1], Reciprocal); + R.r[2] = vmulq_f32(r[2], Reciprocal); + R.r[3] = vmulq_f32(r[3], Reciprocal); +#endif + return R; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 vS = _mm_set_ps1(S); + XMMATRIX R; + R.r[0] = _mm_div_ps(r[0], vS); + R.r[1] = _mm_div_ps(r[1], vS); + R.r[2] = _mm_div_ps(r[2], vS); + R.r[3] = _mm_div_ps(r[3], vS); + return R; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMMATRIX XM_CALLCONV operator* +( + float S, + FXMMATRIX M +) noexcept +{ + XMMATRIX R; + R.r[0] = XMVectorScale(M.r[0], S); + R.r[1] = XMVectorScale(M.r[1], S); + R.r[2] = XMVectorScale(M.r[2], S); + R.r[3] = XMVectorScale(M.r[3], S); + return R; +} + +/**************************************************************************** + * + * XMFLOAT3X3 operators + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT3X3::XMFLOAT3X3(const float* pArray) noexcept +{ + assert(pArray != nullptr); + for (size_t Row = 0; Row < 3; Row++) + { + for (size_t Column = 0; Column < 3; Column++) + { + m[Row][Column] = pArray[Row * 3 + Column]; + } + } +} + +/**************************************************************************** + * + * XMFLOAT4X3 operators + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT4X3::XMFLOAT4X3(const float* pArray) noexcept +{ + assert(pArray != nullptr); + + m[0][0] = pArray[0]; + m[0][1] = pArray[1]; + 
m[0][2] = pArray[2]; + + m[1][0] = pArray[3]; + m[1][1] = pArray[4]; + m[1][2] = pArray[5]; + + m[2][0] = pArray[6]; + m[2][1] = pArray[7]; + m[2][2] = pArray[8]; + + m[3][0] = pArray[9]; + m[3][1] = pArray[10]; + m[3][2] = pArray[11]; +} + +/**************************************************************************** +* +* XMFLOAT3X4 operators +* +****************************************************************************/ + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT3X4::XMFLOAT3X4(const float* pArray) noexcept +{ + assert(pArray != nullptr); + + m[0][0] = pArray[0]; + m[0][1] = pArray[1]; + m[0][2] = pArray[2]; + m[0][3] = pArray[3]; + + m[1][0] = pArray[4]; + m[1][1] = pArray[5]; + m[1][2] = pArray[6]; + m[1][3] = pArray[7]; + + m[2][0] = pArray[8]; + m[2][1] = pArray[9]; + m[2][2] = pArray[10]; + m[2][3] = pArray[11]; +} + +/**************************************************************************** + * + * XMFLOAT4X4 operators + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT4X4::XMFLOAT4X4(const float* pArray) noexcept +{ + assert(pArray != nullptr); + + m[0][0] = pArray[0]; + m[0][1] = pArray[1]; + m[0][2] = pArray[2]; + m[0][3] = pArray[3]; + + m[1][0] = pArray[4]; + m[1][1] = pArray[5]; + m[1][2] = pArray[6]; + m[1][3] = pArray[7]; + + m[2][0] = pArray[8]; + m[2][1] = pArray[9]; + m[2][2] = pArray[10]; + m[2][3] = pArray[11]; + + m[3][0] = pArray[12]; + m[3][1] = pArray[13]; + m[3][2] = pArray[14]; + m[3][3] = pArray[15]; +} + diff --git a/include/directxmath/directxmathmisc.inl b/include/directxmath/directxmathmisc.inl new file mode 100644 index 0000000..aca863b --- /dev/null +++ b/include/directxmath/directxmathmisc.inl @@ -0,0 +1,2425 @@ 
//-------------------------------------------------------------------------------------
// DirectXMathMisc.inl -- SIMD C++ Math library
//
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
//
// http://go.microsoft.com/fwlink/?LinkID=615560
//-------------------------------------------------------------------------------------

#pragma once

/****************************************************************************
 *
 * Quaternion
 *
 ****************************************************************************/

 //------------------------------------------------------------------------------
 // Comparison operations
 //------------------------------------------------------------------------------

 //------------------------------------------------------------------------------

// Exact component-wise equality of two quaternions (delegates to the
// 4-vector comparison; no epsilon tolerance).
inline bool XM_CALLCONV XMQuaternionEqual
(
    FXMVECTOR Q1,
    FXMVECTOR Q2
) noexcept
{
    return XMVector4Equal(Q1, Q2);
}

//------------------------------------------------------------------------------

// Exact component-wise inequality of two quaternions.
inline bool XM_CALLCONV XMQuaternionNotEqual
(
    FXMVECTOR Q1,
    FXMVECTOR Q2
) noexcept
{
    return XMVector4NotEqual(Q1, Q2);
}

//------------------------------------------------------------------------------

// True if any component of Q is NaN.
inline bool XM_CALLCONV XMQuaternionIsNaN(FXMVECTOR Q) noexcept
{
    return XMVector4IsNaN(Q);
}

//------------------------------------------------------------------------------

// True if any component of Q is +/- infinity.
inline bool XM_CALLCONV XMQuaternionIsInfinite(FXMVECTOR Q) noexcept
{
    return XMVector4IsInfinite(Q);
}

//------------------------------------------------------------------------------

// True if Q is exactly the identity quaternion (0, 0, 0, 1).
inline bool XM_CALLCONV XMQuaternionIsIdentity(FXMVECTOR Q) noexcept
{
    return XMVector4Equal(Q, g_XMIdentityR3.v);
}

//------------------------------------------------------------------------------
// Computation operations
//------------------------------------------------------------------------------


//------------------------------------------------------------------------------

// 4-component dot product of two quaternions, replicated to all lanes.
inline XMVECTOR XM_CALLCONV XMQuaternionDot
(
    FXMVECTOR Q1,
    FXMVECTOR Q2
) noexcept
{
    return XMVector4Dot(Q1, Q2);
}

//------------------------------------------------------------------------------

// Hamilton product, DirectXMath convention: the return value represents the
// rotation Q1 followed by the rotation Q2 (i.e. the algebraic product Q2*Q1).
inline XMVECTOR XM_CALLCONV XMQuaternionMultiply
(
    FXMVECTOR Q1,
    FXMVECTOR Q2
) noexcept
{
    // Returns the product Q2*Q1 (which is the concatenation of a rotation Q1 followed by the rotation Q2)

    // [ (Q2.w * Q1.x) + (Q2.x * Q1.w) + (Q2.y * Q1.z) - (Q2.z * Q1.y),
    //   (Q2.w * Q1.y) - (Q2.x * Q1.z) + (Q2.y * Q1.w) + (Q2.z * Q1.x),
    //   (Q2.w * Q1.z) + (Q2.x * Q1.y) - (Q2.y * Q1.x) + (Q2.z * Q1.w),
    //   (Q2.w * Q1.w) - (Q2.x * Q1.x) - (Q2.y * Q1.y) - (Q2.z * Q1.z) ]

#if defined(_XM_NO_INTRINSICS_)
    XMVECTORF32 Result = { { {
            (Q2.vector4_f32[3] * Q1.vector4_f32[0]) + (Q2.vector4_f32[0] * Q1.vector4_f32[3]) + (Q2.vector4_f32[1] * Q1.vector4_f32[2]) - (Q2.vector4_f32[2] * Q1.vector4_f32[1]),
            (Q2.vector4_f32[3] * Q1.vector4_f32[1]) - (Q2.vector4_f32[0] * Q1.vector4_f32[2]) + (Q2.vector4_f32[1] * Q1.vector4_f32[3]) + (Q2.vector4_f32[2] * Q1.vector4_f32[0]),
            (Q2.vector4_f32[3] * Q1.vector4_f32[2]) + (Q2.vector4_f32[0] * Q1.vector4_f32[1]) - (Q2.vector4_f32[1] * Q1.vector4_f32[0]) + (Q2.vector4_f32[2] * Q1.vector4_f32[3]),
            (Q2.vector4_f32[3] * Q1.vector4_f32[3]) - (Q2.vector4_f32[0] * Q1.vector4_f32[0]) - (Q2.vector4_f32[1] * Q1.vector4_f32[1]) - (Q2.vector4_f32[2] * Q1.vector4_f32[2])
        } } };
    return Result.v;
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Sign masks implement the +/- pattern of the expansion above, one
    // control vector per shuffled copy of Q1.
    static const XMVECTORF32 ControlWZYX = { { { 1.0f, -1.0f, 1.0f, -1.0f } } };
    static const XMVECTORF32 ControlZWXY = { { { 1.0f, 1.0f, -1.0f, -1.0f } } };
    static const XMVECTORF32 ControlYXWZ = { { { -1.0f, 1.0f, 1.0f, -1.0f } } };

    float32x2_t Q2L = vget_low_f32(Q2);
    float32x2_t Q2H = vget_high_f32(Q2);

    float32x4_t Q2X = vdupq_lane_f32(Q2L, 0);
    float32x4_t Q2Y = vdupq_lane_f32(Q2L, 1);

    float32x4_t Q2Z = vdupq_lane_f32(Q2H, 0);
    XMVECTOR vResult = vmulq_lane_f32(Q1, Q2H, 1);

    // Mul by Q1WZYX
    float32x4_t vTemp = vrev64q_f32(Q1);
    vTemp = vcombine_f32(vget_high_f32(vTemp), vget_low_f32(vTemp));
    Q2X = vmulq_f32(Q2X, vTemp);
    vResult = vmlaq_f32(vResult, Q2X, ControlWZYX);

    // Mul by Q1ZWXY
    // NOTE(review): vrev64q_u32 applied to a float32x4_t type-checks only on
    // MSVC (where NEON vectors are all __n128); GCC/Clang would need
    // vrev64q_f32 or vreinterpret wrappers — confirm target toolchain.
    vTemp = vrev64q_u32(vTemp);
    Q2Y = vmulq_f32(Q2Y, vTemp);
    vResult = vmlaq_f32(vResult, Q2Y, ControlZWXY);

    // Mul by Q1YXWZ
    vTemp = vrev64q_u32(vTemp);
    vTemp = vcombine_f32(vget_high_f32(vTemp), vget_low_f32(vTemp));
    Q2Z = vmulq_f32(Q2Z, vTemp);
    vResult = vmlaq_f32(vResult, Q2Z, ControlYXWZ);
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    static const XMVECTORF32 ControlWZYX = { { { 1.0f, -1.0f, 1.0f, -1.0f } } };
    static const XMVECTORF32 ControlZWXY = { { { 1.0f, 1.0f, -1.0f, -1.0f } } };
    static const XMVECTORF32 ControlYXWZ = { { { -1.0f, 1.0f, 1.0f, -1.0f } } };
    // Copy to SSE registers and use as few as possible for x86
    XMVECTOR Q2X = Q2;
    XMVECTOR Q2Y = Q2;
    XMVECTOR Q2Z = Q2;
    XMVECTOR vResult = Q2;
    // Splat with one instruction
    vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 3, 3, 3));
    Q2X = XM_PERMUTE_PS(Q2X, _MM_SHUFFLE(0, 0, 0, 0));
    Q2Y = XM_PERMUTE_PS(Q2Y, _MM_SHUFFLE(1, 1, 1, 1));
    Q2Z = XM_PERMUTE_PS(Q2Z, _MM_SHUFFLE(2, 2, 2, 2));
    // Retire Q1 and perform Q1*Q2W
    vResult = _mm_mul_ps(vResult, Q1);
    XMVECTOR Q1Shuffle = Q1;
    // Shuffle the copies of Q1
    Q1Shuffle = XM_PERMUTE_PS(Q1Shuffle, _MM_SHUFFLE(0, 1, 2, 3));
    // Mul by Q1WZYX
    Q2X = _mm_mul_ps(Q2X, Q1Shuffle);
    Q1Shuffle = XM_PERMUTE_PS(Q1Shuffle, _MM_SHUFFLE(2, 3, 0, 1));
    // Flip the signs on y and z
    vResult = XM_FMADD_PS(Q2X, ControlWZYX, vResult);
    // Mul by Q1ZWXY
    Q2Y = _mm_mul_ps(Q2Y, Q1Shuffle);
    Q1Shuffle = XM_PERMUTE_PS(Q1Shuffle, _MM_SHUFFLE(0, 1, 2, 3));
    // Flip the signs on z and w
    Q2Y = _mm_mul_ps(Q2Y, ControlZWXY);
    // Mul by Q1YXWZ
    Q2Z = _mm_mul_ps(Q2Z, Q1Shuffle);
    // Flip the signs on x and w
    Q2Y = XM_FMADD_PS(Q2Z, ControlYXWZ, Q2Y);
    vResult = _mm_add_ps(vResult, Q2Y);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Squared length of Q, replicated to all lanes.
inline XMVECTOR XM_CALLCONV XMQuaternionLengthSq(FXMVECTOR Q) noexcept
{
    return XMVector4LengthSq(Q);
}

//------------------------------------------------------------------------------

// 1 / |Q|, replicated to all lanes.
inline XMVECTOR XM_CALLCONV XMQuaternionReciprocalLength(FXMVECTOR Q) noexcept
{
    return XMVector4ReciprocalLength(Q);
}

//------------------------------------------------------------------------------

// |Q|, replicated to all lanes.
inline XMVECTOR XM_CALLCONV XMQuaternionLength(FXMVECTOR Q) noexcept
{
    return XMVector4Length(Q);
}

//------------------------------------------------------------------------------

// Fast approximate normalization (uses the estimate reciprocal square root).
inline XMVECTOR XM_CALLCONV XMQuaternionNormalizeEst(FXMVECTOR Q) noexcept
{
    return XMVector4NormalizeEst(Q);
}

//------------------------------------------------------------------------------

// Full-precision normalization of Q to unit length.
inline XMVECTOR XM_CALLCONV XMQuaternionNormalize(FXMVECTOR Q) noexcept
{
    return XMVector4Normalize(Q);
}

//------------------------------------------------------------------------------

// Conjugate: negate the vector part (x, y, z), keep w.
inline XMVECTOR XM_CALLCONV XMQuaternionConjugate(FXMVECTOR Q) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    XMVECTORF32 Result = { { {
            -Q.vector4_f32[0],
            -Q.vector4_f32[1],
            -Q.vector4_f32[2],
            Q.vector4_f32[3]
        } } };
    return Result.v;
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    static const XMVECTORF32 NegativeOne3 = { { { -1.0f, -1.0f, -1.0f, 1.0f } } };
    return vmulq_f32(Q, NegativeOne3.v);
#elif defined(_XM_SSE_INTRINSICS_)
    static const XMVECTORF32 NegativeOne3 = { { { -1.0f, -1.0f, -1.0f, 1.0f } } };
    return _mm_mul_ps(Q, NegativeOne3);
#endif
}

//------------------------------------------------------------------------------

// Inverse: conjugate(Q) / |Q|^2. Returns the zero vector when |Q|^2 is at or
// below epsilon (instead of dividing by ~0).
inline XMVECTOR XM_CALLCONV XMQuaternionInverse(FXMVECTOR Q) noexcept
{
    const XMVECTOR Zero = XMVectorZero();

    XMVECTOR L = XMVector4LengthSq(Q);
    XMVECTOR Conjugate = XMQuaternionConjugate(Q);

    XMVECTOR Control = XMVectorLessOrEqual(L, g_XMEpsilon.v);

    XMVECTOR Result = XMVectorDivide(Conjugate, L);

    // Near-zero quaternions map to zero rather than Inf/NaN.
    Result = XMVectorSelect(Result, Zero, Control);

    return Result;
}

//------------------------------------------------------------------------------

// Natural log of a UNIT quaternion: (theta/sin(theta)) * (x, y, z) with w
// forced through the select mask. Falls back to the raw vector part when
// |w| is within epsilon of 1 (theta -> 0, where theta/sin(theta) -> 0/0).
inline XMVECTOR XM_CALLCONV XMQuaternionLn(FXMVECTOR Q) noexcept
{
    static const XMVECTORF32 OneMinusEpsilon = { { { 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f } } };

    XMVECTOR QW = XMVectorSplatW(Q);
    XMVECTOR Q0 = XMVectorSelect(g_XMSelect1110.v, Q, g_XMSelect1110.v);

    XMVECTOR ControlW = XMVectorInBounds(QW, OneMinusEpsilon.v);

    XMVECTOR Theta = XMVectorACos(QW);
    XMVECTOR SinTheta = XMVectorSin(Theta);

    XMVECTOR S = XMVectorDivide(Theta, SinTheta);

    XMVECTOR Result = XMVectorMultiply(Q0, S);
    Result = XMVectorSelect(Q0, Result, ControlW);

    return Result;
}

//------------------------------------------------------------------------------

// Exponential of a pure quaternion (w ignored): result is
// (sin(theta)/theta * xyz, cos(theta)) with theta = |xyz|. Near theta == 0
// the xyz part is passed through unscaled to avoid 0/0.
inline XMVECTOR XM_CALLCONV XMQuaternionExp(FXMVECTOR Q) noexcept
{
    XMVECTOR Theta = XMVector3Length(Q);

    XMVECTOR SinTheta, CosTheta;
    XMVectorSinCos(&SinTheta, &CosTheta, Theta);

    XMVECTOR S = XMVectorDivide(SinTheta, Theta);

    XMVECTOR Result = XMVectorMultiply(Q, S);

    const XMVECTOR Zero = XMVectorZero();
    XMVECTOR Control = XMVectorNearEqual(Theta, Zero, g_XMEpsilon.v);
    Result = XMVectorSelect(Result, Q, Control);

    // Place cos(theta) in the w lane.
    Result = XMVectorSelect(CosTheta, Result, g_XMSelect1110.v);

    return Result;
}

//------------------------------------------------------------------------------

// Spherical linear interpolation with a scalar parameter t in [0, 1];
// delegates to the vectorized form.
inline XMVECTOR XM_CALLCONV XMQuaternionSlerp
(
    FXMVECTOR Q0,
    FXMVECTOR Q1,
    float t
) noexcept
{
    XMVECTOR T = XMVectorReplicate(t);
    return XMQuaternionSlerpV(Q0, Q1, T);
}

//------------------------------------------------------------------------------

// Spherical linear interpolation; T must have the same value replicated in
// all four lanes (asserted below). Negates one input when the dot product is
// negative so interpolation takes the shorter arc, and falls back to plain
// lerp when the quaternions are nearly parallel (sin(Omega) ~ 0).
inline XMVECTOR XM_CALLCONV XMQuaternionSlerpV
(
    FXMVECTOR Q0,
    FXMVECTOR Q1,
    FXMVECTOR T
) noexcept
{
    assert((XMVectorGetY(T) == XMVectorGetX(T)) && (XMVectorGetZ(T) == XMVectorGetX(T)) && (XMVectorGetW(T) == XMVectorGetX(T)));

    // Result = Q0 * sin((1.0 - t) * Omega) / sin(Omega) + Q1 * sin(t * Omega) / sin(Omega)

#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_)

    const XMVECTORF32 OneMinusEpsilon = { { { 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f } } };

    XMVECTOR CosOmega = XMQuaternionDot(Q0, Q1);

    const XMVECTOR Zero = XMVectorZero();
    XMVECTOR Control = XMVectorLess(CosOmega, Zero);
    XMVECTOR Sign = XMVectorSelect(g_XMOne.v, g_XMNegativeOne.v, Control);

    CosOmega = XMVectorMultiply(CosOmega, Sign);

    Control = XMVectorLess(CosOmega, OneMinusEpsilon);

    XMVECTOR SinOmega = XMVectorNegativeMultiplySubtract(CosOmega, CosOmega, g_XMOne.v);
    SinOmega = XMVectorSqrt(SinOmega);

    XMVECTOR Omega = XMVectorATan2(SinOmega, CosOmega);

    // Build V01 = (1 - t, t, 0, 0) via shifts and a sign flip.
    XMVECTOR SignMask = XMVectorSplatSignMask();
    XMVECTOR V01 = XMVectorShiftLeft(T, Zero, 2);
    SignMask = XMVectorShiftLeft(SignMask, Zero, 3);
    V01 = XMVectorXorInt(V01, SignMask);
    V01 = XMVectorAdd(g_XMIdentityR0.v, V01);

    XMVECTOR InvSinOmega = XMVectorReciprocal(SinOmega);

    XMVECTOR S0 = XMVectorMultiply(V01, Omega);
    S0 = XMVectorSin(S0);
    S0 = XMVectorMultiply(S0, InvSinOmega);

    // Nearly-parallel case: use the linear weights directly.
    S0 = XMVectorSelect(V01, S0, Control);

    XMVECTOR S1 = XMVectorSplatY(S0);
    S0 = XMVectorSplatX(S0);

    S1 = XMVectorMultiply(S1, Sign);

    XMVECTOR Result = XMVectorMultiply(Q0, S0);
    Result = XMVectorMultiplyAdd(Q1, S1, Result);

    return Result;

#elif defined(_XM_SSE_INTRINSICS_)
    static const XMVECTORF32 OneMinusEpsilon = { { { 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f, 1.0f - 0.00001f } } };
    static const XMVECTORU32 SignMask2 = { { { 0x80000000, 0x00000000, 0x00000000, 0x00000000 } } };

    XMVECTOR CosOmega = XMQuaternionDot(Q0, Q1);

    const XMVECTOR Zero = XMVectorZero();
    XMVECTOR Control = XMVectorLess(CosOmega, Zero);
    XMVECTOR Sign = XMVectorSelect(g_XMOne, g_XMNegativeOne, Control);

    CosOmega = _mm_mul_ps(CosOmega, Sign);

    Control = XMVectorLess(CosOmega, OneMinusEpsilon);

    XMVECTOR SinOmega = _mm_mul_ps(CosOmega, CosOmega);
    SinOmega = _mm_sub_ps(g_XMOne, SinOmega);
    SinOmega = _mm_sqrt_ps(SinOmega);

    XMVECTOR Omega = XMVectorATan2(SinOmega, CosOmega);

    // Build V01 = (1 - t, t, 0, 0).
    XMVECTOR V01 = XM_PERMUTE_PS(T, _MM_SHUFFLE(2, 3, 0, 1));
    V01 = _mm_and_ps(V01, g_XMMaskXY);
    V01 = _mm_xor_ps(V01, SignMask2);
    V01 = _mm_add_ps(g_XMIdentityR0, V01);

    XMVECTOR S0 = _mm_mul_ps(V01, Omega);
    S0 = XMVectorSin(S0);
    S0 = _mm_div_ps(S0, SinOmega);

    // Nearly-parallel case: use the linear weights directly.
    S0 = XMVectorSelect(V01, S0, Control);

    XMVECTOR S1 = XMVectorSplatY(S0);
    S0 = XMVectorSplatX(S0);

    S1 = _mm_mul_ps(S1, Sign);
    XMVECTOR Result = _mm_mul_ps(Q0, S0);
    S1 = _mm_mul_ps(S1, Q1);
    Result = _mm_add_ps(Result, S1);
    return Result;
#endif
}

//------------------------------------------------------------------------------

// Spherical quadrangle interpolation with a scalar parameter t;
// delegates to the vectorized form.
inline XMVECTOR XM_CALLCONV XMQuaternionSquad
(
    FXMVECTOR Q0,
    FXMVECTOR Q1,
    FXMVECTOR Q2,
    GXMVECTOR Q3,
    float t
) noexcept
{
    XMVECTOR T = XMVectorReplicate(t);
    return XMQuaternionSquadV(Q0, Q1, Q2, Q3, T);
}

//------------------------------------------------------------------------------

// Spherical quadrangle interpolation: slerp(slerp(Q0,Q3,t), slerp(Q1,Q2,t),
// 2t(1-t)). T must be a replicated scalar (asserted). Control points are
// normally produced by XMQuaternionSquadSetup.
inline XMVECTOR XM_CALLCONV XMQuaternionSquadV
(
    FXMVECTOR Q0,
    FXMVECTOR Q1,
    FXMVECTOR Q2,
    GXMVECTOR Q3,
    HXMVECTOR T
) noexcept
{
    assert((XMVectorGetY(T) == XMVectorGetX(T)) && (XMVectorGetZ(T) == XMVectorGetX(T)) && (XMVectorGetW(T) == XMVectorGetX(T)));

    XMVECTOR TP = T;
    const XMVECTOR Two = XMVectorSplatConstant(2, 0);

    XMVECTOR Q03 = XMQuaternionSlerpV(Q0, Q3, T);
    XMVECTOR Q12 = XMQuaternionSlerpV(Q1, Q2, T);

    // TP = 2 * t * (1 - t)
    TP = XMVectorNegativeMultiplySubtract(TP, TP, TP);
    TP = XMVectorMultiply(TP, Two);

    XMVECTOR Result = XMQuaternionSlerpV(Q03, Q12, TP);

    return Result;
}

+//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMQuaternionSquadSetup +( + XMVECTOR* pA, + XMVECTOR* pB, + XMVECTOR* pC, + FXMVECTOR Q0, + FXMVECTOR Q1, + FXMVECTOR Q2, + GXMVECTOR Q3 +) noexcept +{ + assert(pA); + assert(pB); + assert(pC); + + XMVECTOR LS12 = XMQuaternionLengthSq(XMVectorAdd(Q1, Q2)); + XMVECTOR LD12 = XMQuaternionLengthSq(XMVectorSubtract(Q1, Q2)); + XMVECTOR SQ2 = XMVectorNegate(Q2); + + XMVECTOR Control1 = XMVectorLess(LS12, LD12); + SQ2 = XMVectorSelect(Q2, SQ2, Control1); + + XMVECTOR LS01 = XMQuaternionLengthSq(XMVectorAdd(Q0, Q1)); + XMVECTOR LD01 = XMQuaternionLengthSq(XMVectorSubtract(Q0, Q1)); + XMVECTOR SQ0 = XMVectorNegate(Q0); + + XMVECTOR LS23 = XMQuaternionLengthSq(XMVectorAdd(SQ2, Q3)); + XMVECTOR LD23 = XMQuaternionLengthSq(XMVectorSubtract(SQ2, Q3)); + XMVECTOR SQ3 = XMVectorNegate(Q3); + + XMVECTOR Control0 = XMVectorLess(LS01, LD01); + XMVECTOR Control2 = XMVectorLess(LS23, LD23); + + SQ0 = XMVectorSelect(Q0, SQ0, Control0); + SQ3 = XMVectorSelect(Q3, SQ3, Control2); + + XMVECTOR InvQ1 = XMQuaternionInverse(Q1); + XMVECTOR InvQ2 = XMQuaternionInverse(SQ2); + + XMVECTOR LnQ0 = XMQuaternionLn(XMQuaternionMultiply(InvQ1, SQ0)); + XMVECTOR LnQ2 = XMQuaternionLn(XMQuaternionMultiply(InvQ1, SQ2)); + XMVECTOR LnQ1 = XMQuaternionLn(XMQuaternionMultiply(InvQ2, Q1)); + XMVECTOR LnQ3 = XMQuaternionLn(XMQuaternionMultiply(InvQ2, SQ3)); + + const XMVECTOR NegativeOneQuarter = XMVectorSplatConstant(-1, 2); + + XMVECTOR ExpQ02 = XMVectorMultiply(XMVectorAdd(LnQ0, LnQ2), NegativeOneQuarter); + XMVECTOR ExpQ13 = XMVectorMultiply(XMVectorAdd(LnQ1, LnQ3), NegativeOneQuarter); + ExpQ02 = XMQuaternionExp(ExpQ02); + ExpQ13 = XMQuaternionExp(ExpQ13); + + *pA = XMQuaternionMultiply(Q1, ExpQ02); + *pB = XMQuaternionMultiply(SQ2, ExpQ13); + *pC = SQ2; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV 
XMQuaternionBaryCentric +( + FXMVECTOR Q0, + FXMVECTOR Q1, + FXMVECTOR Q2, + float f, + float g +) noexcept +{ + float s = f + g; + + XMVECTOR Result; + if ((s < 0.00001f) && (s > -0.00001f)) + { + Result = Q0; + } + else + { + XMVECTOR Q01 = XMQuaternionSlerp(Q0, Q1, s); + XMVECTOR Q02 = XMQuaternionSlerp(Q0, Q2, s); + + Result = XMQuaternionSlerp(Q01, Q02, g / s); + } + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionBaryCentricV +( + FXMVECTOR Q0, + FXMVECTOR Q1, + FXMVECTOR Q2, + GXMVECTOR F, + HXMVECTOR G +) noexcept +{ + assert((XMVectorGetY(F) == XMVectorGetX(F)) && (XMVectorGetZ(F) == XMVectorGetX(F)) && (XMVectorGetW(F) == XMVectorGetX(F))); + assert((XMVectorGetY(G) == XMVectorGetX(G)) && (XMVectorGetZ(G) == XMVectorGetX(G)) && (XMVectorGetW(G) == XMVectorGetX(G))); + + const XMVECTOR Epsilon = XMVectorSplatConstant(1, 16); + + XMVECTOR S = XMVectorAdd(F, G); + + XMVECTOR Result; + if (XMVector4InBounds(S, Epsilon)) + { + Result = Q0; + } + else + { + XMVECTOR Q01 = XMQuaternionSlerpV(Q0, Q1, S); + XMVECTOR Q02 = XMQuaternionSlerpV(Q0, Q2, S); + XMVECTOR GS = XMVectorReciprocal(S); + GS = XMVectorMultiply(G, GS); + + Result = XMQuaternionSlerpV(Q01, Q02, GS); + } + + return Result; +} + +//------------------------------------------------------------------------------ +// Transformation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionIdentity() noexcept +{ + return g_XMIdentityR3.v; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYaw +( + float Pitch, + float Yaw, + float Roll +) noexcept +{ + XMVECTOR Angles = XMVectorSet(Pitch, Yaw, Roll, 0.0f); + XMVECTOR Q = 
XMQuaternionRotationRollPitchYawFromVector(Angles); + return Q; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYawFromVector +( + FXMVECTOR Angles // +) noexcept +{ + static const XMVECTORF32 Sign = { { { 1.0f, -1.0f, -1.0f, 1.0f } } }; + + XMVECTOR HalfAngles = XMVectorMultiply(Angles, g_XMOneHalf.v); + + XMVECTOR SinAngles, CosAngles; + XMVectorSinCos(&SinAngles, &CosAngles, HalfAngles); + + XMVECTOR P0 = XMVectorPermute(SinAngles, CosAngles); + XMVECTOR Y0 = XMVectorPermute(SinAngles, CosAngles); + XMVECTOR R0 = XMVectorPermute(SinAngles, CosAngles); + XMVECTOR P1 = XMVectorPermute(CosAngles, SinAngles); + XMVECTOR Y1 = XMVectorPermute(CosAngles, SinAngles); + XMVECTOR R1 = XMVectorPermute(CosAngles, SinAngles); + + XMVECTOR Q1 = XMVectorMultiply(P1, Sign.v); + XMVECTOR Q0 = XMVectorMultiply(P0, Y0); + Q1 = XMVectorMultiply(Q1, Y1); + Q0 = XMVectorMultiply(Q0, R0); + XMVECTOR Q = XMVectorMultiplyAdd(Q1, R1, Q0); + + return Q; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMQuaternionRotationNormal +( + FXMVECTOR NormalAxis, + float Angle +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMVECTOR N = XMVectorSelect(g_XMOne.v, NormalAxis, g_XMSelect1110.v); + + float SinV, CosV; + XMScalarSinCos(&SinV, &CosV, 0.5f * Angle); + + XMVECTOR Scale = XMVectorSet(SinV, SinV, SinV, CosV); + return XMVectorMultiply(N, Scale); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR N = _mm_and_ps(NormalAxis, g_XMMask3); + N = _mm_or_ps(N, g_XMIdentityR3); + XMVECTOR Scale = _mm_set_ps1(0.5f * Angle); + XMVECTOR vSine; + XMVECTOR vCosine; + XMVectorSinCos(&vSine, &vCosine, Scale); + Scale = _mm_and_ps(vSine, g_XMMask3); + vCosine = _mm_and_ps(vCosine, g_XMMaskW); + Scale = _mm_or_ps(Scale, vCosine); + N = _mm_mul_ps(N, Scale); + return N; +#endif +} + 

//------------------------------------------------------------------------------

// Quaternion rotating by Angle radians about an arbitrary (nonzero, finite)
// axis; normalizes the axis first.
inline XMVECTOR XM_CALLCONV XMQuaternionRotationAxis
(
    FXMVECTOR Axis,
    float Angle
) noexcept
{
    assert(!XMVector3Equal(Axis, XMVectorZero()));
    assert(!XMVector3IsInfinite(Axis));

    XMVECTOR Normal = XMVector3Normalize(Axis);
    XMVECTOR Q = XMQuaternionRotationNormal(Normal, Angle);
    return Q;
}

//------------------------------------------------------------------------------

// Extract a unit quaternion from a rotation matrix M (Shepperd's method):
// pick the largest of x^2, y^2, z^2, w^2 from the diagonal to avoid
// catastrophic cancellation, then recover the other three components from
// off-diagonal sums/differences.
inline XMVECTOR XM_CALLCONV XMQuaternionRotationMatrix(FXMMATRIX M) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 q;
    float r22 = M.m[2][2];
    if (r22 <= 0.f)  // x^2 + y^2 >= z^2 + w^2
    {
        float dif10 = M.m[1][1] - M.m[0][0];
        float omr22 = 1.f - r22;
        if (dif10 <= 0.f)  // x^2 >= y^2
        {
            float fourXSqr = omr22 - dif10;
            float inv4x = 0.5f / sqrtf(fourXSqr);
            q.f[0] = fourXSqr * inv4x;
            q.f[1] = (M.m[0][1] + M.m[1][0]) * inv4x;
            q.f[2] = (M.m[0][2] + M.m[2][0]) * inv4x;
            q.f[3] = (M.m[1][2] - M.m[2][1]) * inv4x;
        }
        else  // y^2 >= x^2
        {
            float fourYSqr = omr22 + dif10;
            float inv4y = 0.5f / sqrtf(fourYSqr);
            q.f[0] = (M.m[0][1] + M.m[1][0]) * inv4y;
            q.f[1] = fourYSqr * inv4y;
            q.f[2] = (M.m[1][2] + M.m[2][1]) * inv4y;
            q.f[3] = (M.m[2][0] - M.m[0][2]) * inv4y;
        }
    }
    else  // z^2 + w^2 >= x^2 + y^2
    {
        float sum10 = M.m[1][1] + M.m[0][0];
        float opr22 = 1.f + r22;
        if (sum10 <= 0.f)  // z^2 >= w^2
        {
            float fourZSqr = opr22 - sum10;
            float inv4z = 0.5f / sqrtf(fourZSqr);
            q.f[0] = (M.m[0][2] + M.m[2][0]) * inv4z;
            q.f[1] = (M.m[1][2] + M.m[2][1]) * inv4z;
            q.f[2] = fourZSqr * inv4z;
            q.f[3] = (M.m[0][1] - M.m[1][0]) * inv4z;
        }
        else  // w^2 >= z^2
        {
            float fourWSqr = opr22 + sum10;
            float inv4w = 0.5f / sqrtf(fourWSqr);
            q.f[0] = (M.m[1][2] - M.m[2][1]) * inv4w;
            q.f[1] = (M.m[2][0] - M.m[0][2]) * inv4w;
            q.f[2] = (M.m[0][1] - M.m[1][0]) * inv4w;
            q.f[3] = fourWSqr * inv4w;
        }
    }
    return q.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    static const XMVECTORF32 XMPMMP = { { { +1.0f, -1.0f, -1.0f, +1.0f } } };
    static const XMVECTORF32 XMMPMP = { { { -1.0f, +1.0f, -1.0f, +1.0f } } };
    static const XMVECTORF32 XMMMPP = { { { -1.0f, -1.0f, +1.0f, +1.0f } } };
    static const XMVECTORU32 Select0110 = { { { XM_SELECT_0, XM_SELECT_1, XM_SELECT_1, XM_SELECT_0 } } };
    static const XMVECTORU32 Select0010 = { { { XM_SELECT_0, XM_SELECT_0, XM_SELECT_1, XM_SELECT_0 } } };

    XMVECTOR r0 = M.r[0];
    XMVECTOR r1 = M.r[1];
    XMVECTOR r2 = M.r[2];

    XMVECTOR r00 = vdupq_lane_f32(vget_low_f32(r0), 0);
    XMVECTOR r11 = vdupq_lane_f32(vget_low_f32(r1), 1);
    XMVECTOR r22 = vdupq_lane_f32(vget_high_f32(r2), 0);

    // x^2 >= y^2 equivalent to r11 - r00 <= 0
    XMVECTOR r11mr00 = vsubq_f32(r11, r00);
    XMVECTOR x2gey2 = vcleq_f32(r11mr00, g_XMZero);

    // z^2 >= w^2 equivalent to r11 + r00 <= 0
    XMVECTOR r11pr00 = vaddq_f32(r11, r00);
    XMVECTOR z2gew2 = vcleq_f32(r11pr00, g_XMZero);

    // x^2 + y^2 >= z^2 + w^2 equivalent to r22 <= 0
    XMVECTOR x2py2gez2pw2 = vcleq_f32(r22, g_XMZero);

    // (4*x^2, 4*y^2, 4*z^2, 4*w^2)
    XMVECTOR t0 = vmulq_f32(XMPMMP, r00);
    XMVECTOR x2y2z2w2 = vmlaq_f32(t0, XMMPMP, r11);
    x2y2z2w2 = vmlaq_f32(x2y2z2w2, XMMMPP, r22);
    x2y2z2w2 = vaddq_f32(x2y2z2w2, g_XMOne);

    // (r01, r02, r12, r11)
    t0 = vextq_f32(r0, r0, 1);
    XMVECTOR t1 = vextq_f32(r1, r1, 1);
    t0 = vcombine_f32(vget_low_f32(t0), vrev64_f32(vget_low_f32(t1)));

    // (r10, r20, r21, r10)
    t1 = vextq_f32(r2, r2, 3);
    XMVECTOR r10 = vdupq_lane_f32(vget_low_f32(r1), 0);
    t1 = vbslq_f32(Select0110, t1, r10);

    // (4*x*y, 4*x*z, 4*y*z, unused)
    XMVECTOR xyxzyz = vaddq_f32(t0, t1);

    // (r21, r20, r10, r10)
    t0 = vcombine_f32(vrev64_f32(vget_low_f32(r2)), vget_low_f32(r10));

    // (r12, r02, r01, r12)
    XMVECTOR t2 = vcombine_f32(vrev64_f32(vget_high_f32(r0)), vrev64_f32(vget_low_f32(r0)));
    XMVECTOR t3 = vdupq_lane_f32(vget_high_f32(r1), 0);
    t1 = vbslq_f32(Select0110, t2, t3);

    // (4*x*w, 4*y*w, 4*z*w, unused)
    XMVECTOR xwywzw = vsubq_f32(t0, t1);
    xwywzw = vmulq_f32(XMMPMP, xwywzw);

    // (4*x*x, 4*x*y, 4*x*z, 4*x*w)
    t0 = vextq_f32(xyxzyz, xyxzyz, 3);
    t1 = vbslq_f32(Select0110, t0, x2y2z2w2);
    t2 = vdupq_lane_f32(vget_low_f32(xwywzw), 0);
    XMVECTOR tensor0 = vbslq_f32(g_XMSelect1110, t1, t2);

    // (4*y*x, 4*y*y, 4*y*z, 4*y*w)
    t0 = vbslq_f32(g_XMSelect1011, xyxzyz, x2y2z2w2);
    t1 = vdupq_lane_f32(vget_low_f32(xwywzw), 1);
    XMVECTOR tensor1 = vbslq_f32(g_XMSelect1110, t0, t1);

    // (4*z*x, 4*z*y, 4*z*z, 4*z*w)
    t0 = vextq_f32(xyxzyz, xyxzyz, 1);
    t1 = vcombine_f32(vget_low_f32(t0), vrev64_f32(vget_high_f32(xwywzw)));
    XMVECTOR tensor2 = vbslq_f32(Select0010, x2y2z2w2, t1);

    // (4*w*x, 4*w*y, 4*w*z, 4*w*w)
    XMVECTOR tensor3 = vbslq_f32(g_XMSelect1110, xwywzw, x2y2z2w2);

    // Select the row of the tensor-product matrix that has the largest
    // magnitude.
    t0 = vbslq_f32(x2gey2, tensor0, tensor1);
    t1 = vbslq_f32(z2gew2, tensor2, tensor3);
    t2 = vbslq_f32(x2py2gez2pw2, t0, t1);

    // Normalize the row.  No division by zero is possible because the
    // quaternion is unit-length (and the row is a nonzero multiple of
    // the quaternion).
    t0 = XMVector4Length(t2);
    return XMVectorDivide(t2, t0);
#elif defined(_XM_SSE_INTRINSICS_)
    static const XMVECTORF32 XMPMMP = { { { +1.0f, -1.0f, -1.0f, +1.0f } } };
    static const XMVECTORF32 XMMPMP = { { { -1.0f, +1.0f, -1.0f, +1.0f } } };
    static const XMVECTORF32 XMMMPP = { { { -1.0f, -1.0f, +1.0f, +1.0f } } };

    XMVECTOR r0 = M.r[0];  // (r00, r01, r02, 0)
    XMVECTOR r1 = M.r[1];  // (r10, r11, r12, 0)
    XMVECTOR r2 = M.r[2];  // (r20, r21, r22, 0)

    // (r00, r00, r00, r00)
    XMVECTOR r00 = XM_PERMUTE_PS(r0, _MM_SHUFFLE(0, 0, 0, 0));
    // (r11, r11, r11, r11)
    XMVECTOR r11 = XM_PERMUTE_PS(r1, _MM_SHUFFLE(1, 1, 1, 1));
    // (r22, r22, r22, r22)
    XMVECTOR r22 = XM_PERMUTE_PS(r2, _MM_SHUFFLE(2, 2, 2, 2));

    // x^2 >= y^2 equivalent to r11 - r00 <= 0
    // (r11 - r00, r11 - r00, r11 - r00, r11 - r00)
    XMVECTOR r11mr00 = _mm_sub_ps(r11, r00);
    XMVECTOR x2gey2 = _mm_cmple_ps(r11mr00, g_XMZero);

    // z^2 >= w^2 equivalent to r11 + r00 <= 0
    // (r11 + r00, r11 + r00, r11 + r00, r11 + r00)
    XMVECTOR r11pr00 = _mm_add_ps(r11, r00);
    XMVECTOR z2gew2 = _mm_cmple_ps(r11pr00, g_XMZero);

    // x^2 + y^2 >= z^2 + w^2 equivalent to r22 <= 0
    XMVECTOR x2py2gez2pw2 = _mm_cmple_ps(r22, g_XMZero);

    // (4*x^2, 4*y^2, 4*z^2, 4*w^2)
    XMVECTOR t0 = XM_FMADD_PS(XMPMMP, r00, g_XMOne);
    XMVECTOR t1 = _mm_mul_ps(XMMPMP, r11);
    XMVECTOR t2 = XM_FMADD_PS(XMMMPP, r22, t0);
    XMVECTOR x2y2z2w2 = _mm_add_ps(t1, t2);

    // (r01, r02, r12, r11)
    t0 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(1, 2, 2, 1));
    // (r10, r10, r20, r21)
    t1 = _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(1, 0, 0, 0));
    // (r10, r20, r21, r10)
    t1 = XM_PERMUTE_PS(t1, _MM_SHUFFLE(1, 3, 2, 0));
    // (4*x*y, 4*x*z, 4*y*z, unused)
    XMVECTOR xyxzyz = _mm_add_ps(t0, t1);

    // (r21, r20, r10, r10)
    t0 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 1));
    // (r12, r12, r02, r01)
    t1 = _mm_shuffle_ps(r1, r0, _MM_SHUFFLE(1, 2, 2, 2));
    // (r12, r02, r01, r12)
    t1 = XM_PERMUTE_PS(t1, _MM_SHUFFLE(1, 3, 2, 0));
    // (4*x*w, 4*y*w, 4*z*w, unused)
    XMVECTOR xwywzw = _mm_sub_ps(t0, t1);
    xwywzw = _mm_mul_ps(XMMPMP, xwywzw);

    // (4*x^2, 4*y^2, 4*x*y, unused)
    t0 = _mm_shuffle_ps(x2y2z2w2, xyxzyz, _MM_SHUFFLE(0, 0, 1, 0));
    // (4*z^2, 4*w^2, 4*z*w, unused)
    t1 = _mm_shuffle_ps(x2y2z2w2, xwywzw, _MM_SHUFFLE(0, 2, 3, 2));
    // (4*x*z, 4*y*z, 4*x*w, 4*y*w)
    t2 = _mm_shuffle_ps(xyxzyz, xwywzw, _MM_SHUFFLE(1, 0, 2, 1));

    // (4*x*x, 4*x*y, 4*x*z, 4*x*w)
    XMVECTOR tensor0 = _mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2, 0, 2, 0));
    // (4*y*x, 4*y*y, 4*y*z, 4*y*w)
    XMVECTOR tensor1 = _mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3, 1, 1, 2));
    // (4*z*x, 4*z*y, 4*z*z, 4*z*w)
    XMVECTOR tensor2 = _mm_shuffle_ps(t2, t1, _MM_SHUFFLE(2, 0, 1, 0));
    // (4*w*x, 4*w*y, 4*w*z, 4*w*w)
    XMVECTOR tensor3 = _mm_shuffle_ps(t2, t1, _MM_SHUFFLE(1, 2, 3, 2));

    // Select the row of the tensor-product matrix that has the largest
    // magnitude.
    t0 = _mm_and_ps(x2gey2, tensor0);
    t1 = _mm_andnot_ps(x2gey2, tensor1);
    t0 = _mm_or_ps(t0, t1);
    t1 = _mm_and_ps(z2gew2, tensor2);
    t2 = _mm_andnot_ps(z2gew2, tensor3);
    t1 = _mm_or_ps(t1, t2);
    t0 = _mm_and_ps(x2py2gez2pw2, t0);
    t1 = _mm_andnot_ps(x2py2gez2pw2, t1);
    t2 = _mm_or_ps(t0, t1);

    // Normalize the row.  No division by zero is possible because the
    // quaternion is unit-length (and the row is a nonzero multiple of
    // the quaternion).
    t0 = XMVector4Length(t2);
    return _mm_div_ps(t2, t0);
#endif
}

//------------------------------------------------------------------------------
// Conversion operations
//------------------------------------------------------------------------------

//------------------------------------------------------------------------------

// Decompose a UNIT quaternion into an axis and an angle (radians).
// NOTE(review): *pAxis is the raw quaternion (its xyz part is the scaled
// axis, not normalized) — this matches upstream DirectXMath, which expects
// callers to normalize if they need a unit axis.
_Use_decl_annotations_
inline void XM_CALLCONV XMQuaternionToAxisAngle
(
    XMVECTOR* pAxis,
    float* pAngle,
    FXMVECTOR Q
) noexcept
{
    assert(pAxis);
    assert(pAngle);

    *pAxis = Q;

    *pAngle = 2.0f * XMScalarACos(XMVectorGetW(Q));
}

/****************************************************************************
 *
 * Plane
 *
 ****************************************************************************/

 //------------------------------------------------------------------------------
 // Comparison operations
 //------------------------------------------------------------------------------

 //------------------------------------------------------------------------------

// Exact component-wise equality of two planes (no normalization applied).
inline bool XM_CALLCONV XMPlaneEqual
(
    FXMVECTOR P1,
    FXMVECTOR P2
) noexcept
{
    return XMVector4Equal(P1, P2);
}

//------------------------------------------------------------------------------

// Approximate equality: both planes are normalized first, then compared
// component-wise within Epsilon.
inline bool XM_CALLCONV XMPlaneNearEqual
(
    FXMVECTOR P1,
    FXMVECTOR P2,
    FXMVECTOR Epsilon
) noexcept
{
    XMVECTOR NP1 = XMPlaneNormalize(P1);
    XMVECTOR NP2 = XMPlaneNormalize(P2);
    return XMVector4NearEqual(NP1, NP2, Epsilon);
}

//------------------------------------------------------------------------------

// Exact component-wise inequality of two planes.
inline bool XM_CALLCONV XMPlaneNotEqual
(
    FXMVECTOR P1,
    FXMVECTOR P2
) noexcept
{
    return XMVector4NotEqual(P1, P2);
}

//------------------------------------------------------------------------------

// True if any component of P is NaN.
inline bool XM_CALLCONV XMPlaneIsNaN(FXMVECTOR P) noexcept
{
    return XMVector4IsNaN(P);
}

//------------------------------------------------------------------------------

// True if any component of P is +/- infinity.
inline bool XM_CALLCONV XMPlaneIsInfinite(FXMVECTOR P) noexcept
{
    return XMVector4IsInfinite(P);
}

//------------------------------------------------------------------------------
// Computation operations
//------------------------------------------------------------------------------

//------------------------------------------------------------------------------

// 4D dot product of plane P = (a, b, c, d) with vector V.
inline XMVECTOR XM_CALLCONV XMPlaneDot
(
    FXMVECTOR P,
    FXMVECTOR V
) noexcept
{
    return XMVector4Dot(P, V);
}

//------------------------------------------------------------------------------

// Signed distance of the point V from plane P: treats V as a position
// (w forced to 1).
inline XMVECTOR XM_CALLCONV XMPlaneDotCoord
(
    FXMVECTOR P,
    FXMVECTOR V
) noexcept
{
    // Result = P[0] * V[0] + P[1] * V[1] + P[2] * V[2] + P[3]

    XMVECTOR V3 = XMVectorSelect(g_XMOne.v, V, g_XMSelect1110.v);
    XMVECTOR Result = XMVector4Dot(P, V3);
    return Result;
}

//------------------------------------------------------------------------------

// Dot product of the plane's normal (a, b, c) with the direction V
// (ignores the d component).
inline XMVECTOR XM_CALLCONV XMPlaneDotNormal
(
    FXMVECTOR P,
    FXMVECTOR V
) noexcept
{
    return XMVector3Dot(P, V);
}

//------------------------------------------------------------------------------
// XMPlaneNormalizeEst uses a reciprocal estimate and
// returns QNaN on zero and infinite vectors.
+ +inline XMVECTOR XM_CALLCONV XMPlaneNormalizeEst(FXMVECTOR P) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMVECTOR Result = XMVector3ReciprocalLengthEst(P); + return XMVectorMultiply(P, Result); + +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(P, P, 0x7f); + XMVECTOR vResult = _mm_rsqrt_ps(vTemp); + return _mm_mul_ps(vResult, P); +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product + XMVECTOR vDot = _mm_mul_ps(P, P); + // x=Dot.y, y=Dot.z + XMVECTOR vTemp = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(2, 1, 2, 1)); + // Result.x = x+y + vDot = _mm_add_ss(vDot, vTemp); + // x=Dot.z + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // Result.x = (x+y)+z + vDot = _mm_add_ss(vDot, vTemp); + // Splat x + vDot = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the reciprocal + vDot = _mm_rsqrt_ps(vDot); + // Get the reciprocal + vDot = _mm_mul_ps(vDot, P); + return vDot; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneNormalize(FXMVECTOR P) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float fLengthSq = sqrtf((P.vector4_f32[0] * P.vector4_f32[0]) + (P.vector4_f32[1] * P.vector4_f32[1]) + (P.vector4_f32[2] * P.vector4_f32[2])); + // Prevent divide by zero + if (fLengthSq > 0) + { + fLengthSq = 1.0f / fLengthSq; + } + XMVECTORF32 vResult = { { { + P.vector4_f32[0] * fLengthSq, + P.vector4_f32[1] * fLengthSq, + P.vector4_f32[2] * fLengthSq, + P.vector4_f32[3] * fLengthSq + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vLength = XMVector3ReciprocalLength(P); + return XMVectorMultiply(P, vLength); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vLengthSq = _mm_dp_ps(P, P, 0x7f); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = 
_mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Reciprocal mul to perform the normalization + vResult = _mm_div_ps(P, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vLengthSq); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z only + XMVECTOR vLengthSq = _mm_mul_ps(P, P); + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 1, 2, 1)); + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Reciprocal mul to perform the normalization + vResult = _mm_div_ps(P, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vLengthSq); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneIntersectLine +( + FXMVECTOR P, + FXMVECTOR LinePoint1, + FXMVECTOR LinePoint2 +) noexcept +{ + XMVECTOR V1 = XMVector3Dot(P, LinePoint1); + XMVECTOR V2 = XMVector3Dot(P, LinePoint2); + XMVECTOR D = XMVectorSubtract(V1, V2); + + XMVECTOR VT = XMPlaneDotCoord(P, LinePoint1); + VT = XMVectorDivide(VT, D); + + XMVECTOR Point = XMVectorSubtract(LinePoint2, LinePoint1); + Point = XMVectorMultiplyAdd(Point, VT, LinePoint1); + + const XMVECTOR Zero = XMVectorZero(); + XMVECTOR Control = XMVectorNearEqual(D, Zero, g_XMEpsilon.v); + + return XMVectorSelect(Point, g_XMQNaN.v, Control); +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline void XM_CALLCONV XMPlaneIntersectPlane +( + XMVECTOR* pLinePoint1, + XMVECTOR* pLinePoint2, + FXMVECTOR P1, + 
FXMVECTOR P2 +) noexcept +{ + assert(pLinePoint1); + assert(pLinePoint2); + + XMVECTOR V1 = XMVector3Cross(P2, P1); + + XMVECTOR LengthSq = XMVector3LengthSq(V1); + + XMVECTOR V2 = XMVector3Cross(P2, V1); + + XMVECTOR P1W = XMVectorSplatW(P1); + XMVECTOR Point = XMVectorMultiply(V2, P1W); + + XMVECTOR V3 = XMVector3Cross(V1, P1); + + XMVECTOR P2W = XMVectorSplatW(P2); + Point = XMVectorMultiplyAdd(V3, P2W, Point); + + XMVECTOR LinePoint1 = XMVectorDivide(Point, LengthSq); + + XMVECTOR LinePoint2 = XMVectorAdd(LinePoint1, V1); + + XMVECTOR Control = XMVectorLessOrEqual(LengthSq, g_XMEpsilon.v); + *pLinePoint1 = XMVectorSelect(LinePoint1, g_XMQNaN.v, Control); + *pLinePoint2 = XMVectorSelect(LinePoint2, g_XMQNaN.v, Control); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneTransform +( + FXMVECTOR P, + FXMMATRIX M +) noexcept +{ + XMVECTOR W = XMVectorSplatW(P); + XMVECTOR Z = XMVectorSplatZ(P); + XMVECTOR Y = XMVectorSplatY(P); + XMVECTOR X = XMVectorSplatX(P); + + XMVECTOR Result = XMVectorMultiply(W, M.r[3]); + Result = XMVectorMultiplyAdd(Z, M.r[2], Result); + Result = XMVectorMultiplyAdd(Y, M.r[1], Result); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + return Result; +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT4* XM_CALLCONV XMPlaneTransformStream +( + XMFLOAT4* pOutputStream, + size_t OutputStride, + const XMFLOAT4* pInputStream, + size_t InputStride, + size_t PlaneCount, + FXMMATRIX M +) noexcept +{ + return XMVector4TransformStream(pOutputStream, + OutputStride, + pInputStream, + InputStride, + PlaneCount, + M); +} + +//------------------------------------------------------------------------------ +// Conversion operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline 
XMVECTOR XM_CALLCONV XMPlaneFromPointNormal +( + FXMVECTOR Point, + FXMVECTOR Normal +) noexcept +{ + XMVECTOR W = XMVector3Dot(Point, Normal); + W = XMVectorNegate(W); + return XMVectorSelect(W, Normal, g_XMSelect1110.v); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMPlaneFromPoints +( + FXMVECTOR Point1, + FXMVECTOR Point2, + FXMVECTOR Point3 +) noexcept +{ + XMVECTOR V21 = XMVectorSubtract(Point1, Point2); + XMVECTOR V31 = XMVectorSubtract(Point1, Point3); + + XMVECTOR N = XMVector3Cross(V21, V31); + N = XMVector3Normalize(N); + + XMVECTOR D = XMPlaneDotNormal(N, Point1); + D = XMVectorNegate(D); + + XMVECTOR Result = XMVectorSelect(D, N, g_XMSelect1110.v); + + return Result; +} + +/**************************************************************************** + * + * Color + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Comparison operations + //------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorEqual +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4Equal(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorNotEqual +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4NotEqual(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorGreater +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4Greater(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorGreaterOrEqual +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4GreaterOrEqual(C1, 
C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorLess +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4Less(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorLessOrEqual +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVector4LessOrEqual(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorIsNaN(FXMVECTOR C) noexcept +{ + return XMVector4IsNaN(C); +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMColorIsInfinite(FXMVECTOR C) noexcept +{ + return XMVector4IsInfinite(C); +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorNegative(FXMVECTOR vColor) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { + 1.0f - vColor.vector4_f32[0], + 1.0f - vColor.vector4_f32[1], + 1.0f - vColor.vector4_f32[2], + vColor.vector4_f32[3] + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vTemp = veorq_u32(vColor, g_XMNegate3); + return vaddq_f32(vTemp, g_XMOne3); +#elif defined(_XM_SSE_INTRINSICS_) + // Negate only x,y and z. 
+ XMVECTOR vTemp = _mm_xor_ps(vColor, g_XMNegate3); + // Add 1,1,1,0 to -x,-y,-z,w + return _mm_add_ps(vTemp, g_XMOne3); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorModulate +( + FXMVECTOR C1, + FXMVECTOR C2 +) noexcept +{ + return XMVectorMultiply(C1, C2); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorAdjustSaturation +( + FXMVECTOR vColor, + float fSaturation +) noexcept +{ + // Luminance = 0.2125f * C[0] + 0.7154f * C[1] + 0.0721f * C[2]; + // Result = (C - Luminance) * Saturation + Luminance; + + const XMVECTORF32 gvLuminance = { { { 0.2125f, 0.7154f, 0.0721f, 0.0f } } }; +#if defined(_XM_NO_INTRINSICS_) + float fLuminance = (vColor.vector4_f32[0] * gvLuminance.f[0]) + (vColor.vector4_f32[1] * gvLuminance.f[1]) + (vColor.vector4_f32[2] * gvLuminance.f[2]); + XMVECTOR vResult; + vResult.vector4_f32[0] = ((vColor.vector4_f32[0] - fLuminance) * fSaturation) + fLuminance; + vResult.vector4_f32[1] = ((vColor.vector4_f32[1] - fLuminance) * fSaturation) + fLuminance; + vResult.vector4_f32[2] = ((vColor.vector4_f32[2] - fLuminance) * fSaturation) + fLuminance; + vResult.vector4_f32[3] = vColor.vector4_f32[3]; + return vResult; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vLuminance = XMVector3Dot(vColor, gvLuminance); + XMVECTOR vResult = vsubq_f32(vColor, vLuminance); + vResult = vmlaq_n_f32(vLuminance, vResult, fSaturation); + return vbslq_f32(g_XMSelect1110, vResult, vColor); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vLuminance = XMVector3Dot(vColor, gvLuminance); + // Splat fSaturation + XMVECTOR vSaturation = _mm_set_ps1(fSaturation); + // vResult = ((vColor-vLuminance)*vSaturation)+vLuminance; + XMVECTOR vResult = _mm_sub_ps(vColor, vLuminance); + vResult = XM_FMADD_PS(vResult, vSaturation, vLuminance); + // Retain w from the source color + vLuminance = _mm_shuffle_ps(vResult, vColor, 
_MM_SHUFFLE(3, 2, 2, 2)); // x = vResult.z,y = vResult.z,z = vColor.z,w=vColor.w + vResult = _mm_shuffle_ps(vResult, vLuminance, _MM_SHUFFLE(3, 0, 1, 0)); // x = vResult.x,y = vResult.y,z = vResult.z,w=vColor.w + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorAdjustContrast +( + FXMVECTOR vColor, + float fContrast +) noexcept +{ + // Result = (vColor - 0.5f) * fContrast + 0.5f; + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { + ((vColor.vector4_f32[0] - 0.5f) * fContrast) + 0.5f, + ((vColor.vector4_f32[1] - 0.5f) * fContrast) + 0.5f, + ((vColor.vector4_f32[2] - 0.5f) * fContrast) + 0.5f, + vColor.vector4_f32[3] // Leave W untouched + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR vResult = vsubq_f32(vColor, g_XMOneHalf.v); + vResult = vmlaq_n_f32(g_XMOneHalf.v, vResult, fContrast); + return vbslq_f32(g_XMSelect1110, vResult, vColor); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vScale = _mm_set_ps1(fContrast); // Splat the scale + XMVECTOR vResult = _mm_sub_ps(vColor, g_XMOneHalf); // Subtract 0.5f from the source (Saving source) + vResult = XM_FMADD_PS(vResult, vScale, g_XMOneHalf); +// Retain w from the source color + vScale = _mm_shuffle_ps(vResult, vColor, _MM_SHUFFLE(3, 2, 2, 2)); // x = vResult.z,y = vResult.z,z = vColor.z,w=vColor.w + vResult = _mm_shuffle_ps(vResult, vScale, _MM_SHUFFLE(3, 0, 1, 0)); // x = vResult.x,y = vResult.y,z = vResult.z,w=vColor.w + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToHSL(FXMVECTOR rgb) noexcept +{ + XMVECTOR r = XMVectorSplatX(rgb); + XMVECTOR g = XMVectorSplatY(rgb); + XMVECTOR b = XMVectorSplatZ(rgb); + + XMVECTOR min = XMVectorMin(r, XMVectorMin(g, b)); + XMVECTOR max = XMVectorMax(r, XMVectorMax(g, b)); + + XMVECTOR l = XMVectorMultiply(XMVectorAdd(min, max), 
g_XMOneHalf); + + XMVECTOR d = XMVectorSubtract(max, min); + + XMVECTOR la = XMVectorSelect(rgb, l, g_XMSelect1110); + + if (XMVector3Less(d, g_XMEpsilon)) + { + // Achromatic, assume H and S of 0 + return XMVectorSelect(la, g_XMZero, g_XMSelect1100); + } + else + { + XMVECTOR s, h; + + XMVECTOR d2 = XMVectorAdd(min, max); + + if (XMVector3Greater(l, g_XMOneHalf)) + { + // d / (2-max-min) + s = XMVectorDivide(d, XMVectorSubtract(g_XMTwo, d2)); + } + else + { + // d / (max+min) + s = XMVectorDivide(d, d2); + } + + if (XMVector3Equal(r, max)) + { + // Red is max + h = XMVectorDivide(XMVectorSubtract(g, b), d); + } + else if (XMVector3Equal(g, max)) + { + // Green is max + h = XMVectorDivide(XMVectorSubtract(b, r), d); + h = XMVectorAdd(h, g_XMTwo); + } + else + { + // Blue is max + h = XMVectorDivide(XMVectorSubtract(r, g), d); + h = XMVectorAdd(h, g_XMFour); + } + + h = XMVectorDivide(h, g_XMSix); + + if (XMVector3Less(h, g_XMZero)) + h = XMVectorAdd(h, g_XMOne); + + XMVECTOR lha = XMVectorSelect(la, h, g_XMSelect1100); + return XMVectorSelect(s, lha, g_XMSelect1011); + } +} + +//------------------------------------------------------------------------------ + +namespace Internal +{ + + inline XMVECTOR XM_CALLCONV XMColorHue2Clr(FXMVECTOR p, FXMVECTOR q, FXMVECTOR h) noexcept + { + static const XMVECTORF32 oneSixth = { { { 1.0f / 6.0f, 1.0f / 6.0f, 1.0f / 6.0f, 1.0f / 6.0f } } }; + static const XMVECTORF32 twoThirds = { { { 2.0f / 3.0f, 2.0f / 3.0f, 2.0f / 3.0f, 2.0f / 3.0f } } }; + + XMVECTOR t = h; + + if (XMVector3Less(t, g_XMZero)) + t = XMVectorAdd(t, g_XMOne); + + if (XMVector3Greater(t, g_XMOne)) + t = XMVectorSubtract(t, g_XMOne); + + if (XMVector3Less(t, oneSixth)) + { + // p + (q - p) * 6 * t + XMVECTOR t1 = XMVectorSubtract(q, p); + XMVECTOR t2 = XMVectorMultiply(g_XMSix, t); + return XMVectorMultiplyAdd(t1, t2, p); + } + + if (XMVector3Less(t, g_XMOneHalf)) + return q; + + if (XMVector3Less(t, twoThirds)) + { + // p + (q - p) * 6 * (2/3 - t) + XMVECTOR t1 
= XMVectorSubtract(q, p); + XMVECTOR t2 = XMVectorMultiply(g_XMSix, XMVectorSubtract(twoThirds, t)); + return XMVectorMultiplyAdd(t1, t2, p); + } + + return p; + } + +} // namespace Internal + +inline XMVECTOR XM_CALLCONV XMColorHSLToRGB(FXMVECTOR hsl) noexcept +{ + static const XMVECTORF32 oneThird = { { { 1.0f / 3.0f, 1.0f / 3.0f, 1.0f / 3.0f, 1.0f / 3.0f } } }; + + XMVECTOR s = XMVectorSplatY(hsl); + XMVECTOR l = XMVectorSplatZ(hsl); + + if (XMVector3NearEqual(s, g_XMZero, g_XMEpsilon)) + { + // Achromatic + return XMVectorSelect(hsl, l, g_XMSelect1110); + } + else + { + XMVECTOR h = XMVectorSplatX(hsl); + + XMVECTOR q; + if (XMVector3Less(l, g_XMOneHalf)) + { + q = XMVectorMultiply(l, XMVectorAdd(g_XMOne, s)); + } + else + { + q = XMVectorSubtract(XMVectorAdd(l, s), XMVectorMultiply(l, s)); + } + + XMVECTOR p = XMVectorSubtract(XMVectorMultiply(g_XMTwo, l), q); + + XMVECTOR r = DirectX::Internal::XMColorHue2Clr(p, q, XMVectorAdd(h, oneThird)); + XMVECTOR g = DirectX::Internal::XMColorHue2Clr(p, q, h); + XMVECTOR b = DirectX::Internal::XMColorHue2Clr(p, q, XMVectorSubtract(h, oneThird)); + + XMVECTOR rg = XMVectorSelect(g, r, g_XMSelect1000); + XMVECTOR ba = XMVectorSelect(hsl, b, g_XMSelect1110); + + return XMVectorSelect(ba, rg, g_XMSelect1100); + } +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToHSV(FXMVECTOR rgb) noexcept +{ + XMVECTOR r = XMVectorSplatX(rgb); + XMVECTOR g = XMVectorSplatY(rgb); + XMVECTOR b = XMVectorSplatZ(rgb); + + XMVECTOR min = XMVectorMin(r, XMVectorMin(g, b)); + XMVECTOR v = XMVectorMax(r, XMVectorMax(g, b)); + + XMVECTOR d = XMVectorSubtract(v, min); + + XMVECTOR s = (XMVector3NearEqual(v, g_XMZero, g_XMEpsilon)) ? 
g_XMZero : XMVectorDivide(d, v); + + if (XMVector3Less(d, g_XMEpsilon)) + { + // Achromatic, assume H of 0 + XMVECTOR hv = XMVectorSelect(v, g_XMZero, g_XMSelect1000); + XMVECTOR hva = XMVectorSelect(rgb, hv, g_XMSelect1110); + return XMVectorSelect(s, hva, g_XMSelect1011); + } + else + { + XMVECTOR h; + + if (XMVector3Equal(r, v)) + { + // Red is max + h = XMVectorDivide(XMVectorSubtract(g, b), d); + + if (XMVector3Less(g, b)) + h = XMVectorAdd(h, g_XMSix); + } + else if (XMVector3Equal(g, v)) + { + // Green is max + h = XMVectorDivide(XMVectorSubtract(b, r), d); + h = XMVectorAdd(h, g_XMTwo); + } + else + { + // Blue is max + h = XMVectorDivide(XMVectorSubtract(r, g), d); + h = XMVectorAdd(h, g_XMFour); + } + + h = XMVectorDivide(h, g_XMSix); + + XMVECTOR hv = XMVectorSelect(v, h, g_XMSelect1000); + XMVECTOR hva = XMVectorSelect(rgb, hv, g_XMSelect1110); + return XMVectorSelect(s, hva, g_XMSelect1011); + } +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorHSVToRGB(FXMVECTOR hsv) noexcept +{ + XMVECTOR h = XMVectorSplatX(hsv); + XMVECTOR s = XMVectorSplatY(hsv); + XMVECTOR v = XMVectorSplatZ(hsv); + + XMVECTOR h6 = XMVectorMultiply(h, g_XMSix); + + XMVECTOR i = XMVectorFloor(h6); + XMVECTOR f = XMVectorSubtract(h6, i); + + // p = v* (1-s) + XMVECTOR p = XMVectorMultiply(v, XMVectorSubtract(g_XMOne, s)); + + // q = v*(1-f*s) + XMVECTOR q = XMVectorMultiply(v, XMVectorSubtract(g_XMOne, XMVectorMultiply(f, s))); + + // t = v*(1 - (1-f)*s) + XMVECTOR t = XMVectorMultiply(v, XMVectorSubtract(g_XMOne, XMVectorMultiply(XMVectorSubtract(g_XMOne, f), s))); + + auto ii = static_cast(XMVectorGetX(XMVectorMod(i, g_XMSix))); + + XMVECTOR _rgb; + + switch (ii) + { + case 0: // rgb = vtp + { + XMVECTOR vt = XMVectorSelect(t, v, g_XMSelect1000); + _rgb = XMVectorSelect(p, vt, g_XMSelect1100); + } + break; + case 1: // rgb = qvp + { + XMVECTOR qv = XMVectorSelect(v, q, g_XMSelect1000); + _rgb = 
XMVectorSelect(p, qv, g_XMSelect1100); + } + break; + case 2: // rgb = pvt + { + XMVECTOR pv = XMVectorSelect(v, p, g_XMSelect1000); + _rgb = XMVectorSelect(t, pv, g_XMSelect1100); + } + break; + case 3: // rgb = pqv + { + XMVECTOR pq = XMVectorSelect(q, p, g_XMSelect1000); + _rgb = XMVectorSelect(v, pq, g_XMSelect1100); + } + break; + case 4: // rgb = tpv + { + XMVECTOR tp = XMVectorSelect(p, t, g_XMSelect1000); + _rgb = XMVectorSelect(v, tp, g_XMSelect1100); + } + break; + default: // rgb = vpq + { + XMVECTOR vp = XMVectorSelect(p, v, g_XMSelect1000); + _rgb = XMVectorSelect(q, vp, g_XMSelect1100); + } + break; + } + + return XMVectorSelect(hsv, _rgb, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToYUV(FXMVECTOR rgb) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 0.299f, -0.147f, 0.615f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { 0.587f, -0.289f, -0.515f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 0.114f, 0.436f, -0.100f, 0.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(rgb, M); + + return XMVectorSelect(rgb, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorYUVToRGB(FXMVECTOR yuv) noexcept +{ + static const XMVECTORF32 Scale1 = { { { 0.0f, -0.395f, 2.032f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 1.140f, -0.581f, 0.0f, 0.0f } } }; + + XMMATRIX M(g_XMOne, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(yuv, M); + + return XMVectorSelect(yuv, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToYUV_HD(FXMVECTOR rgb) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 0.2126f, -0.0997f, 0.6150f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { 0.7152f, -0.3354f, 
-0.5586f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 0.0722f, 0.4351f, -0.0564f, 0.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(rgb, M); + + return XMVectorSelect(rgb, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorYUVToRGB_HD(FXMVECTOR yuv) noexcept +{ + static const XMVECTORF32 Scale1 = { { { 0.0f, -0.2153f, 2.1324f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 1.2803f, -0.3806f, 0.0f, 0.0f } } }; + + XMMATRIX M(g_XMOne, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(yuv, M); + + return XMVectorSelect(yuv, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToXYZ(FXMVECTOR rgb) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 0.4887180f, 0.1762044f, 0.0000000f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { 0.3106803f, 0.8129847f, 0.0102048f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 0.2006017f, 0.0108109f, 0.9897952f, 0.0f } } }; + static const XMVECTORF32 Scale = { { { 1.f / 0.17697f, 1.f / 0.17697f, 1.f / 0.17697f, 0.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVectorMultiply(XMVector3Transform(rgb, M), Scale); + + return XMVectorSelect(rgb, clr, g_XMSelect1110); +} + +inline XMVECTOR XM_CALLCONV XMColorXYZToRGB(FXMVECTOR xyz) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 2.3706743f, -0.5138850f, 0.0052982f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { -0.9000405f, 1.4253036f, -0.0146949f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { -0.4706338f, 0.0885814f, 1.0093968f, 0.0f } } }; + static const XMVECTORF32 Scale = { { { 0.17697f, 0.17697f, 0.17697f, 0.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(XMVectorMultiply(xyz, Scale), M); + + return 
XMVectorSelect(xyz, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorXYZToSRGB(FXMVECTOR xyz) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 3.2406f, -0.9689f, 0.0557f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { -1.5372f, 1.8758f, -0.2040f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { -0.4986f, 0.0415f, 1.0570f, 0.0f } } }; + static const XMVECTORF32 Cutoff = { { { 0.0031308f, 0.0031308f, 0.0031308f, 0.0f } } }; + static const XMVECTORF32 Exp = { { { 1.0f / 2.4f, 1.0f / 2.4f, 1.0f / 2.4f, 1.0f } } }; + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR lclr = XMVector3Transform(xyz, M); + + XMVECTOR sel = XMVectorGreater(lclr, Cutoff); + + // clr = 12.92 * lclr for lclr <= 0.0031308f + XMVECTOR smallC = XMVectorMultiply(lclr, g_XMsrgbScale); + + // clr = (1+a)*pow(lclr, 1/2.4) - a for lclr > 0.0031308 (where a = 0.055) + XMVECTOR largeC = XMVectorSubtract(XMVectorMultiply(g_XMsrgbA1, XMVectorPow(lclr, Exp)), g_XMsrgbA); + + XMVECTOR clr = XMVectorSelect(smallC, largeC, sel); + + return XMVectorSelect(xyz, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorSRGBToXYZ(FXMVECTOR srgb) noexcept +{ + static const XMVECTORF32 Scale0 = { { { 0.4124f, 0.2126f, 0.0193f, 0.0f } } }; + static const XMVECTORF32 Scale1 = { { { 0.3576f, 0.7152f, 0.1192f, 0.0f } } }; + static const XMVECTORF32 Scale2 = { { { 0.1805f, 0.0722f, 0.9505f, 0.0f } } }; + static const XMVECTORF32 Cutoff = { { { 0.04045f, 0.04045f, 0.04045f, 0.0f } } }; + static const XMVECTORF32 Exp = { { { 2.4f, 2.4f, 2.4f, 1.0f } } }; + + XMVECTOR sel = XMVectorGreater(srgb, Cutoff); + + // lclr = clr / 12.92 + XMVECTOR smallC = XMVectorDivide(srgb, g_XMsrgbScale); + + // lclr = pow( (clr + a) / (1+a), 2.4 ) + XMVECTOR largeC = XMVectorPow(XMVectorDivide(XMVectorAdd(srgb, g_XMsrgbA), 
g_XMsrgbA1), Exp); + + XMVECTOR lclr = XMVectorSelect(smallC, largeC, sel); + + XMMATRIX M(Scale0, Scale1, Scale2, g_XMZero); + XMVECTOR clr = XMVector3Transform(lclr, M); + + return XMVectorSelect(srgb, clr, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorRGBToSRGB(FXMVECTOR rgb) noexcept +{ + static const XMVECTORF32 Cutoff = { { { 0.0031308f, 0.0031308f, 0.0031308f, 1.f } } }; + static const XMVECTORF32 Linear = { { { 12.92f, 12.92f, 12.92f, 1.f } } }; + static const XMVECTORF32 Scale = { { { 1.055f, 1.055f, 1.055f, 1.f } } }; + static const XMVECTORF32 Bias = { { { 0.055f, 0.055f, 0.055f, 0.f } } }; + static const XMVECTORF32 InvGamma = { { { 1.0f / 2.4f, 1.0f / 2.4f, 1.0f / 2.4f, 1.f } } }; + + XMVECTOR V = XMVectorSaturate(rgb); + XMVECTOR V0 = XMVectorMultiply(V, Linear); + XMVECTOR V1 = XMVectorSubtract(XMVectorMultiply(Scale, XMVectorPow(V, InvGamma)), Bias); + XMVECTOR select = XMVectorLess(V, Cutoff); + V = XMVectorSelect(V1, V0, select); + return XMVectorSelect(rgb, V, g_XMSelect1110); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMColorSRGBToRGB(FXMVECTOR srgb) noexcept +{ + static const XMVECTORF32 Cutoff = { { { 0.04045f, 0.04045f, 0.04045f, 1.f } } }; + static const XMVECTORF32 ILinear = { { { 1.f / 12.92f, 1.f / 12.92f, 1.f / 12.92f, 1.f } } }; + static const XMVECTORF32 Scale = { { { 1.f / 1.055f, 1.f / 1.055f, 1.f / 1.055f, 1.f } } }; + static const XMVECTORF32 Bias = { { { 0.055f, 0.055f, 0.055f, 0.f } } }; + static const XMVECTORF32 Gamma = { { { 2.4f, 2.4f, 2.4f, 1.f } } }; + + XMVECTOR V = XMVectorSaturate(srgb); + XMVECTOR V0 = XMVectorMultiply(V, ILinear); + XMVECTOR V1 = XMVectorPow(XMVectorMultiply(XMVectorAdd(V, Bias), Scale), Gamma); + XMVECTOR select = XMVectorGreater(V, Cutoff); + V = XMVectorSelect(V0, V1, select); + return XMVectorSelect(srgb, V, g_XMSelect1110); +} + 
+/**************************************************************************** + * + * Miscellaneous + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + +inline bool XMVerifyCPUSupport() noexcept +{ +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + int CPUInfo[4] = { -1 }; +#if defined(__clang__) || defined(__GNUC__) + __cpuid(0, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]); +#else + __cpuid(CPUInfo, 0); +#endif + +#ifdef __AVX2__ + if (CPUInfo[0] < 7) + return false; +#else + if (CPUInfo[0] < 1) + return false; +#endif + +#if defined(__clang__) || defined(__GNUC__) + __cpuid(1, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]); +#else + __cpuid(CPUInfo, 1); +#endif + +#if defined(__AVX2__) || defined(_XM_AVX2_INTRINSICS_) + // The compiler can emit FMA3 instructions even without explicit intrinsics use + if ((CPUInfo[2] & 0x38081001) != 0x38081001) + return false; // No F16C/AVX/OSXSAVE/SSE4.1/FMA3/SSE3 support +#elif defined(_XM_FMA3_INTRINSICS_) && defined(_XM_F16C_INTRINSICS_) + if ((CPUInfo[2] & 0x38081001) != 0x38081001) + return false; // No F16C/AVX/OSXSAVE/SSE4.1/FMA3/SSE3 support +#elif defined(_XM_FMA3_INTRINSICS_) + if ((CPUInfo[2] & 0x18081001) != 0x18081001) + return false; // No AVX/OSXSAVE/SSE4.1/FMA3/SSE3 support +#elif defined(_XM_F16C_INTRINSICS_) + if ((CPUInfo[2] & 0x38080001) != 0x38080001) + return false; // No F16C/AVX/OSXSAVE/SSE4.1/SSE3 support +#elif defined(__AVX__) || defined(_XM_AVX_INTRINSICS_) + if ((CPUInfo[2] & 0x18080001) != 0x18080001) + return false; // No AVX/OSXSAVE/SSE4.1/SSE3 support +#elif defined(_XM_SSE4_INTRINSICS_) + if ((CPUInfo[2] & 0x80001) != 0x80001) + return false; // No SSE3/SSE4.1 support +#elif defined(_XM_SSE3_INTRINSICS_) + if (!(CPUInfo[2] & 0x1)) + return false; // No SSE3 support +#endif + + // The x64 processor model requires SSE2 support, but no harm in checking + if 
((CPUInfo[3] & 0x6000000) != 0x6000000) + return false; // No SSE2/SSE support + +#if defined(__AVX2__) || defined(_XM_AVX2_INTRINSICS_) +#if defined(__clang__) || defined(__GNUC__) + __cpuid_count(7, 0, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]); +#else + __cpuidex(CPUInfo, 7, 0); +#endif + if (!(CPUInfo[1] & 0x20)) + return false; // No AVX2 support +#endif + + return true; +#elif defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + // ARM-NEON support is required for the Windows on ARM platform + return true; +#else + // No intrinsics path always supported + return true; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMFresnelTerm +( + FXMVECTOR CosIncidentAngle, + FXMVECTOR RefractionIndex +) noexcept +{ + assert(!XMVector4IsInfinite(CosIncidentAngle)); + + // Result = 0.5f * (g - c)^2 / (g + c)^2 * ((c * (g + c) - 1)^2 / (c * (g - c) + 1)^2 + 1) where + // c = CosIncidentAngle + // g = sqrt(c^2 + RefractionIndex^2 - 1) + +#if defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMVECTOR G = XMVectorMultiplyAdd(RefractionIndex, RefractionIndex, g_XMNegativeOne.v); + G = XMVectorMultiplyAdd(CosIncidentAngle, CosIncidentAngle, G); + G = XMVectorAbs(G); + G = XMVectorSqrt(G); + + XMVECTOR S = XMVectorAdd(G, CosIncidentAngle); + XMVECTOR D = XMVectorSubtract(G, CosIncidentAngle); + + XMVECTOR V0 = XMVectorMultiply(D, D); + XMVECTOR V1 = XMVectorMultiply(S, S); + V1 = XMVectorReciprocal(V1); + V0 = XMVectorMultiply(g_XMOneHalf.v, V0); + V0 = XMVectorMultiply(V0, V1); + + XMVECTOR V2 = XMVectorMultiplyAdd(CosIncidentAngle, S, g_XMNegativeOne.v); + XMVECTOR V3 = XMVectorMultiplyAdd(CosIncidentAngle, D, g_XMOne.v); + V2 = XMVectorMultiply(V2, V2); + V3 = XMVectorMultiply(V3, V3); + V3 = XMVectorReciprocal(V3); + V2 = XMVectorMultiplyAdd(V2, V3, g_XMOne.v); + + XMVECTOR Result = XMVectorMultiply(V0, V2); + + Result = XMVectorSaturate(Result); + + return 
Result; + +#elif defined(_XM_SSE_INTRINSICS_) + // G = sqrt(abs((RefractionIndex^2-1) + CosIncidentAngle^2)) + XMVECTOR G = _mm_mul_ps(RefractionIndex, RefractionIndex); + XMVECTOR vTemp = _mm_mul_ps(CosIncidentAngle, CosIncidentAngle); + G = _mm_sub_ps(G, g_XMOne); + vTemp = _mm_add_ps(vTemp, G); + // max((0-vTemp),vTemp) == abs(vTemp) + // The abs is needed to deal with refraction and cosine being zero + G = _mm_setzero_ps(); + G = _mm_sub_ps(G, vTemp); + G = _mm_max_ps(G, vTemp); + // Last operation, the sqrt() + G = _mm_sqrt_ps(G); + + // Calc G-C and G+C + XMVECTOR GAddC = _mm_add_ps(G, CosIncidentAngle); + XMVECTOR GSubC = _mm_sub_ps(G, CosIncidentAngle); + // Perform the term (0.5f *(g - c)^2) / (g + c)^2 + XMVECTOR vResult = _mm_mul_ps(GSubC, GSubC); + vTemp = _mm_mul_ps(GAddC, GAddC); + vResult = _mm_mul_ps(vResult, g_XMOneHalf); + vResult = _mm_div_ps(vResult, vTemp); + // Perform the term ((c * (g + c) - 1)^2 / (c * (g - c) + 1)^2 + 1) + GAddC = _mm_mul_ps(GAddC, CosIncidentAngle); + GSubC = _mm_mul_ps(GSubC, CosIncidentAngle); + GAddC = _mm_sub_ps(GAddC, g_XMOne); + GSubC = _mm_add_ps(GSubC, g_XMOne); + GAddC = _mm_mul_ps(GAddC, GAddC); + GSubC = _mm_mul_ps(GSubC, GSubC); + GAddC = _mm_div_ps(GAddC, GSubC); + GAddC = _mm_add_ps(GAddC, g_XMOne); + // Multiply the two term parts + vResult = _mm_mul_ps(vResult, GAddC); + // Clamp to 0.0 - 1.0f + vResult = _mm_max_ps(vResult, g_XMZero); + vResult = _mm_min_ps(vResult, g_XMOne); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XMScalarNearEqual +( + float S1, + float S2, + float Epsilon +) noexcept +{ + float Delta = S1 - S2; + return (fabsf(Delta) <= Epsilon); +} + +//------------------------------------------------------------------------------ +// Modulo the range of the given angle such that -XM_PI <= Angle < XM_PI +inline float XMScalarModAngle(float Angle) noexcept +{ + // Note: The modulo is performed with unsigned math 
only to work + // around a precision error on numbers that are close to PI + + // Normalize the range from 0.0f to XM_2PI + Angle = Angle + XM_PI; + // Perform the modulo, unsigned + float fTemp = fabsf(Angle); + fTemp = fTemp - (XM_2PI * static_cast(static_cast(fTemp / XM_2PI))); + // Restore the number to the range of -XM_PI to XM_PI-epsilon + fTemp = fTemp - XM_PI; + // If the modulo'd value was negative, restore negation + if (Angle < 0.0f) + { + fTemp = -fTemp; + } + return fTemp; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarSin(float Value) noexcept +{ + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast(static_cast(quotient + 0.5f)); + } + else + { + quotient = static_cast(static_cast(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with sin(y) = sin(Value). + if (y > XM_PIDIV2) + { + y = XM_PI - y; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + } + + // 11-degree minimax approximation + float y2 = y * y; + return (((((-2.3889859e-08f * y2 + 2.7525562e-06f) * y2 - 0.00019840874f) * y2 + 0.0083333310f) * y2 - 0.16666667f) * y2 + 1.0f) * y; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarSinEst(float Value) noexcept +{ + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast(static_cast(quotient + 0.5f)); + } + else + { + quotient = static_cast(static_cast(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with sin(y) = sin(Value). 
+ if (y > XM_PIDIV2) + { + y = XM_PI - y; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + } + + // 7-degree minimax approximation + float y2 = y * y; + return (((-0.00018524670f * y2 + 0.0083139502f) * y2 - 0.16665852f) * y2 + 1.0f) * y; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarCos(float Value) noexcept +{ + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast(static_cast(quotient + 0.5f)); + } + else + { + quotient = static_cast(static_cast(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with cos(y) = sign*cos(x). + float sign; + if (y > XM_PIDIV2) + { + y = XM_PI - y; + sign = -1.0f; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + sign = -1.0f; + } + else + { + sign = +1.0f; + } + + // 10-degree minimax approximation + float y2 = y * y; + float p = ((((-2.6051615e-07f * y2 + 2.4760495e-05f) * y2 - 0.0013888378f) * y2 + 0.041666638f) * y2 - 0.5f) * y2 + 1.0f; + return sign * p; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarCosEst(float Value) noexcept +{ + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast(static_cast(quotient + 0.5f)); + } + else + { + quotient = static_cast(static_cast(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with cos(y) = sign*cos(x). 
+ float sign; + if (y > XM_PIDIV2) + { + y = XM_PI - y; + sign = -1.0f; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + sign = -1.0f; + } + else + { + sign = +1.0f; + } + + // 6-degree minimax approximation + float y2 = y * y; + float p = ((-0.0012712436f * y2 + 0.041493919f) * y2 - 0.49992746f) * y2 + 1.0f; + return sign * p; +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline void XMScalarSinCos +( + float* pSin, + float* pCos, + float Value +) noexcept +{ + assert(pSin); + assert(pCos); + + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. + float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast(static_cast(quotient + 0.5f)); + } + else + { + quotient = static_cast(static_cast(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with sin(y) = sin(Value). + float sign; + if (y > XM_PIDIV2) + { + y = XM_PI - y; + sign = -1.0f; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + sign = -1.0f; + } + else + { + sign = +1.0f; + } + + float y2 = y * y; + + // 11-degree minimax approximation + *pSin = (((((-2.3889859e-08f * y2 + 2.7525562e-06f) * y2 - 0.00019840874f) * y2 + 0.0083333310f) * y2 - 0.16666667f) * y2 + 1.0f) * y; + + // 10-degree minimax approximation + float p = ((((-2.6051615e-07f * y2 + 2.4760495e-05f) * y2 - 0.0013888378f) * y2 + 0.041666638f) * y2 - 0.5f) * y2 + 1.0f; + *pCos = sign * p; +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline void XMScalarSinCosEst +( + float* pSin, + float* pCos, + float Value +) noexcept +{ + assert(pSin); + assert(pCos); + + // Map Value to y in [-pi,pi], x = 2*pi*quotient + remainder. 
+ float quotient = XM_1DIV2PI * Value; + if (Value >= 0.0f) + { + quotient = static_cast(static_cast(quotient + 0.5f)); + } + else + { + quotient = static_cast(static_cast(quotient - 0.5f)); + } + float y = Value - XM_2PI * quotient; + + // Map y to [-pi/2,pi/2] with sin(y) = sin(Value). + float sign; + if (y > XM_PIDIV2) + { + y = XM_PI - y; + sign = -1.0f; + } + else if (y < -XM_PIDIV2) + { + y = -XM_PI - y; + sign = -1.0f; + } + else + { + sign = +1.0f; + } + + float y2 = y * y; + + // 7-degree minimax approximation + *pSin = (((-0.00018524670f * y2 + 0.0083139502f) * y2 - 0.16665852f) * y2 + 1.0f) * y; + + // 6-degree minimax approximation + float p = ((-0.0012712436f * y2 + 0.041493919f) * y2 - 0.49992746f) * y2 + 1.0f; + *pCos = sign * p; +} + +//------------------------------------------------------------------------------ + +inline float XMScalarASin(float Value) noexcept +{ + // Clamp input to [-1,1]. + bool nonnegative = (Value >= 0.0f); + float x = fabsf(Value); + float omx = 1.0f - x; + if (omx < 0.0f) + { + omx = 0.0f; + } + float root = sqrtf(omx); + + // 7-degree minimax approximation + float result = ((((((-0.0012624911f * x + 0.0066700901f) * x - 0.0170881256f) * x + 0.0308918810f) * x - 0.0501743046f) * x + 0.0889789874f) * x - 0.2145988016f) * x + 1.5707963050f; + result *= root; // acos(|x|) + + // acos(x) = pi - acos(-x) when x < 0, asin(x) = pi/2 - acos(x) + return (nonnegative ? XM_PIDIV2 - result : result - XM_PIDIV2); +} + +//------------------------------------------------------------------------------ + +inline float XMScalarASinEst(float Value) noexcept +{ + // Clamp input to [-1,1]. 
+ bool nonnegative = (Value >= 0.0f); + float x = fabsf(Value); + float omx = 1.0f - x; + if (omx < 0.0f) + { + omx = 0.0f; + } + float root = sqrtf(omx); + + // 3-degree minimax approximation + float result = ((-0.0187293f * x + 0.0742610f) * x - 0.2121144f) * x + 1.5707288f; + result *= root; // acos(|x|) + + // acos(x) = pi - acos(-x) when x < 0, asin(x) = pi/2 - acos(x) + return (nonnegative ? XM_PIDIV2 - result : result - XM_PIDIV2); +} + +//------------------------------------------------------------------------------ + +inline float XMScalarACos(float Value) noexcept +{ + // Clamp input to [-1,1]. + bool nonnegative = (Value >= 0.0f); + float x = fabsf(Value); + float omx = 1.0f - x; + if (omx < 0.0f) + { + omx = 0.0f; + } + float root = sqrtf(omx); + + // 7-degree minimax approximation + float result = ((((((-0.0012624911f * x + 0.0066700901f) * x - 0.0170881256f) * x + 0.0308918810f) * x - 0.0501743046f) * x + 0.0889789874f) * x - 0.2145988016f) * x + 1.5707963050f; + result *= root; + + // acos(x) = pi - acos(-x) when x < 0 + return (nonnegative ? result : XM_PI - result); +} + +//------------------------------------------------------------------------------ + +inline float XMScalarACosEst(float Value) noexcept +{ + // Clamp input to [-1,1]. + bool nonnegative = (Value >= 0.0f); + float x = fabsf(Value); + float omx = 1.0f - x; + if (omx < 0.0f) + { + omx = 0.0f; + } + float root = sqrtf(omx); + + // 3-degree minimax approximation + float result = ((-0.0187293f * x + 0.0742610f) * x - 0.2121144f) * x + 1.5707288f; + result *= root; + + // acos(x) = pi - acos(-x) when x < 0 + return (nonnegative ? 
result : XM_PI - result); +} + diff --git a/include/directxmath/directxmathvector.inl b/include/directxmath/directxmathvector.inl new file mode 100644 index 0000000..f76d597 --- /dev/null +++ b/include/directxmath/directxmathvector.inl @@ -0,0 +1,14689 @@ +//------------------------------------------------------------------------------------- +// DirectXMathVector.inl -- SIMD C++ Math library +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// +// http://go.microsoft.com/fwlink/?LinkID=615560 +//------------------------------------------------------------------------------------- + +#pragma once + +#if defined(_XM_NO_INTRINSICS_) +#define XMISNAN(x) isnan(x) +#define XMISINF(x) isinf(x) +#endif + +#if defined(_XM_SSE_INTRINSICS_) + +#define XM3UNPACK3INTO4(l1, l2, l3) \ + XMVECTOR V3 = _mm_shuffle_ps(l2, l3, _MM_SHUFFLE(0, 0, 3, 2));\ + XMVECTOR V2 = _mm_shuffle_ps(l2, l1, _MM_SHUFFLE(3, 3, 1, 0));\ + V2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 0, 2));\ + XMVECTOR V4 = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(L3), 32 / 8)) + +#define XM3PACK4INTO3(v2x) \ + v2x = _mm_shuffle_ps(V2, V3, _MM_SHUFFLE(1, 0, 2, 1));\ + V2 = _mm_shuffle_ps(V2, V1, _MM_SHUFFLE(2, 2, 0, 0));\ + V1 = _mm_shuffle_ps(V1, V2, _MM_SHUFFLE(0, 2, 1, 0));\ + V3 = _mm_shuffle_ps(V3, V4, _MM_SHUFFLE(0, 0, 2, 2));\ + V3 = _mm_shuffle_ps(V3, V4, _MM_SHUFFLE(2, 1, 2, 0)) + +#endif + +/**************************************************************************** + * + * General Vector + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Assignment operations + //------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + // Return a vector with all elements equaling zero +inline XMVECTOR XM_CALLCONV XMVectorZero() noexcept 
+{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { 0.0f, 0.0f, 0.0f, 0.0f } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_f32(0); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_setzero_ps(); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with four floating point values +inline XMVECTOR XM_CALLCONV XMVectorSet +( + float x, + float y, + float z, + float w +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { x, y, z, w } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t V0 = vcreate_f32( + static_cast(*reinterpret_cast(&x)) + | (static_cast(*reinterpret_cast(&y)) << 32)); + float32x2_t V1 = vcreate_f32( + static_cast(*reinterpret_cast(&z)) + | (static_cast(*reinterpret_cast(&w)) << 32)); + return vcombine_f32(V0, V1); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_set_ps(w, z, y, x); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with four integer values +inline XMVECTOR XM_CALLCONV XMVectorSetInt +( + uint32_t x, + uint32_t y, + uint32_t z, + uint32_t w +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult = { { { x, y, z, w } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x2_t V0 = vcreate_u32(static_cast(x) | (static_cast(y) << 32)); + uint32x2_t V1 = vcreate_u32(static_cast(z) | (static_cast(w) << 32)); + return vcombine_u32(V0, V1); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_set_epi32(static_cast(w), static_cast(z), static_cast(y), static_cast(x)); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with a replicated floating point value +inline XMVECTOR XM_CALLCONV XMVectorReplicate(float Value) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; 
+ vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = Value; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_f32(Value); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_set_ps1(Value); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with a replicated floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorReplicatePtr(const float* pValue) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float Value = pValue[0]; + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = Value; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_dup_f32(pValue); +#elif defined(_XM_AVX_INTRINSICS_) + return _mm_broadcast_ss(pValue); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_load_ps1(pValue); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with a replicated integer value +inline XMVECTOR XM_CALLCONV XMVectorReplicateInt(uint32_t Value) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = Value; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(Value); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_set1_epi32(static_cast(Value)); + return _mm_castsi128_ps(vTemp); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with a replicated integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorReplicateIntPtr(const uint32_t* pValue) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t Value = pValue[0]; + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = Value; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return 
vld1q_dup_u32(pValue); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_load_ps1(reinterpret_cast(pValue)); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with all bits set (true mask) +inline XMVECTOR XM_CALLCONV XMVectorTrueInt() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult = { { { 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_s32(-1); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_set1_epi32(-1); + return _mm_castsi128_ps(V); +#endif +} + +//------------------------------------------------------------------------------ +// Initialize a vector with all bits clear (false mask) +inline XMVECTOR XM_CALLCONV XMVectorFalseInt() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { 0.0f, 0.0f, 0.0f, 0.0f } } }; + return vResult; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_setzero_ps(); +#endif +} + +//------------------------------------------------------------------------------ +// Replicate the x component of the vector +inline XMVECTOR XM_CALLCONV XMVectorSplatX(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = V.vector4_f32[0]; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_lane_f32(vget_low_f32(V), 0); +#elif defined(_XM_AVX2_INTRINSICS_) && defined(_XM_FAVOR_INTEL_) + return _mm_broadcastss_ps(V); +#elif defined(_XM_SSE_INTRINSICS_) + return XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); +#endif +} + +//------------------------------------------------------------------------------ +// Replicate the y component of the vector +inline XMVECTOR XM_CALLCONV XMVectorSplatY(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = 
+ vResult.f[1] = + vResult.f[2] = + vResult.f[3] = V.vector4_f32[1]; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_lane_f32(vget_low_f32(V), 1); +#elif defined(_XM_SSE_INTRINSICS_) + return XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); +#endif +} + +//------------------------------------------------------------------------------ +// Replicate the z component of the vector +inline XMVECTOR XM_CALLCONV XMVectorSplatZ(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = V.vector4_f32[2]; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_lane_f32(vget_high_f32(V), 0); +#elif defined(_XM_SSE_INTRINSICS_) + return XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); +#endif +} + +//------------------------------------------------------------------------------ +// Replicate the w component of the vector +inline XMVECTOR XM_CALLCONV XMVectorSplatW(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = V.vector4_f32[3]; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_lane_f32(vget_high_f32(V), 1); +#elif defined(_XM_SSE_INTRINSICS_) + return XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of 1.0f,1.0f,1.0f,1.0f +inline XMVECTOR XM_CALLCONV XMVectorSplatOne() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = 1.0f; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_f32(1.0f); +#elif defined(_XM_SSE_INTRINSICS_) + return g_XMOne; +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of INF,INF,INF,INF +inline XMVECTOR XM_CALLCONV 
XMVectorSplatInfinity() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = 0x7F800000; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0x7F800000); +#elif defined(_XM_SSE_INTRINSICS_) + return g_XMInfinity; +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of Q_NAN,Q_NAN,Q_NAN,Q_NAN +inline XMVECTOR XM_CALLCONV XMVectorSplatQNaN() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = 0x7FC00000; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0x7FC00000); +#elif defined(_XM_SSE_INTRINSICS_) + return g_XMQNaN; +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of 1.192092896e-7f,1.192092896e-7f,1.192092896e-7f,1.192092896e-7f +inline XMVECTOR XM_CALLCONV XMVectorSplatEpsilon() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = 0x34000000; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0x34000000); +#elif defined(_XM_SSE_INTRINSICS_) + return g_XMEpsilon; +#endif +} + +//------------------------------------------------------------------------------ +// Return a vector of -0.0f (0x80000000),-0.0f,-0.0f,-0.0f +inline XMVECTOR XM_CALLCONV XMVectorSplatSignMask() noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 vResult; + vResult.u[0] = + vResult.u[1] = + vResult.u[2] = + vResult.u[3] = 0x80000000U; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vdupq_n_u32(0x80000000U); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i V = _mm_set1_epi32(static_cast(0x80000000)); + return _mm_castsi128_ps(V); +#endif +} + 
+//------------------------------------------------------------------------------ +// Return a floating point value via an index. This is not a recommended +// function to use due to performance loss. +inline float XM_CALLCONV XMVectorGetByIndex(FXMVECTOR V, size_t i) noexcept +{ + assert(i < 4); + _Analysis_assume_(i < 4); +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[i]; +#else + XMVECTORF32 U; + U.v = V; + return U.f[i]; +#endif +} + +//------------------------------------------------------------------------------ +// Return the X component in an FPU register. +inline float XM_CALLCONV XMVectorGetX(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[0]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_f32(V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_cvtss_f32(V); +#endif +} + +// Return the Y component in an FPU register. +inline float XM_CALLCONV XMVectorGetY(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_f32(V, 1); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + return _mm_cvtss_f32(vTemp); +#endif +} + +// Return the Z component in an FPU register. +inline float XM_CALLCONV XMVectorGetZ(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_f32(V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + return _mm_cvtss_f32(vTemp); +#endif +} + +// Return the W component in an FPU register. 
+inline float XM_CALLCONV XMVectorGetW(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_f32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_f32(V, 3); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + return _mm_cvtss_f32(vTemp); +#endif +} + +//------------------------------------------------------------------------------ + +// Store a component indexed by i into a 32 bit float location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetByIndexPtr(float* f, FXMVECTOR V, size_t i) noexcept +{ + assert(f != nullptr); + assert(i < 4); + _Analysis_assume_(i < 4); +#if defined(_XM_NO_INTRINSICS_) + *f = V.vector4_f32[i]; +#else + XMVECTORF32 U; + U.v = V; + *f = U.f[i]; +#endif +} + +//------------------------------------------------------------------------------ + +// Store the X component into a 32 bit float location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetXPtr(float* x, FXMVECTOR V) noexcept +{ + assert(x != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *x = V.vector4_f32[0]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(x, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ss(x, V); +#endif +} + +// Store the Y component into a 32 bit float location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetYPtr(float* y, FXMVECTOR V) noexcept +{ + assert(y != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *y = V.vector4_f32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(y, V, 1); +#elif defined(_XM_SSE4_INTRINSICS_) + * (reinterpret_cast(y)) = _mm_extract_ps(V, 1); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + _mm_store_ss(y, vResult); +#endif +} + +// Store the Z component into a 32 bit float location in memory. 
+_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetZPtr(float* z, FXMVECTOR V) noexcept +{ + assert(z != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *z = V.vector4_f32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(z, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + * (reinterpret_cast(z)) = _mm_extract_ps(V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(z, vResult); +#endif +} + +// Store the W component into a 32 bit float location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetWPtr(float* w, FXMVECTOR V) noexcept +{ + assert(w != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *w = V.vector4_f32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_f32(w, V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + * (reinterpret_cast(w)) = _mm_extract_ps(V, 3); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + _mm_store_ss(w, vResult); +#endif +} + +//------------------------------------------------------------------------------ + +// Return an integer value via an index. This is not a recommended +// function to use due to performance loss. +inline uint32_t XM_CALLCONV XMVectorGetIntByIndex(FXMVECTOR V, size_t i) noexcept +{ + assert(i < 4); + _Analysis_assume_(i < 4); +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[i]; +#else + XMVECTORU32 U; + U.v = V; + return U.u[i]; +#endif +} + +//------------------------------------------------------------------------------ + +// Return the X component in an integer register. +inline uint32_t XM_CALLCONV XMVectorGetIntX(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[0]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_u32(V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + return static_cast(_mm_cvtsi128_si32(_mm_castps_si128(V))); +#endif +} + +// Return the Y component in an integer register. 
+inline uint32_t XM_CALLCONV XMVectorGetIntY(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_u32(V, 1); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + return static_cast(_mm_extract_epi32(V1, 1)); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vResulti = _mm_shuffle_epi32(_mm_castps_si128(V), _MM_SHUFFLE(1, 1, 1, 1)); + return static_cast(_mm_cvtsi128_si32(vResulti)); +#endif +} + +// Return the Z component in an integer register. +inline uint32_t XM_CALLCONV XMVectorGetIntZ(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_u32(V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + return static_cast(_mm_extract_epi32(V1, 2)); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vResulti = _mm_shuffle_epi32(_mm_castps_si128(V), _MM_SHUFFLE(2, 2, 2, 2)); + return static_cast(_mm_cvtsi128_si32(vResulti)); +#endif +} + +// Return the W component in an integer register. +inline uint32_t XM_CALLCONV XMVectorGetIntW(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return V.vector4_u32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vgetq_lane_u32(V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + return static_cast(_mm_extract_epi32(V1, 3)); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vResulti = _mm_shuffle_epi32(_mm_castps_si128(V), _MM_SHUFFLE(3, 3, 3, 3)); + return static_cast(_mm_cvtsi128_si32(vResulti)); +#endif +} + +//------------------------------------------------------------------------------ + +// Store a component indexed by i into a 32 bit integer location in memory. 
+_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntByIndexPtr(uint32_t* x, FXMVECTOR V, size_t i) noexcept +{ + assert(x != nullptr); + assert(i < 4); + _Analysis_assume_(i < 4); +#if defined(_XM_NO_INTRINSICS_) + *x = V.vector4_u32[i]; +#else + XMVECTORU32 U; + U.v = V; + *x = U.u[i]; +#endif +} + +//------------------------------------------------------------------------------ + +// Store the X component into a 32 bit integer location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntXPtr(uint32_t* x, FXMVECTOR V) noexcept +{ + assert(x != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *x = V.vector4_u32[0]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(x, *reinterpret_cast(&V), 0); +#elif defined(_XM_SSE_INTRINSICS_) + _mm_store_ss(reinterpret_cast(x), V); +#endif +} + +// Store the Y component into a 32 bit integer location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntYPtr(uint32_t* y, FXMVECTOR V) noexcept +{ + assert(y != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *y = V.vector4_u32[1]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(y, *reinterpret_cast(&V), 1); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + *y = static_cast(_mm_extract_epi32(V1, 1)); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + _mm_store_ss(reinterpret_cast(y), vResult); +#endif +} + +// Store the Z component into a 32 bit integer location in memory. 
+_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntZPtr(uint32_t* z, FXMVECTOR V) noexcept +{ + assert(z != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *z = V.vector4_u32[2]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(z, *reinterpret_cast(&V), 2); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + *z = static_cast(_mm_extract_epi32(V1, 2)); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(z), vResult); +#endif +} + +// Store the W component into a 32 bit integer location in memory. +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorGetIntWPtr(uint32_t* w, FXMVECTOR V) noexcept +{ + assert(w != nullptr); +#if defined(_XM_NO_INTRINSICS_) + *w = V.vector4_u32[3]; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + vst1q_lane_u32(w, *reinterpret_cast(&V), 3); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i V1 = _mm_castps_si128(V); + *w = static_cast(_mm_extract_epi32(V1, 3)); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + _mm_store_ss(reinterpret_cast(w), vResult); +#endif +} + +//------------------------------------------------------------------------------ + +// Set a single indexed floating point component +inline XMVECTOR XM_CALLCONV XMVectorSetByIndex(FXMVECTOR V, float f, size_t i) noexcept +{ + assert(i < 4); + _Analysis_assume_(i < 4); + XMVECTORF32 U; + U.v = V; + U.f[i] = f; + return U.v; +} + +//------------------------------------------------------------------------------ + +// Sets the X component of a vector to a passed floating point value +inline XMVECTOR XM_CALLCONV XMVectorSetX(FXMVECTOR V, float x) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + x, + V.vector4_f32[1], + V.vector4_f32[2], + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_f32(x, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + 
XMVECTOR vResult = _mm_set_ss(x); + vResult = _mm_move_ss(V, vResult); + return vResult; +#endif +} + +// Sets the Y component of a vector to a passed floating point value +inline XMVECTOR XM_CALLCONV XMVectorSetY(FXMVECTOR V, float y) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + y, + V.vector4_f32[2], + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_f32(y, V, 1); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vResult = _mm_set_ss(y); + vResult = _mm_insert_ps(V, vResult, 0x10); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Swap y and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + // Convert input to vector + XMVECTOR vTemp = _mm_set_ss(y); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap y and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 2, 0, 1)); + return vResult; +#endif +} +// Sets the Z component of a vector to a passed floating point value +inline XMVECTOR XM_CALLCONV XMVectorSetZ(FXMVECTOR V, float z) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + V.vector4_f32[1], + z, + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_f32(z, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vResult = _mm_set_ss(z); + vResult = _mm_insert_ps(V, vResult, 0x20); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Swap z and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 0, 1, 2)); + // Convert input to vector + XMVECTOR vTemp = _mm_set_ss(z); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap z and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 0, 1, 2)); + return vResult; +#endif +} + +// Sets the W component of a vector to a passed floating point value +inline XMVECTOR XM_CALLCONV XMVectorSetW(FXMVECTOR V, float w) noexcept +{ +#if 
defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + V.vector4_f32[1], + V.vector4_f32[2], + w + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_f32(w, V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vResult = _mm_set_ss(w); + vResult = _mm_insert_ps(V, vResult, 0x30); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Swap w and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 2, 1, 3)); + // Convert input to vector + XMVECTOR vTemp = _mm_set_ss(w); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap w and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 2, 1, 3)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +// Sets a component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetByIndexPtr(FXMVECTOR V, const float* f, size_t i) noexcept +{ + assert(f != nullptr); + assert(i < 4); + _Analysis_assume_(i < 4); + XMVECTORF32 U; + U.v = V; + U.f[i] = *f; + return U.v; +} + +//------------------------------------------------------------------------------ + +// Sets the X component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetXPtr(FXMVECTOR V, const float* x) noexcept +{ + assert(x != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + *x, + V.vector4_f32[1], + V.vector4_f32[2], + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_f32(x, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = _mm_load_ss(x); + vResult = _mm_move_ss(V, vResult); + return vResult; +#endif +} + +// Sets the Y component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetYPtr(FXMVECTOR V, const float* y) noexcept +{ + assert(y != 
nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + *y, + V.vector4_f32[2], + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_f32(y, V, 1); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap y and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(y); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap y and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 2, 0, 1)); + return vResult; +#endif +} + +// Sets the Z component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetZPtr(FXMVECTOR V, const float* z) noexcept +{ + assert(z != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + V.vector4_f32[1], + *z, + V.vector4_f32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_f32(z, V, 2); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap z and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 0, 1, 2)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(z); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap z and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 0, 1, 2)); + return vResult; +#endif +} + +// Sets the W component of a vector to a floating point value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetWPtr(FXMVECTOR V, const float* w) noexcept +{ + assert(w != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 U = { { { + V.vector4_f32[0], + V.vector4_f32[1], + V.vector4_f32[2], + *w + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_f32(w, V, 3); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap w and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 2, 1, 3)); + // Convert input to vector + XMVECTOR vTemp 
= _mm_load_ss(w); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap w and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 2, 1, 3)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +// Sets a component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntByIndex(FXMVECTOR V, uint32_t x, size_t i) noexcept +{ + assert(i < 4); + _Analysis_assume_(i < 4); + XMVECTORU32 tmp; + tmp.v = V; + tmp.u[i] = x; + return tmp; +} + +//------------------------------------------------------------------------------ + +// Sets the X component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntX(FXMVECTOR V, uint32_t x) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + x, + V.vector4_u32[1], + V.vector4_u32[2], + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_u32(x, V, 0); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cvtsi32_si128(static_cast(x)); + XMVECTOR vResult = _mm_move_ss(V, _mm_castsi128_ps(vTemp)); + return vResult; +#endif +} + +// Sets the Y component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntY(FXMVECTOR V, uint32_t y) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + y, + V.vector4_u32[2], + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_u32(y, V, 1); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i vResult = _mm_castps_si128(V); + vResult = _mm_insert_epi32(vResult, static_cast(y), 1); + return _mm_castsi128_ps(vResult); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap y and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + // Convert input to vector + __m128i vTemp = _mm_cvtsi32_si128(static_cast(y)); + // Replace the x component + vResult = _mm_move_ss(vResult, 
_mm_castsi128_ps(vTemp)); + // Swap y and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 2, 0, 1)); + return vResult; +#endif +} + +// Sets the Z component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntZ(FXMVECTOR V, uint32_t z) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + V.vector4_u32[1], + z, + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_u32(z, V, 2); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i vResult = _mm_castps_si128(V); + vResult = _mm_insert_epi32(vResult, static_cast(z), 2); + return _mm_castsi128_ps(vResult); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap z and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 0, 1, 2)); + // Convert input to vector + __m128i vTemp = _mm_cvtsi32_si128(static_cast(z)); + // Replace the x component + vResult = _mm_move_ss(vResult, _mm_castsi128_ps(vTemp)); + // Swap z and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 0, 1, 2)); + return vResult; +#endif +} + +// Sets the W component of a vector to an integer passed by value +inline XMVECTOR XM_CALLCONV XMVectorSetIntW(FXMVECTOR V, uint32_t w) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + V.vector4_u32[1], + V.vector4_u32[2], + w + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vsetq_lane_u32(w, V, 3); +#elif defined(_XM_SSE4_INTRINSICS_) + __m128i vResult = _mm_castps_si128(V); + vResult = _mm_insert_epi32(vResult, static_cast(w), 3); + return _mm_castsi128_ps(vResult); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap w and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 2, 1, 3)); + // Convert input to vector + __m128i vTemp = _mm_cvtsi32_si128(static_cast(w)); + // Replace the x component + vResult = _mm_move_ss(vResult, _mm_castsi128_ps(vTemp)); + // Swap w and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 2, 1, 3)); 
+ return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +// Sets a component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntByIndexPtr(FXMVECTOR V, const uint32_t* x, size_t i) noexcept +{ + assert(x != nullptr); + assert(i < 4); + _Analysis_assume_(i < 4); + XMVECTORU32 tmp; + tmp.v = V; + tmp.u[i] = *x; + return tmp; +} + +//------------------------------------------------------------------------------ + +// Sets the X component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntXPtr(FXMVECTOR V, const uint32_t* x) noexcept +{ + assert(x != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + *x, + V.vector4_u32[1], + V.vector4_u32[2], + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_u32(x, *reinterpret_cast(&V), 0); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_load_ss(reinterpret_cast(x)); + XMVECTOR vResult = _mm_move_ss(V, vTemp); + return vResult; +#endif +} + +// Sets the Y component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntYPtr(FXMVECTOR V, const uint32_t* y) noexcept +{ + assert(y != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + *y, + V.vector4_u32[2], + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_u32(y, *reinterpret_cast(&V), 1); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap y and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(reinterpret_cast(y)); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap y and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 2, 0, 1)); + return vResult; +#endif +} + 
+// Sets the Z component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntZPtr(FXMVECTOR V, const uint32_t* z) noexcept +{ + assert(z != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + V.vector4_u32[1], + *z, + V.vector4_u32[3] + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_u32(z, *reinterpret_cast(&V), 2); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap z and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 0, 1, 2)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(reinterpret_cast(z)); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap z and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(3, 0, 1, 2)); + return vResult; +#endif +} + +// Sets the W component of a vector to an integer value passed by pointer +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorSetIntWPtr(FXMVECTOR V, const uint32_t* w) noexcept +{ + assert(w != nullptr); +#if defined(_XM_NO_INTRINSICS_) + XMVECTORU32 U = { { { + V.vector4_u32[0], + V.vector4_u32[1], + V.vector4_u32[2], + *w + } } }; + return U.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vld1q_lane_u32(w, *reinterpret_cast(&V), 3); +#elif defined(_XM_SSE_INTRINSICS_) + // Swap w and x + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 2, 1, 3)); + // Convert input to vector + XMVECTOR vTemp = _mm_load_ss(reinterpret_cast(w)); + // Replace the x component + vResult = _mm_move_ss(vResult, vTemp); + // Swap w and x again + vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 2, 1, 3)); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSwizzle +( + FXMVECTOR V, + uint32_t E0, + uint32_t E1, + uint32_t E2, + uint32_t E3 +) noexcept +{ + assert((E0 < 4) && (E1 < 4) && (E2 < 4) && (E3 < 4)); + _Analysis_assume_((E0 < 4) && (E1 < 4) 
&& (E2 < 4) && (E3 < 4)); +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + V.vector4_f32[E0], + V.vector4_f32[E1], + V.vector4_f32[E2], + V.vector4_f32[E3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const uint32_t ControlElement[4] = + { + 0x03020100, // XM_SWIZZLE_X + 0x07060504, // XM_SWIZZLE_Y + 0x0B0A0908, // XM_SWIZZLE_Z + 0x0F0E0D0C, // XM_SWIZZLE_W + }; + + uint8x8x2_t tbl; + tbl.val[0] = vget_low_f32(V); + tbl.val[1] = vget_high_f32(V); + + uint32x2_t idx = vcreate_u32(static_cast(ControlElement[E0]) | (static_cast(ControlElement[E1]) << 32)); + const uint8x8_t rL = vtbl2_u8(tbl, vreinterpret_u8_u32(idx)); + + idx = vcreate_u32(static_cast(ControlElement[E2]) | (static_cast(ControlElement[E3]) << 32)); + const uint8x8_t rH = vtbl2_u8(tbl, vreinterpret_u8_u32(idx)); + + return vcombine_f32(rL, rH); +#elif defined(_XM_AVX_INTRINSICS_) + unsigned int elem[4] = { E0, E1, E2, E3 }; + __m128i vControl = _mm_loadu_si128(reinterpret_cast(&elem[0])); + return _mm_permutevar_ps(V, vControl); +#else + auto aPtr = reinterpret_cast(&V); + + XMVECTOR Result; + auto pWork = reinterpret_cast(&Result); + + pWork[0] = aPtr[E0]; + pWork[1] = aPtr[E1]; + pWork[2] = aPtr[E2]; + pWork[3] = aPtr[E3]; + + return Result; +#endif +} + +//------------------------------------------------------------------------------ +inline XMVECTOR XM_CALLCONV XMVectorPermute +( + FXMVECTOR V1, + FXMVECTOR V2, + uint32_t PermuteX, + uint32_t PermuteY, + uint32_t PermuteZ, + uint32_t PermuteW +) noexcept +{ + assert(PermuteX <= 7 && PermuteY <= 7 && PermuteZ <= 7 && PermuteW <= 7); + _Analysis_assume_(PermuteX <= 7 && PermuteY <= 7 && PermuteZ <= 7 && PermuteW <= 7); + +#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + static const uint32_t ControlElement[8] = + { + 0x03020100, // XM_PERMUTE_0X + 0x07060504, // XM_PERMUTE_0Y + 0x0B0A0908, // XM_PERMUTE_0Z + 0x0F0E0D0C, // XM_PERMUTE_0W + 0x13121110, // XM_PERMUTE_1X + 
0x17161514, // XM_PERMUTE_1Y + 0x1B1A1918, // XM_PERMUTE_1Z + 0x1F1E1D1C, // XM_PERMUTE_1W + }; + + uint8x8x4_t tbl; + tbl.val[0] = vget_low_f32(V1); + tbl.val[1] = vget_high_f32(V1); + tbl.val[2] = vget_low_f32(V2); + tbl.val[3] = vget_high_f32(V2); + + uint32x2_t idx = vcreate_u32(static_cast(ControlElement[PermuteX]) | (static_cast(ControlElement[PermuteY]) << 32)); + const uint8x8_t rL = vtbl4_u8(tbl, vreinterpret_u8_u32(idx)); + + idx = vcreate_u32(static_cast(ControlElement[PermuteZ]) | (static_cast(ControlElement[PermuteW]) << 32)); + const uint8x8_t rH = vtbl4_u8(tbl, vreinterpret_u8_u32(idx)); + + return vcombine_f32(rL, rH); +#elif defined(_XM_AVX_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + static const XMVECTORU32 three = { { { 3, 3, 3, 3 } } }; + + XM_ALIGNED_DATA(16) unsigned int elem[4] = { PermuteX, PermuteY, PermuteZ, PermuteW }; + __m128i vControl = _mm_load_si128(reinterpret_cast(&elem[0])); + + __m128i vSelect = _mm_cmpgt_epi32(vControl, three); + vControl = _mm_castps_si128(_mm_and_ps(_mm_castsi128_ps(vControl), three)); + + __m128 shuffled1 = _mm_permutevar_ps(V1, vControl); + __m128 shuffled2 = _mm_permutevar_ps(V2, vControl); + + __m128 masked1 = _mm_andnot_ps(_mm_castsi128_ps(vSelect), shuffled1); + __m128 masked2 = _mm_and_ps(_mm_castsi128_ps(vSelect), shuffled2); + + return _mm_or_ps(masked1, masked2); +#else + + const uint32_t* aPtr[2]; + aPtr[0] = reinterpret_cast(&V1); + aPtr[1] = reinterpret_cast(&V2); + + XMVECTOR Result; + auto pWork = reinterpret_cast(&Result); + + const uint32_t i0 = PermuteX & 3; + const uint32_t vi0 = PermuteX >> 2; + pWork[0] = aPtr[vi0][i0]; + + const uint32_t i1 = PermuteY & 3; + const uint32_t vi1 = PermuteY >> 2; + pWork[1] = aPtr[vi1][i1]; + + const uint32_t i2 = PermuteZ & 3; + const uint32_t vi2 = PermuteZ >> 2; + pWork[2] = aPtr[vi2][i2]; + + const uint32_t i3 = PermuteW & 3; + const uint32_t vi3 = PermuteW >> 2; + pWork[3] = aPtr[vi3][i3]; + + return Result; +#endif +} + 
+//------------------------------------------------------------------------------ +// Define a control vector to be used in XMVectorSelect +// operations. The four integers specified in XMVectorSelectControl +// serve as indices to select between components in two vectors. +// The first index controls selection for the first component of +// the vectors involved in a select operation, the second index +// controls selection for the second component etc. A value of +// zero for an index causes the corresponding component from the first +// vector to be selected whereas a one causes the component from the +// second vector to be selected instead. + +inline XMVECTOR XM_CALLCONV XMVectorSelectControl +( + uint32_t VectorIndex0, + uint32_t VectorIndex1, + uint32_t VectorIndex2, + uint32_t VectorIndex3 +) noexcept +{ +#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + // x=Index0,y=Index1,z=Index2,w=Index3 + __m128i vTemp = _mm_set_epi32(static_cast(VectorIndex3), static_cast(VectorIndex2), static_cast(VectorIndex1), static_cast(VectorIndex0)); + // Any non-zero entries become 0xFFFFFFFF else 0 + vTemp = _mm_cmpgt_epi32(vTemp, g_XMZero); + return _mm_castsi128_ps(vTemp); +#elif defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + int32x2_t V0 = vcreate_s32(static_cast(VectorIndex0) | (static_cast(VectorIndex1) << 32)); + int32x2_t V1 = vcreate_s32(static_cast(VectorIndex2) | (static_cast(VectorIndex3) << 32)); + int32x4_t vTemp = vcombine_s32(V0, V1); + // Any non-zero entries become 0xFFFFFFFF else 0 + return vcgtq_s32(vTemp, g_XMZero); +#else + XMVECTOR ControlVector; + const uint32_t ControlElement[] = + { + XM_SELECT_0, + XM_SELECT_1 + }; + + assert(VectorIndex0 < 2); + assert(VectorIndex1 < 2); + assert(VectorIndex2 < 2); + assert(VectorIndex3 < 2); + _Analysis_assume_(VectorIndex0 < 2); + _Analysis_assume_(VectorIndex1 < 2); + _Analysis_assume_(VectorIndex2 < 2); + _Analysis_assume_(VectorIndex3 < 2); + + ControlVector.vector4_u32[0] 
= ControlElement[VectorIndex0]; + ControlVector.vector4_u32[1] = ControlElement[VectorIndex1]; + ControlVector.vector4_u32[2] = ControlElement[VectorIndex2]; + ControlVector.vector4_u32[3] = ControlElement[VectorIndex3]; + + return ControlVector; + +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSelect +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR Control +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + (V1.vector4_u32[0] & ~Control.vector4_u32[0]) | (V2.vector4_u32[0] & Control.vector4_u32[0]), + (V1.vector4_u32[1] & ~Control.vector4_u32[1]) | (V2.vector4_u32[1] & Control.vector4_u32[1]), + (V1.vector4_u32[2] & ~Control.vector4_u32[2]) | (V2.vector4_u32[2] & Control.vector4_u32[2]), + (V1.vector4_u32[3] & ~Control.vector4_u32[3]) | (V2.vector4_u32[3] & Control.vector4_u32[3]), + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vbslq_f32(Control, V2, V1); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp1 = _mm_andnot_ps(Control, V1); + XMVECTOR vTemp2 = _mm_and_ps(V2, Control); + return _mm_or_ps(vTemp1, vTemp2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorMergeXY +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + V1.vector4_u32[0], + V2.vector4_u32[0], + V1.vector4_u32[1], + V2.vector4_u32[1], + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vzipq_f32(V1, V2).val[0]; +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_unpacklo_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorMergeZW +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Result = { { { + V1.vector4_u32[2], + V2.vector4_u32[2], + 
V1.vector4_u32[3], + V2.vector4_u32[3] + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vzipq_f32(V1, V2).val[1]; +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_unpackhi_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2, uint32_t Elements) noexcept +{ + assert(Elements < 4); + _Analysis_assume_(Elements < 4); + return XMVectorPermute(V1, V2, Elements, ((Elements)+1), ((Elements)+2), ((Elements)+3)); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorRotateLeft(FXMVECTOR V, uint32_t Elements) noexcept +{ + assert(Elements < 4); + _Analysis_assume_(Elements < 4); + return XMVectorSwizzle(V, Elements & 3, (Elements + 1) & 3, (Elements + 2) & 3, (Elements + 3) & 3); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorRotateRight(FXMVECTOR V, uint32_t Elements) noexcept +{ + assert(Elements < 4); + _Analysis_assume_(Elements < 4); + return XMVectorSwizzle(V, (4 - (Elements)) & 3, (5 - (Elements)) & 3, (6 - (Elements)) & 3, (7 - (Elements)) & 3); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorInsert( + FXMVECTOR VD, FXMVECTOR VS, + uint32_t VSLeftRotateElements, + uint32_t Select0, uint32_t Select1, uint32_t Select2, uint32_t Select3) noexcept +{ + XMVECTOR Control = XMVectorSelectControl(Select0 & 1, Select1 & 1, Select2 & 1, Select3 & 1); + return XMVectorSelect(VD, XMVectorRotateLeft(VS, VSLeftRotateElements), Control); +} + +//------------------------------------------------------------------------------ +// Comparison operations +//------------------------------------------------------------------------------ + 
+//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V1.vector4_f32[0] == V2.vector4_f32[0]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[1] == V2.vector4_f32[1]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[2] == V2.vector4_f32[2]) ? 0xFFFFFFFF : 0, + (V1.vector4_f32[3] == V2.vector4_f32[3]) ? 0xFFFFFFFF : 0, + } } }; + return Control.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vceqq_f32(V1, V2); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_cmpeq_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMVECTOR XM_CALLCONV XMVectorEqualR +( + uint32_t* pCR, + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + assert(pCR != nullptr); +#if defined(_XM_NO_INTRINSICS_) + uint32_t ux = (V1.vector4_f32[0] == V2.vector4_f32[0]) ? 0xFFFFFFFFU : 0; + uint32_t uy = (V1.vector4_f32[1] == V2.vector4_f32[1]) ? 0xFFFFFFFFU : 0; + uint32_t uz = (V1.vector4_f32[2] == V2.vector4_f32[2]) ? 0xFFFFFFFFU : 0; + uint32_t uw = (V1.vector4_f32[3] == V2.vector4_f32[3]) ? 
0xFFFFFFFFU : 0; + uint32_t CR = 0; + if (ux & uy & uz & uw) + { + // All elements are greater + CR = XM_CRMASK_CR6TRUE; + } + else if (!(ux | uy | uz | uw)) + { + // All elements are not greater + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + + XMVECTORU32 Control = { { { ux, uy, uz, uw } } }; + return Control; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + // All elements are equal + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + // All elements are not equal + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + uint32_t CR = 0; + int iTest = _mm_movemask_ps(vTemp); + if (iTest == 0xf) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + // All elements are not greater + CR = XM_CRMASK_CR6FALSE; + } + *pCR = CR; + return vTemp; +#endif +} + +//------------------------------------------------------------------------------ +// Treat the components of the vectors as unsigned integers and +// compare individual bits between the two. This is useful for +// comparing control vectors and result vectors returned from +// other comparison operations. + +inline XMVECTOR XM_CALLCONV XMVectorEqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORU32 Control = { { { + (V1.vector4_u32[0] == V2.vector4_u32[0]) ? 0xFFFFFFFF : 0, + (V1.vector4_u32[1] == V2.vector4_u32[1]) ? 0xFFFFFFFF : 0, + (V1.vector4_u32[2] == V2.vector4_u32[2]) ? 0xFFFFFFFF : 0, + (V1.vector4_u32[3] == V2.vector4_u32[3]) ? 
        0xFFFFFFFF : 0,
    } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vceqq_u32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    __m128i V = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
    return _mm_castsi128_ps(V);
#endif
}

//------------------------------------------------------------------------------

// Per-component bitwise (integer) equality test. In addition to returning the
// per-lane all-ones/all-zeros mask, reports through *pCR whether ALL lanes
// compared equal (XM_CRMASK_CR6TRUE) or NO lanes compared equal
// (XM_CRMASK_CR6FALSE); otherwise *pCR is 0.
_Use_decl_annotations_
inline XMVECTOR XM_CALLCONV XMVectorEqualIntR
(
    uint32_t* pCR,
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
    assert(pCR != nullptr);
#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR Control = XMVectorEqualInt(V1, V2);

    *pCR = 0;
    if (XMVector4EqualInt(Control, XMVectorTrueInt()))
    {
        // All elements are equal
        *pCR |= XM_CRMASK_CR6TRUE;
    }
    else if (XMVector4EqualInt(Control, XMVectorFalseInt()))
    {
        // All elements are not equal
        *pCR |= XM_CRMASK_CR6FALSE;
    }
    return Control;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x4_t vResult = vceqq_u32(V1, V2);
    // Zip the lane masks down so one byte of each lane lands in a single
    // 32-bit word; r is then all-ones iff every lane matched, zero iff none.
    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
    uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]);
    uint32_t r = vget_lane_u32(vTemp2.val[1], 1);
    uint32_t CR = 0;
    if (r == 0xFFFFFFFFU)
    {
        // All elements are equal
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!r)
    {
        // All elements are not equal
        CR = XM_CRMASK_CR6FALSE;
    }
    *pCR = CR;
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    __m128i V = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
    // movemask collapses the 4 lane sign bits: 0x0F == all lanes equal.
    int iTemp = _mm_movemask_ps(_mm_castsi128_ps(V));
    uint32_t CR = 0;
    if (iTemp == 0x0F)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!iTemp)
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    *pCR = CR;
    return _mm_castsi128_ps(V);
#endif
}

//------------------------------------------------------------------------------

// Per-component tolerance compare: each result lane is all-ones when
// |V1 - V2| <= Epsilon for that lane, otherwise zero.
inline XMVECTOR XM_CALLCONV XMVectorNearEqual
(
    FXMVECTOR V1,
    FXMVECTOR V2,
    FXMVECTOR Epsilon
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    float fDeltax = V1.vector4_f32[0] - V2.vector4_f32[0];
    float fDeltay = V1.vector4_f32[1] - V2.vector4_f32[1];
    float fDeltaz = V1.vector4_f32[2] - V2.vector4_f32[2];
    float fDeltaw = V1.vector4_f32[3] - V2.vector4_f32[3];

    fDeltax = fabsf(fDeltax);
    fDeltay = fabsf(fDeltay);
    fDeltaz = fabsf(fDeltaz);
    fDeltaw = fabsf(fDeltaw);

    XMVECTORU32 Control = { { {
            (fDeltax <= Epsilon.vector4_f32[0]) ? 0xFFFFFFFFU : 0,
            (fDeltay <= Epsilon.vector4_f32[1]) ? 0xFFFFFFFFU : 0,
            (fDeltaz <= Epsilon.vector4_f32[2]) ? 0xFFFFFFFFU : 0,
            (fDeltaw <= Epsilon.vector4_f32[3]) ? 0xFFFFFFFFU : 0,
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    float32x4_t vDelta = vsubq_f32(V1, V2);
#ifdef _MSC_VER
    // MSVC exposes the absolute-compare intrinsic directly
    return vacleq_f32(vDelta, Epsilon);
#else
    return vcleq_f32(vabsq_f32(vDelta), Epsilon);
#endif
#elif defined(_XM_SSE_INTRINSICS_)
    // Get the difference
    XMVECTOR vDelta = _mm_sub_ps(V1, V2);
    // Get the absolute value of the difference (max(-d, d))
    XMVECTOR vTemp = _mm_setzero_ps();
    vTemp = _mm_sub_ps(vTemp, vDelta);
    vTemp = _mm_max_ps(vTemp, vDelta);
    vTemp = _mm_cmple_ps(vTemp, Epsilon);
    return vTemp;
#endif
}

//------------------------------------------------------------------------------

// Per-component floating-point inequality test (mask of all-ones where
// V1 != V2).
inline XMVECTOR XM_CALLCONV XMVectorNotEqual
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Control = { { {
            (V1.vector4_f32[0] != V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[1] != V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[2] != V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[3] != V2.vector4_f32[3]) ? 0xFFFFFFFF : 0,
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vmvnq_u32(vceqq_f32(V1, V2));
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_cmpneq_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Per-component bitwise (integer) inequality test.
inline XMVECTOR XM_CALLCONV XMVectorNotEqualInt
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Control = { { {
            (V1.vector4_u32[0] != V2.vector4_u32[0]) ? 0xFFFFFFFFU : 0,
            (V1.vector4_u32[1] != V2.vector4_u32[1]) ? 0xFFFFFFFFU : 0,
            (V1.vector4_u32[2] != V2.vector4_u32[2]) ? 0xFFFFFFFFU : 0,
            (V1.vector4_u32[3] != V2.vector4_u32[3]) ? 0xFFFFFFFFU : 0
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vmvnq_u32(vceqq_u32(V1, V2));
#elif defined(_XM_SSE_INTRINSICS_)
    // SSE has no integer "not equal", so compute equal and invert the mask
    __m128i V = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
    return _mm_xor_ps(_mm_castsi128_ps(V), g_XMNegOneMask);
#endif
}

//------------------------------------------------------------------------------

// Per-component greater-than test (mask of all-ones where V1 > V2).
inline XMVECTOR XM_CALLCONV XMVectorGreater
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Control = { { {
            (V1.vector4_f32[0] > V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[1] > V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[2] > V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[3] > V2.vector4_f32[3]) ? 0xFFFFFFFF : 0
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vcgtq_f32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_cmpgt_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Per-component greater-than test that also reports via *pCR whether all
// lanes (XM_CRMASK_CR6TRUE) or no lanes (XM_CRMASK_CR6FALSE) were greater.
_Use_decl_annotations_
inline XMVECTOR XM_CALLCONV XMVectorGreaterR
(
    uint32_t* pCR,
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
    assert(pCR != nullptr);
#if defined(_XM_NO_INTRINSICS_)

    uint32_t ux = (V1.vector4_f32[0] > V2.vector4_f32[0]) ? 0xFFFFFFFFU : 0;
    uint32_t uy = (V1.vector4_f32[1] > V2.vector4_f32[1]) ? 0xFFFFFFFFU : 0;
    uint32_t uz = (V1.vector4_f32[2] > V2.vector4_f32[2]) ? 0xFFFFFFFFU : 0;
    uint32_t uw = (V1.vector4_f32[3] > V2.vector4_f32[3]) ? 0xFFFFFFFFU : 0;
    uint32_t CR = 0;
    if (ux & uy & uz & uw)
    {
        // All elements are greater
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!(ux | uy | uz | uw))
    {
        // All elements are not greater
        CR = XM_CRMASK_CR6FALSE;
    }
    *pCR = CR;

    XMVECTORU32 Control = { { { ux, uy, uz, uw } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x4_t vResult = vcgtq_f32(V1, V2);
    // Collapse the 4 lane masks into one 32-bit word (see XMVectorEqualIntR)
    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
    uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]);
    uint32_t r = vget_lane_u32(vTemp2.val[1], 1);
    uint32_t CR = 0;
    if (r == 0xFFFFFFFFU)
    {
        // All elements are greater
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!r)
    {
        // All elements are not greater
        CR = XM_CRMASK_CR6FALSE;
    }
    *pCR = CR;
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2);
    uint32_t CR = 0;
    int iTest = _mm_movemask_ps(vTemp);
    if (iTest == 0xf)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!iTest)
    {
        // All elements are not greater
        CR = XM_CRMASK_CR6FALSE;
    }
    *pCR = CR;
    return vTemp;
#endif
}

//------------------------------------------------------------------------------

// Per-component greater-or-equal test (mask of all-ones where V1 >= V2).
inline XMVECTOR XM_CALLCONV XMVectorGreaterOrEqual
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Control = { { {
            (V1.vector4_f32[0] >= V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[1] >= V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[2] >= V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[3] >= V2.vector4_f32[3]) ? 0xFFFFFFFF : 0
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vcgeq_f32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_cmpge_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Per-component greater-or-equal test that also reports via *pCR whether all
// lanes (XM_CRMASK_CR6TRUE) or no lanes (XM_CRMASK_CR6FALSE) satisfied it.
_Use_decl_annotations_
inline XMVECTOR XM_CALLCONV XMVectorGreaterOrEqualR
(
    uint32_t* pCR,
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
    assert(pCR != nullptr);
#if defined(_XM_NO_INTRINSICS_)

    uint32_t ux = (V1.vector4_f32[0] >= V2.vector4_f32[0]) ? 0xFFFFFFFFU : 0;
    uint32_t uy = (V1.vector4_f32[1] >= V2.vector4_f32[1]) ? 0xFFFFFFFFU : 0;
    uint32_t uz = (V1.vector4_f32[2] >= V2.vector4_f32[2]) ? 0xFFFFFFFFU : 0;
    uint32_t uw = (V1.vector4_f32[3] >= V2.vector4_f32[3]) ? 0xFFFFFFFFU : 0;
    uint32_t CR = 0;
    if (ux & uy & uz & uw)
    {
        // All elements are greater
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!(ux | uy | uz | uw))
    {
        // All elements are not greater
        CR = XM_CRMASK_CR6FALSE;
    }
    *pCR = CR;

    XMVECTORU32 Control = { { { ux, uy, uz, uw } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x4_t vResult = vcgeq_f32(V1, V2);
    // Collapse the 4 lane masks into one 32-bit word (see XMVectorEqualIntR)
    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
    uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]);
    uint32_t r = vget_lane_u32(vTemp2.val[1], 1);
    uint32_t CR = 0;
    if (r == 0xFFFFFFFFU)
    {
        // All elements are greater or equal
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!r)
    {
        // All elements are not greater or equal
        CR = XM_CRMASK_CR6FALSE;
    }
    *pCR = CR;
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmpge_ps(V1, V2);
    uint32_t CR = 0;
    int iTest = _mm_movemask_ps(vTemp);
    if (iTest == 0xf)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!iTest)
    {
        // All elements are not greater
        CR = XM_CRMASK_CR6FALSE;
    }
    *pCR = CR;
    return vTemp;
#endif
}

//------------------------------------------------------------------------------

// Per-component less-than test (mask of all-ones where V1 < V2).
inline XMVECTOR XM_CALLCONV XMVectorLess
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Control = { { {
            (V1.vector4_f32[0] < V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[1] < V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[2] < V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[3] < V2.vector4_f32[3]) ? 0xFFFFFFFF : 0
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vcltq_f32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_cmplt_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Per-component less-or-equal test (mask of all-ones where V1 <= V2).
inline XMVECTOR XM_CALLCONV XMVectorLessOrEqual
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Control = { { {
            (V1.vector4_f32[0] <= V2.vector4_f32[0]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[1] <= V2.vector4_f32[1]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[2] <= V2.vector4_f32[2]) ? 0xFFFFFFFF : 0,
            (V1.vector4_f32[3] <= V2.vector4_f32[3]) ? 0xFFFFFFFF : 0
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vcleq_f32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_cmple_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Per-component bounds test: each result lane is all-ones when
// -Bounds <= V <= Bounds for that lane.
inline XMVECTOR XM_CALLCONV XMVectorInBounds
(
    FXMVECTOR V,
    FXMVECTOR Bounds
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Control = { { {
            (V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) ? 0xFFFFFFFF : 0,
            (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) ? 0xFFFFFFFF : 0,
            (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) ? 0xFFFFFFFF : 0,
            (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3]) ? 0xFFFFFFFF : 0
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Test if less than or equal
    XMVECTOR vTemp1 = vcleq_f32(V, Bounds);
    // Negate the bounds
    XMVECTOR vTemp2 = vnegq_f32(Bounds);
    // Test if greater or equal (Reversed)
    vTemp2 = vcleq_f32(vTemp2, V);
    // Blend answers
    vTemp1 = vandq_u32(vTemp1, vTemp2);
    return vTemp1;
#elif defined(_XM_SSE_INTRINSICS_)
    // Test if less than or equal
    XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds);
    // Negate the bounds
    XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne);
    // Test if greater or equal (Reversed)
    vTemp2 = _mm_cmple_ps(vTemp2, V);
    // Blend answers
    vTemp1 = _mm_and_ps(vTemp1, vTemp2);
    return vTemp1;
#endif
}

//------------------------------------------------------------------------------

// Per-component bounds test that also reports via *pCR whether ALL lanes are
// within bounds (XM_CRMASK_CR6BOUNDS); otherwise *pCR is 0.
_Use_decl_annotations_
inline XMVECTOR XM_CALLCONV XMVectorInBoundsR
(
    uint32_t* pCR,
    FXMVECTOR V,
    FXMVECTOR Bounds
) noexcept
{
    assert(pCR != nullptr);
#if defined(_XM_NO_INTRINSICS_)

    uint32_t ux = (V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) ? 0xFFFFFFFFU : 0;
    uint32_t uy = (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) ? 0xFFFFFFFFU : 0;
    uint32_t uz = (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) ? 0xFFFFFFFFU : 0;
    uint32_t uw = (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3]) ? 0xFFFFFFFFU : 0;

    uint32_t CR = 0;
    if (ux & uy & uz & uw)
    {
        // All elements are in bounds
        CR = XM_CRMASK_CR6BOUNDS;
    }
    *pCR = CR;

    XMVECTORU32 Control = { { { ux, uy, uz, uw } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Test if less than or equal
    XMVECTOR vTemp1 = vcleq_f32(V, Bounds);
    // Negate the bounds
    XMVECTOR vTemp2 = vnegq_f32(Bounds);
    // Test if greater or equal (Reversed)
    vTemp2 = vcleq_f32(vTemp2, V);
    // Blend answers
    vTemp1 = vandq_u32(vTemp1, vTemp2);
    // Collapse the 4 lane masks into one 32-bit word (see XMVectorEqualIntR)
    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTemp1), vget_high_u8(vTemp1));
    uint16x4x2_t vTemp3 = vzip_u16(vTemp.val[0], vTemp.val[1]);
    uint32_t r = vget_lane_u32(vTemp3.val[1], 1);
    uint32_t CR = 0;
    if (r == 0xFFFFFFFFU)
    {
        // All elements are in bounds
        CR = XM_CRMASK_CR6BOUNDS;
    }
    *pCR = CR;
    return vTemp1;
#elif defined(_XM_SSE_INTRINSICS_)
    // Test if less than or equal
    XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds);
    // Negate the bounds
    XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne);
    // Test if greater or equal (Reversed)
    vTemp2 = _mm_cmple_ps(vTemp2, V);
    // Blend answers
    vTemp1 = _mm_and_ps(vTemp1, vTemp2);

    uint32_t CR = 0;
    if (_mm_movemask_ps(vTemp1) == 0xf)
    {
        // All elements are in bounds
        CR = XM_CRMASK_CR6BOUNDS;
    }
    *pCR = CR;
    return vTemp1;
#endif
}

//------------------------------------------------------------------------------

// Precise float control so the self-compare below is not optimized away
// under fast-math style settings (MSVC only).
#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma float_control(push)
#pragma float_control(precise, on)
#endif

// Per-component NaN test (mask of all-ones where the lane is NaN).
inline XMVECTOR XM_CALLCONV XMVectorIsNaN(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Control = { { {
            XMISNAN(V.vector4_f32[0]) ? 0xFFFFFFFFU : 0,
            XMISNAN(V.vector4_f32[1]) ? 0xFFFFFFFFU : 0,
            XMISNAN(V.vector4_f32[2]) ? 0xFFFFFFFFU : 0,
            XMISNAN(V.vector4_f32[3]) ? 0xFFFFFFFFU : 0
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Test against itself. NaN is always not equal
    uint32x4_t vTempNan = vceqq_f32(V, V);
    // Flip results
    return vmvnq_u32(vTempNan);
#elif defined(_XM_SSE_INTRINSICS_)
    // Test against itself. NaN is always not equal
    return _mm_cmpneq_ps(V, V);
#endif
}

#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma float_control(pop)
#endif

//------------------------------------------------------------------------------

// Per-component infinity test (mask of all-ones where the lane is +/-INF).
inline XMVECTOR XM_CALLCONV XMVectorIsInfinite(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Control = { { {
            XMISINF(V.vector4_f32[0]) ? 0xFFFFFFFFU : 0,
            XMISINF(V.vector4_f32[1]) ? 0xFFFFFFFFU : 0,
            XMISINF(V.vector4_f32[2]) ? 0xFFFFFFFFU : 0,
            XMISINF(V.vector4_f32[3]) ? 0xFFFFFFFFU : 0
        } } };
    return Control.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Mask off the sign bit
    uint32x4_t vTemp = vandq_u32(V, g_XMAbsMask);
    // Compare to infinity
    vTemp = vceqq_f32(vTemp, g_XMInfinity);
    // If any are infinity, the signs are true.
    return vTemp;
#elif defined(_XM_SSE_INTRINSICS_)
    // Mask off the sign bit
    __m128 vTemp = _mm_and_ps(V, g_XMAbsMask);
    // Compare to infinity
    vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity);
    // If any are infinity, the signs are true.
    return vTemp;
#endif
}

//------------------------------------------------------------------------------
// Rounding and clamping operations
//------------------------------------------------------------------------------

//------------------------------------------------------------------------------

// Per-component minimum of two vectors.
inline XMVECTOR XM_CALLCONV XMVectorMin
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result = { { {
            (V1.vector4_f32[0] < V2.vector4_f32[0]) ? V1.vector4_f32[0] : V2.vector4_f32[0],
            (V1.vector4_f32[1] < V2.vector4_f32[1]) ? V1.vector4_f32[1] : V2.vector4_f32[1],
            (V1.vector4_f32[2] < V2.vector4_f32[2]) ? V1.vector4_f32[2] : V2.vector4_f32[2],
            (V1.vector4_f32[3] < V2.vector4_f32[3]) ? V1.vector4_f32[3] : V2.vector4_f32[3]
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vminq_f32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_min_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Per-component maximum of two vectors.
inline XMVECTOR XM_CALLCONV XMVectorMax
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result = { { {
            (V1.vector4_f32[0] > V2.vector4_f32[0]) ? V1.vector4_f32[0] : V2.vector4_f32[0],
            (V1.vector4_f32[1] > V2.vector4_f32[1]) ? V1.vector4_f32[1] : V2.vector4_f32[1],
            (V1.vector4_f32[2] > V2.vector4_f32[2]) ? V1.vector4_f32[2] : V2.vector4_f32[2],
            (V1.vector4_f32[3] > V2.vector4_f32[3]) ? V1.vector4_f32[3] : V2.vector4_f32[3]
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vmaxq_f32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_max_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

namespace Internal
{
    // Round to nearest (even) a.k.a. banker's rounding
    // Scalar fallback used by the no-intrinsics XMVectorRound path: exact
    // halves round toward the even integer, matching IEEE round-to-nearest.
    inline float round_to_nearest(float x)
    {
        float i = floorf(x);
        x -= i;
        if (x < 0.5f)
            return i;
        if (x > 0.5f)
            return i + 1.f;

        // Exactly .5: pick the even neighbor (i when i is even, else i+1)
        float int_part;
        (void)modff(i / 2.f, &int_part);
        if ((2.f * int_part) == i)
        {
            return i;
        }

        return i + 1.f;
    }
}

// Precise float control: the magic-number rounding trick below depends on
// exact IEEE addition order (MSVC only).
#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma float_control(push)
#pragma float_control(precise, on)
#endif

// Rounds each component to the nearest integer, ties to even.
inline XMVECTOR XM_CALLCONV XMVectorRound(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result = { { {
            Internal::round_to_nearest(V.vector4_f32[0]),
            Internal::round_to_nearest(V.vector4_f32[1]),
            Internal::round_to_nearest(V.vector4_f32[2]),
            Internal::round_to_nearest(V.vector4_f32[3])
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
    return vrndnq_f32(V);
#else
    // Magic-number rounding: adding 2^23 (with the input's sign) forces the
    // fraction bits out, then subtracting restores the rounded value.
    uint32x4_t sign = vandq_u32(V, g_XMNegativeZero);
    uint32x4_t sMagic = vorrq_u32(g_XMNoFraction, sign);
    float32x4_t R1 = vaddq_f32(V, sMagic);
    R1 = vsubq_f32(R1, sMagic);
    float32x4_t R2 = vabsq_f32(V);
    // Lanes already >= 2^23 have no fraction; keep the original value there
    uint32x4_t mask = vcleq_f32(R2, g_XMNoFraction);
    XMVECTOR vResult = vbslq_f32(mask, R1, V);
    return vResult;
#endif
#elif defined(_XM_SSE4_INTRINSICS_)
    return _mm_round_ps(V, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
#elif defined(_XM_SSE_INTRINSICS_)
    // Magic-number rounding (see NEON comment above)
    __m128 sign = _mm_and_ps(V, g_XMNegativeZero);
    __m128 sMagic = _mm_or_ps(g_XMNoFraction, sign);
    __m128 R1 = _mm_add_ps(V, sMagic);
    R1 = _mm_sub_ps(R1, sMagic);
    __m128 R2 = _mm_and_ps(V, g_XMAbsMask);
    __m128 mask = _mm_cmple_ps(R2, g_XMNoFraction);
    R2 = _mm_andnot_ps(mask, V);
    R1 = _mm_and_ps(R1, mask);
    XMVECTOR vResult = _mm_xor_ps(R1, R2);
    return vResult;
#endif
}

#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma float_control(pop)
#endif

+//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorTruncate(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTOR Result; + uint32_t i; + + // Avoid C4701 + Result.vector4_f32[0] = 0.0f; + + for (i = 0; i < 4; i++) + { + if (XMISNAN(V.vector4_f32[i])) + { + Result.vector4_u32[i] = 0x7FC00000; + } + else if (fabsf(V.vector4_f32[i]) < 8388608.0f) + { + Result.vector4_f32[i] = static_cast(static_cast(V.vector4_f32[i])); + } + else + { + Result.vector4_f32[i] = V.vector4_f32[i]; + } + } + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + return vrndq_f32(V); +#else + float32x4_t vTest = vabsq_f32(V); + vTest = vcltq_f32(vTest, g_XMNoFraction); + + int32x4_t vInt = vcvtq_s32_f32(V); + XMVECTOR vResult = vcvtq_f32_s32(vInt); + + // All numbers less than 8388608 will use the round to int + // All others, use the ORIGINAL value + return vbslq_f32(vTest, vResult, V); +#endif +#elif defined(_XM_SSE4_INTRINSICS_) + return _mm_round_ps(V, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +#elif defined(_XM_SSE_INTRINSICS_) + // To handle NAN, INF and numbers greater than 8388608, use masking + // Get the abs value + __m128i vTest = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask); + // Test for greater than 8388608 (All floats with NO fractionals, NAN and INF + vTest = _mm_cmplt_epi32(vTest, g_XMNoFraction); + // Convert to int and back to float for rounding with truncation + __m128i vInt = _mm_cvttps_epi32(V); + // Convert back to floats + XMVECTOR vResult = _mm_cvtepi32_ps(vInt); + // All numbers less than 8388608 will use the round to int + vResult = _mm_and_ps(vResult, _mm_castsi128_ps(vTest)); + // All others, use the ORIGINAL value + vTest = _mm_andnot_si128(vTest, _mm_castps_si128(V)); + vResult = _mm_or_ps(vResult, _mm_castsi128_ps(vTest)); + return vResult; +#endif +} + 
//------------------------------------------------------------------------------

// Rounds each component toward negative infinity.
inline XMVECTOR XM_CALLCONV XMVectorFloor(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    XMVECTORF32 Result = { { {
            floorf(V.vector4_f32[0]),
            floorf(V.vector4_f32[1]),
            floorf(V.vector4_f32[2]),
            floorf(V.vector4_f32[3])
        } } };
    return Result.v;
#elif defined(_XM_ARM_NEON_INTRINSICS_)
#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
    return vrndmq_f32(V);
#else
    float32x4_t vTest = vabsq_f32(V);
    vTest = vcltq_f32(vTest, g_XMNoFraction);
    // Truncate
    int32x4_t vInt = vcvtq_s32_f32(V);
    XMVECTOR vResult = vcvtq_f32_s32(vInt);
    // Truncation rounded up for negative inputs; step those lanes back down
    XMVECTOR vLarger = vcgtq_f32(vResult, V);
    // 0 -> 0, 0xffffffff -> -1.0f
    vLarger = vcvtq_f32_s32(vLarger);
    vResult = vaddq_f32(vResult, vLarger);
    // All numbers less than 8388608 will use the round to int
    // All others, use the ORIGINAL value
    return vbslq_f32(vTest, vResult, V);
#endif
#elif defined(_XM_SSE4_INTRINSICS_)
    return _mm_floor_ps(V);
#elif defined(_XM_SSE_INTRINSICS_)
    // To handle NAN, INF and numbers greater than 8388608, use masking
    __m128i vTest = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
    vTest = _mm_cmplt_epi32(vTest, g_XMNoFraction);
    // Truncate
    __m128i vInt = _mm_cvttps_epi32(V);
    XMVECTOR vResult = _mm_cvtepi32_ps(vInt);
    // Truncation rounded up for negative inputs; step those lanes back down
    __m128 vLarger = _mm_cmpgt_ps(vResult, V);
    // 0 -> 0, 0xffffffff -> -1.0f
    vLarger = _mm_cvtepi32_ps(_mm_castps_si128(vLarger));
    vResult = _mm_add_ps(vResult, vLarger);
    // All numbers less than 8388608 will use the round to int
    vResult = _mm_and_ps(vResult, _mm_castsi128_ps(vTest));
    // All others, use the ORIGINAL value
    vTest = _mm_andnot_si128(vTest, _mm_castps_si128(V));
    vResult = _mm_or_ps(vResult, _mm_castsi128_ps(vTest));
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Rounds each component toward positive infinity.
inline XMVECTOR XM_CALLCONV XMVectorCeiling(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    XMVECTORF32 Result = { { {
            ceilf(V.vector4_f32[0]),
            ceilf(V.vector4_f32[1]),
            ceilf(V.vector4_f32[2]),
            ceilf(V.vector4_f32[3])
        } } };
    return Result.v;
#elif defined(_XM_ARM_NEON_INTRINSICS_)
#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
    return vrndpq_f32(V);
#else
    float32x4_t vTest = vabsq_f32(V);
    vTest = vcltq_f32(vTest, g_XMNoFraction);
    // Truncate
    int32x4_t vInt = vcvtq_s32_f32(V);
    XMVECTOR vResult = vcvtq_f32_s32(vInt);
    // Truncation rounded down for positive inputs; step those lanes back up
    XMVECTOR vSmaller = vcltq_f32(vResult, V);
    // 0 -> 0, 0xffffffff -> -1.0f
    vSmaller = vcvtq_f32_s32(vSmaller);
    vResult = vsubq_f32(vResult, vSmaller);
    // All numbers less than 8388608 will use the round to int
    // All others, use the ORIGINAL value
    return vbslq_f32(vTest, vResult, V);
#endif
#elif defined(_XM_SSE4_INTRINSICS_)
    return _mm_ceil_ps(V);
#elif defined(_XM_SSE_INTRINSICS_)
    // To handle NAN, INF and numbers greater than 8388608, use masking
    __m128i vTest = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
    vTest = _mm_cmplt_epi32(vTest, g_XMNoFraction);
    // Truncate
    __m128i vInt = _mm_cvttps_epi32(V);
    XMVECTOR vResult = _mm_cvtepi32_ps(vInt);
    // Truncation rounded down for positive inputs; step those lanes back up
    __m128 vSmaller = _mm_cmplt_ps(vResult, V);
    // 0 -> 0, 0xffffffff -> -1.0f
    vSmaller = _mm_cvtepi32_ps(_mm_castps_si128(vSmaller));
    vResult = _mm_sub_ps(vResult, vSmaller);
    // All numbers less than 8388608 will use the round to int
    vResult = _mm_and_ps(vResult, _mm_castsi128_ps(vTest));
    // All others, use the ORIGINAL value
    vTest = _mm_andnot_si128(vTest, _mm_castps_si128(V));
    vResult = _mm_or_ps(vResult, _mm_castsi128_ps(vTest));
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Clamps each component of V to the [Min, Max] range (per component).
// Requires Min <= Max in every component.
inline XMVECTOR XM_CALLCONV XMVectorClamp
(
    FXMVECTOR V,
    FXMVECTOR Min,
    FXMVECTOR Max
) noexcept
{
    assert(XMVector4LessOrEqual(Min, Max));

#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR Result;
    Result = XMVectorMax(Min, V);
    Result = XMVectorMin(Max, Result);
    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    XMVECTOR vResult;
    vResult = vmaxq_f32(Min, V);
    vResult = vminq_f32(Max, vResult);
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vResult;
    vResult = _mm_max_ps(Min, V);
    vResult = _mm_min_ps(Max, vResult);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Clamps each component to the [0, 1] range.
inline XMVECTOR XM_CALLCONV XMVectorSaturate(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    const XMVECTOR Zero = XMVectorZero();

    return XMVectorClamp(V, Zero, g_XMOne.v);

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Set <0 to 0
    XMVECTOR vResult = vmaxq_f32(V, vdupq_n_f32(0));
    // Set>1 to 1
    return vminq_f32(vResult, vdupq_n_f32(1.0f));
#elif defined(_XM_SSE_INTRINSICS_)
    // Set <0 to 0
    XMVECTOR vResult = _mm_max_ps(V, g_XMZero);
    // Set>1 to 1
    return _mm_min_ps(vResult, g_XMOne);
#endif
}

//------------------------------------------------------------------------------
// Bitwise logical operations
//------------------------------------------------------------------------------

// Per-component bitwise AND (treats lanes as 32-bit integers).
inline XMVECTOR XM_CALLCONV XMVectorAndInt
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Result = { { {
            V1.vector4_u32[0] & V2.vector4_u32[0],
            V1.vector4_u32[1] & V2.vector4_u32[1],
            V1.vector4_u32[2] & V2.vector4_u32[2],
            V1.vector4_u32[3] & V2.vector4_u32[3]
        } } };
    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vandq_u32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_and_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Per-component bitwise AND-NOT: V1 & ~V2 (lanes as 32-bit integers).
inline XMVECTOR XM_CALLCONV XMVectorAndCInt
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Result = { { {
            V1.vector4_u32[0] & ~V2.vector4_u32[0],
            V1.vector4_u32[1] & ~V2.vector4_u32[1],
            V1.vector4_u32[2] & ~V2.vector4_u32[2],
            V1.vector4_u32[3] & ~V2.vector4_u32[3]
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vbicq_u32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    // Note: _mm_andnot computes (~first & second), hence the swapped operands
    __m128i V = _mm_andnot_si128(_mm_castps_si128(V2), _mm_castps_si128(V1));
    return _mm_castsi128_ps(V);
#endif
}

//------------------------------------------------------------------------------

// Per-component bitwise OR (lanes as 32-bit integers).
inline XMVECTOR XM_CALLCONV XMVectorOrInt
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Result = { { {
            V1.vector4_u32[0] | V2.vector4_u32[0],
            V1.vector4_u32[1] | V2.vector4_u32[1],
            V1.vector4_u32[2] | V2.vector4_u32[2],
            V1.vector4_u32[3] | V2.vector4_u32[3]
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vorrq_u32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    __m128i V = _mm_or_si128(_mm_castps_si128(V1), _mm_castps_si128(V2));
    return _mm_castsi128_ps(V);
#endif
}

//------------------------------------------------------------------------------

// Per-component bitwise NOR: ~(V1 | V2) (lanes as 32-bit integers).
inline XMVECTOR XM_CALLCONV XMVectorNorInt
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Result = { { {
            ~(V1.vector4_u32[0] | V2.vector4_u32[0]),
            ~(V1.vector4_u32[1] | V2.vector4_u32[1]),
            ~(V1.vector4_u32[2] | V2.vector4_u32[2]),
            ~(V1.vector4_u32[3] | V2.vector4_u32[3])
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x4_t Result = vorrq_u32(V1, V2);
    return vbicq_u32(g_XMNegOneMask, Result);
#elif defined(_XM_SSE_INTRINSICS_)
    __m128i Result;
    Result = _mm_or_si128(_mm_castps_si128(V1), _mm_castps_si128(V2));
    Result = _mm_andnot_si128(Result, g_XMNegOneMask);
    return _mm_castsi128_ps(Result);
#endif
}

//------------------------------------------------------------------------------

// Per-component bitwise XOR (lanes as 32-bit integers).
inline XMVECTOR XM_CALLCONV XMVectorXorInt
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORU32 Result = { { {
            V1.vector4_u32[0] ^ V2.vector4_u32[0],
            V1.vector4_u32[1] ^ V2.vector4_u32[1],
            V1.vector4_u32[2] ^ V2.vector4_u32[2],
            V1.vector4_u32[3] ^ V2.vector4_u32[3]
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return veorq_u32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    __m128i V = _mm_xor_si128(_mm_castps_si128(V1), _mm_castps_si128(V2));
    return _mm_castsi128_ps(V);
#endif
}

//------------------------------------------------------------------------------
// Computation operations
//------------------------------------------------------------------------------

//------------------------------------------------------------------------------

// Per-component negation.
inline XMVECTOR XM_CALLCONV XMVectorNegate(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result = { { {
            -V.vector4_f32[0],
            -V.vector4_f32[1],
            -V.vector4_f32[2],
            -V.vector4_f32[3]
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vnegq_f32(V);
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR Z;

    Z = _mm_setzero_ps();

    return _mm_sub_ps(Z, V);
#endif
}

//------------------------------------------------------------------------------

// Per-component addition.
inline XMVECTOR XM_CALLCONV XMVectorAdd
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result = { { {
            V1.vector4_f32[0] + V2.vector4_f32[0],
            V1.vector4_f32[1] + V2.vector4_f32[1],
            V1.vector4_f32[2] + V2.vector4_f32[2],
            V1.vector4_f32[3] + V2.vector4_f32[3]
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vaddq_f32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_add_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Horizontal sum: returns (x+y+z+w) replicated into all four components.
inline XMVECTOR XM_CALLCONV XMVectorSum(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result;
    Result.f[0] =
        Result.f[1] =
        Result.f[2] =
        Result.f[3] = V.vector4_f32[0] + V.vector4_f32[1] + V.vector4_f32[2] + V.vector4_f32[3];
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
    XMVECTOR vTemp = vpaddq_f32(V, V);
    return vpaddq_f32(vTemp, vTemp);
#else
    float32x2_t v1 = vget_low_f32(V);
    float32x2_t v2 = vget_high_f32(V);
    v1 = vadd_f32(v1, v2);
    v1 = vpadd_f32(v1, v1);
    return vcombine_f32(v1, v1);
#endif
#elif defined(_XM_SSE3_INTRINSICS_)
    XMVECTOR vTemp = _mm_hadd_ps(V, V);
    return _mm_hadd_ps(vTemp, vTemp);
#elif defined(_XM_SSE_INTRINSICS_)
    // Two shuffle+add passes accumulate all four lanes into every lane
    XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 3, 0, 1));
    XMVECTOR vTemp2 = _mm_add_ps(V, vTemp);
    vTemp = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(1, 0, 3, 2));
    return _mm_add_ps(vTemp, vTemp2);
#endif
}

//------------------------------------------------------------------------------

// Adds two angle vectors and wraps the result back into [-Pi, Pi).
inline XMVECTOR XM_CALLCONV XMVectorAddAngles
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    const XMVECTOR Zero = XMVectorZero();

    // Add the given angles together.  If the range of V1 is such
    // that -Pi <= V1 < Pi and the range of V2 is such that
    // -2Pi <= V2 <= 2Pi, then the range of the resulting angle
    // will be -Pi <= Result < Pi.
    XMVECTOR Result = XMVectorAdd(V1, V2);

    XMVECTOR Mask = XMVectorLess(Result, g_XMNegativePi.v);
    XMVECTOR Offset = XMVectorSelect(Zero, g_XMTwoPi.v, Mask);

    Mask = XMVectorGreaterOrEqual(Result, g_XMPi.v);
    Offset = XMVectorSelect(Offset, g_XMNegativeTwoPi.v, Mask);

    Result = XMVectorAdd(Result, Offset);

    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Adjust the angles
    XMVECTOR vResult = vaddq_f32(V1, V2);
    // Less than Pi?
    uint32x4_t vOffset = vcltq_f32(vResult, g_XMNegativePi);
    vOffset = vandq_u32(vOffset, g_XMTwoPi);
    // Add 2Pi to all entries less than -Pi
    vResult = vaddq_f32(vResult, vOffset);
    // Greater than or equal to Pi?
    vOffset = vcgeq_f32(vResult, g_XMPi);
    vOffset = vandq_u32(vOffset, g_XMTwoPi);
    // Sub 2Pi to all entries greater than Pi
    vResult = vsubq_f32(vResult, vOffset);
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    // Adjust the angles
    XMVECTOR vResult = _mm_add_ps(V1, V2);
    // Less than Pi?
    XMVECTOR vOffset = _mm_cmplt_ps(vResult, g_XMNegativePi);
    vOffset = _mm_and_ps(vOffset, g_XMTwoPi);
    // Add 2Pi to all entries less than -Pi
    vResult = _mm_add_ps(vResult, vOffset);
    // Greater than or equal to Pi?
    vOffset = _mm_cmpge_ps(vResult, g_XMPi);
    vOffset = _mm_and_ps(vOffset, g_XMTwoPi);
    // Sub 2Pi to all entries greater than Pi
    vResult = _mm_sub_ps(vResult, vOffset);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Per-component subtraction.
inline XMVECTOR XM_CALLCONV XMVectorSubtract
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result = { { {
            V1.vector4_f32[0] - V2.vector4_f32[0],
            V1.vector4_f32[1] - V2.vector4_f32[1],
            V1.vector4_f32[2] - V2.vector4_f32[2],
            V1.vector4_f32[3] - V2.vector4_f32[3]
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vsubq_f32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_sub_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Subtracts two angle vectors and wraps the result back into [-Pi, Pi).
inline XMVECTOR XM_CALLCONV XMVectorSubtractAngles
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    const XMVECTOR Zero = XMVectorZero();

    // Subtract the given angles.  If the range of V1 is such
    // that -Pi <= V1 < Pi and the range of V2 is such that
    // -2Pi <= V2 <= 2Pi, then the range of the resulting angle
    // will be -Pi <= Result < Pi.
    XMVECTOR Result = XMVectorSubtract(V1, V2);

    XMVECTOR Mask = XMVectorLess(Result, g_XMNegativePi.v);
    XMVECTOR Offset = XMVectorSelect(Zero, g_XMTwoPi.v, Mask);

    Mask = XMVectorGreaterOrEqual(Result, g_XMPi.v);
    Offset = XMVectorSelect(Offset, g_XMNegativeTwoPi.v, Mask);

    Result = XMVectorAdd(Result, Offset);

    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Adjust the angles
    XMVECTOR vResult = vsubq_f32(V1, V2);
    // Less than Pi?
    uint32x4_t vOffset = vcltq_f32(vResult, g_XMNegativePi);
    vOffset = vandq_u32(vOffset, g_XMTwoPi);
    // Add 2Pi to all entries less than -Pi
    vResult = vaddq_f32(vResult, vOffset);
    // Greater than or equal to Pi?
    vOffset = vcgeq_f32(vResult, g_XMPi);
    vOffset = vandq_u32(vOffset, g_XMTwoPi);
    // Sub 2Pi to all entries greater than Pi
    vResult = vsubq_f32(vResult, vOffset);
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    // Adjust the angles
    XMVECTOR vResult = _mm_sub_ps(V1, V2);
    // Less than Pi?
    XMVECTOR vOffset = _mm_cmplt_ps(vResult, g_XMNegativePi);
    vOffset = _mm_and_ps(vOffset, g_XMTwoPi);
    // Add 2Pi to all entries less than -Pi
    vResult = _mm_add_ps(vResult, vOffset);
    // Greater than or equal to Pi?
    vOffset = _mm_cmpge_ps(vResult, g_XMPi);
    vOffset = _mm_and_ps(vOffset, g_XMTwoPi);
    // Sub 2Pi to all entries greater than Pi
    vResult = _mm_sub_ps(vResult, vOffset);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Per-component multiplication.
inline XMVECTOR XM_CALLCONV XMVectorMultiply
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    XMVECTORF32 Result = { { {
            V1.vector4_f32[0] * V2.vector4_f32[0],
            V1.vector4_f32[1] * V2.vector4_f32[1],
            V1.vector4_f32[2] * V2.vector4_f32[2],
            V1.vector4_f32[3] * V2.vector4_f32[3]
        } } };
    return Result.v;
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    return vmulq_f32(V1, V2);
#elif defined(_XM_SSE_INTRINSICS_)
    return _mm_mul_ps(V1, V2);
#endif
}

//------------------------------------------------------------------------------

// Per-component multiply-add: V1 * V2 + V3.
inline XMVECTOR XM_CALLCONV XMVectorMultiplyAdd
(
    FXMVECTOR V1,
    FXMVECTOR V2,
    FXMVECTOR V3
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    XMVECTORF32 Result = { { {
            V1.vector4_f32[0] * V2.vector4_f32[0] + V3.vector4_f32[0],
            V1.vector4_f32[1] * V2.vector4_f32[1] + V3.vector4_f32[1],
            V1.vector4_f32[2] * V2.vector4_f32[2] + V3.vector4_f32[2],
            V1.vector4_f32[3] * V2.vector4_f32[3] + V3.vector4_f32[3]
        } } };
    return Result.v;
#elif defined(_XM_ARM_NEON_INTRINSICS_)
#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__
    return vfmaq_f32(V3, V1, V2);
#else
    return vmlaq_f32(V3, V1, V2);
#endif
#elif defined(_XM_SSE_INTRINSICS_)
    return XM_FMADD_PS(V1, V2, V3);
#endif
}

//------------------------------------------------------------------------------

// Per-component division: V1 / V2.
inline XMVECTOR XM_CALLCONV XMVectorDivide
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    XMVECTORF32 Result = { { {
            V1.vector4_f32[0] / V2.vector4_f32[0],
            V1.vector4_f32[1] / V2.vector4_f32[1],
            V1.vector4_f32[2] / V2.vector4_f32[2],
            V1.vector4_f32[3] / V2.vector4_f32[3]
        } } };
return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + return vdivq_f32(V1, V2); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t Reciprocal = vrecpeq_f32(V2); + float32x4_t S = vrecpsq_f32(Reciprocal, V2); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, V2); + Reciprocal = vmulq_f32(S, Reciprocal); + return vmulq_f32(V1, Reciprocal); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_div_ps(V1, V2); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorNegativeMultiplySubtract +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR V3 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + V3.vector4_f32[0] - (V1.vector4_f32[0] * V2.vector4_f32[0]), + V3.vector4_f32[1] - (V1.vector4_f32[1] * V2.vector4_f32[1]), + V3.vector4_f32[2] - (V1.vector4_f32[2] * V2.vector4_f32[2]), + V3.vector4_f32[3] - (V1.vector4_f32[3] * V2.vector4_f32[3]) + } } }; + return Result; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + return vfmsq_f32(V3, V1, V2); +#else + return vmlsq_f32(V3, V1, V2); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + return XM_FNMADD_PS(V1, V2, V3); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorScale +( + FXMVECTOR V, + float ScaleFactor +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + V.vector4_f32[0] * ScaleFactor, + V.vector4_f32[1] * ScaleFactor, + V.vector4_f32[2] * ScaleFactor, + V.vector4_f32[3] * ScaleFactor + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vmulq_n_f32(V, ScaleFactor); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = _mm_set_ps1(ScaleFactor); + return _mm_mul_ps(vResult, V); +#endif +} + 
+//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorReciprocalEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + 1.f / V.vector4_f32[0], + 1.f / V.vector4_f32[1], + 1.f / V.vector4_f32[2], + 1.f / V.vector4_f32[3] + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vrecpeq_f32(V); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_rcp_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorReciprocal(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + 1.f / V.vector4_f32[0], + 1.f / V.vector4_f32[1], + 1.f / V.vector4_f32[2], + 1.f / V.vector4_f32[3] + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + float32x4_t one = vdupq_n_f32(1.0f); + return vdivq_f32(one, V); +#else + // 2 iterations of Newton-Raphson refinement + float32x4_t Reciprocal = vrecpeq_f32(V); + float32x4_t S = vrecpsq_f32(Reciprocal, V); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, V); + return vmulq_f32(S, Reciprocal); +#endif +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_div_ps(g_XMOne, V); +#endif +} + +//------------------------------------------------------------------------------ +// Return an estimated square root +inline XMVECTOR XM_CALLCONV XMVectorSqrtEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + sqrtf(V.vector4_f32[0]), + sqrtf(V.vector4_f32[1]), + sqrtf(V.vector4_f32[2]), + sqrtf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // 1 iteration of Newton-Raphson refinment of sqrt + float32x4_t S0 = vrsqrteq_f32(V); + float32x4_t P0 = vmulq_f32(V, S0); + float32x4_t R0 = vrsqrtsq_f32(P0, S0); + float32x4_t S1 = vmulq_f32(S0, R0); + + 
XMVECTOR VEqualsInfinity = XMVectorEqualInt(V, g_XMInfinity.v); + XMVECTOR VEqualsZero = XMVectorEqual(V, vdupq_n_f32(0)); + XMVECTOR Result = vmulq_f32(V, S1); + XMVECTOR Select = XMVectorEqualInt(VEqualsInfinity, VEqualsZero); + return XMVectorSelect(V, Result, Select); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_sqrt_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSqrt(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + sqrtf(V.vector4_f32[0]), + sqrtf(V.vector4_f32[1]), + sqrtf(V.vector4_f32[2]), + sqrtf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // 3 iterations of Newton-Raphson refinment of sqrt + float32x4_t S0 = vrsqrteq_f32(V); + float32x4_t P0 = vmulq_f32(V, S0); + float32x4_t R0 = vrsqrtsq_f32(P0, S0); + float32x4_t S1 = vmulq_f32(S0, R0); + float32x4_t P1 = vmulq_f32(V, S1); + float32x4_t R1 = vrsqrtsq_f32(P1, S1); + float32x4_t S2 = vmulq_f32(S1, R1); + float32x4_t P2 = vmulq_f32(V, S2); + float32x4_t R2 = vrsqrtsq_f32(P2, S2); + float32x4_t S3 = vmulq_f32(S2, R2); + + XMVECTOR VEqualsInfinity = XMVectorEqualInt(V, g_XMInfinity.v); + XMVECTOR VEqualsZero = XMVectorEqual(V, vdupq_n_f32(0)); + XMVECTOR Result = vmulq_f32(V, S3); + XMVECTOR Select = XMVectorEqualInt(VEqualsInfinity, VEqualsZero); + return XMVectorSelect(V, Result, Select); +#elif defined(_XM_SSE_INTRINSICS_) + return _mm_sqrt_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorReciprocalSqrtEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + 1.f / sqrtf(V.vector4_f32[0]), + 1.f / sqrtf(V.vector4_f32[1]), + 1.f / sqrtf(V.vector4_f32[2]), + 1.f / sqrtf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + return vrsqrteq_f32(V); +#elif 
defined(_XM_SSE_INTRINSICS_) + return _mm_rsqrt_ps(V); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorReciprocalSqrt(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + 1.f / sqrtf(V.vector4_f32[0]), + 1.f / sqrtf(V.vector4_f32[1]), + 1.f / sqrtf(V.vector4_f32[2]), + 1.f / sqrtf(V.vector4_f32[3]) + } } }; + return Result; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t S0 = vrsqrteq_f32(V); + + float32x4_t P0 = vmulq_f32(V, S0); + float32x4_t R0 = vrsqrtsq_f32(P0, S0); + + float32x4_t S1 = vmulq_f32(S0, R0); + float32x4_t P1 = vmulq_f32(V, S1); + float32x4_t R1 = vrsqrtsq_f32(P1, S1); + + return vmulq_f32(S1, R1); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = _mm_sqrt_ps(V); + vResult = _mm_div_ps(g_XMOne, vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorExp2(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + powf(2.0f, V.vector4_f32[0]), + powf(2.0f, V.vector4_f32[1]), + powf(2.0f, V.vector4_f32[2]), + powf(2.0f, V.vector4_f32[3]) + } } }; + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t itrunc = vcvtq_s32_f32(V); + float32x4_t ftrunc = vcvtq_f32_s32(itrunc); + float32x4_t y = vsubq_f32(V, ftrunc); + + float32x4_t poly = vmlaq_f32(g_XMExpEst6, g_XMExpEst7, y); + poly = vmlaq_f32(g_XMExpEst5, poly, y); + poly = vmlaq_f32(g_XMExpEst4, poly, y); + poly = vmlaq_f32(g_XMExpEst3, poly, y); + poly = vmlaq_f32(g_XMExpEst2, poly, y); + poly = vmlaq_f32(g_XMExpEst1, poly, y); + poly = vmlaq_f32(g_XMOne, poly, y); + + int32x4_t biased = vaddq_s32(itrunc, g_XMExponentBias); + biased = vshlq_n_s32(biased, 23); + float32x4_t result0 = XMVectorDivide(biased, poly); + + biased = vaddq_s32(itrunc, g_XM253); + 
biased = vshlq_n_s32(biased, 23); + float32x4_t result1 = XMVectorDivide(biased, poly); + result1 = vmulq_f32(g_XMMinNormal.v, result1); + + // Use selection to handle the cases + // if (V is NaN) -> QNaN; + // else if (V sign bit set) + // if (V > -150) + // if (V.exponent < -126) -> result1 + // else -> result0 + // else -> +0 + // else + // if (V < 128) -> result0 + // else -> +inf + + int32x4_t comp = vcltq_s32(V, g_XMBin128); + float32x4_t result2 = vbslq_f32(comp, result0, g_XMInfinity); + + comp = vcltq_s32(itrunc, g_XMSubnormalExponent); + float32x4_t result3 = vbslq_f32(comp, result1, result0); + + comp = vcltq_s32(V, g_XMBinNeg150); + float32x4_t result4 = vbslq_f32(comp, result3, g_XMZero); + + int32x4_t sign = vandq_s32(V, g_XMNegativeZero); + comp = vceqq_s32(sign, g_XMNegativeZero); + float32x4_t result5 = vbslq_f32(comp, result4, result2); + + int32x4_t t0 = vandq_s32(V, g_XMQNaNTest); + int32x4_t t1 = vandq_s32(V, g_XMInfinity); + t0 = vceqq_s32(t0, g_XMZero); + t1 = vceqq_s32(t1, g_XMInfinity); + int32x4_t isNaN = vbicq_s32(t1, t0); + + float32x4_t vResult = vbslq_f32(isNaN, g_XMQNaN, result5); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i itrunc = _mm_cvttps_epi32(V); + __m128 ftrunc = _mm_cvtepi32_ps(itrunc); + __m128 y = _mm_sub_ps(V, ftrunc); + + __m128 poly = XM_FMADD_PS(g_XMExpEst7, y, g_XMExpEst6); + poly = XM_FMADD_PS(poly, y, g_XMExpEst5); + poly = XM_FMADD_PS(poly, y, g_XMExpEst4); + poly = XM_FMADD_PS(poly, y, g_XMExpEst3); + poly = XM_FMADD_PS(poly, y, g_XMExpEst2); + poly = XM_FMADD_PS(poly, y, g_XMExpEst1); + poly = XM_FMADD_PS(poly, y, g_XMOne); + + __m128i biased = _mm_add_epi32(itrunc, g_XMExponentBias); + biased = _mm_slli_epi32(biased, 23); + __m128 result0 = _mm_div_ps(_mm_castsi128_ps(biased), poly); + + biased = _mm_add_epi32(itrunc, g_XM253); + biased = _mm_slli_epi32(biased, 23); + __m128 result1 = _mm_div_ps(_mm_castsi128_ps(biased), poly); + result1 = _mm_mul_ps(g_XMMinNormal.v, result1); + + // Use 
selection to handle the cases + // if (V is NaN) -> QNaN; + // else if (V sign bit set) + // if (V > -150) + // if (V.exponent < -126) -> result1 + // else -> result0 + // else -> +0 + // else + // if (V < 128) -> result0 + // else -> +inf + + __m128i comp = _mm_cmplt_epi32(_mm_castps_si128(V), g_XMBin128); + __m128i select0 = _mm_and_si128(comp, _mm_castps_si128(result0)); + __m128i select1 = _mm_andnot_si128(comp, g_XMInfinity); + __m128i result2 = _mm_or_si128(select0, select1); + + comp = _mm_cmplt_epi32(itrunc, g_XMSubnormalExponent); + select1 = _mm_and_si128(comp, _mm_castps_si128(result1)); + select0 = _mm_andnot_si128(comp, _mm_castps_si128(result0)); + __m128i result3 = _mm_or_si128(select0, select1); + + comp = _mm_cmplt_epi32(_mm_castps_si128(V), g_XMBinNeg150); + select0 = _mm_and_si128(comp, result3); + select1 = _mm_andnot_si128(comp, g_XMZero); + __m128i result4 = _mm_or_si128(select0, select1); + + __m128i sign = _mm_and_si128(_mm_castps_si128(V), g_XMNegativeZero); + comp = _mm_cmpeq_epi32(sign, g_XMNegativeZero); + select0 = _mm_and_si128(comp, result4); + select1 = _mm_andnot_si128(comp, result2); + __m128i result5 = _mm_or_si128(select0, select1); + + __m128i t0 = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest); + __m128i t1 = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity); + t0 = _mm_cmpeq_epi32(t0, g_XMZero); + t1 = _mm_cmpeq_epi32(t1, g_XMInfinity); + __m128i isNaN = _mm_andnot_si128(t0, t1); + + select0 = _mm_and_si128(isNaN, g_XMQNaN); + select1 = _mm_andnot_si128(isNaN, result5); + __m128i vResult = _mm_or_si128(select0, select1); + + return _mm_castsi128_ps(vResult); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorExpE(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + expf(V.vector4_f32[0]), + expf(V.vector4_f32[1]), + expf(V.vector4_f32[2]), + expf(V.vector4_f32[3]) + } } }; + return Result.v; + +#elif 
defined(_XM_ARM_NEON_INTRINSICS_) + // expE(V) = exp2(vin*log2(e)) + float32x4_t Ve = vmulq_f32(g_XMLgE, V); + + int32x4_t itrunc = vcvtq_s32_f32(Ve); + float32x4_t ftrunc = vcvtq_f32_s32(itrunc); + float32x4_t y = vsubq_f32(Ve, ftrunc); + + float32x4_t poly = vmlaq_f32(g_XMExpEst6, g_XMExpEst7, y); + poly = vmlaq_f32(g_XMExpEst5, poly, y); + poly = vmlaq_f32(g_XMExpEst4, poly, y); + poly = vmlaq_f32(g_XMExpEst3, poly, y); + poly = vmlaq_f32(g_XMExpEst2, poly, y); + poly = vmlaq_f32(g_XMExpEst1, poly, y); + poly = vmlaq_f32(g_XMOne, poly, y); + + int32x4_t biased = vaddq_s32(itrunc, g_XMExponentBias); + biased = vshlq_n_s32(biased, 23); + float32x4_t result0 = XMVectorDivide(biased, poly); + + biased = vaddq_s32(itrunc, g_XM253); + biased = vshlq_n_s32(biased, 23); + float32x4_t result1 = XMVectorDivide(biased, poly); + result1 = vmulq_f32(g_XMMinNormal.v, result1); + + // Use selection to handle the cases + // if (V is NaN) -> QNaN; + // else if (V sign bit set) + // if (V > -150) + // if (V.exponent < -126) -> result1 + // else -> result0 + // else -> +0 + // else + // if (V < 128) -> result0 + // else -> +inf + + int32x4_t comp = vcltq_s32(Ve, g_XMBin128); + float32x4_t result2 = vbslq_f32(comp, result0, g_XMInfinity); + + comp = vcltq_s32(itrunc, g_XMSubnormalExponent); + float32x4_t result3 = vbslq_f32(comp, result1, result0); + + comp = vcltq_s32(Ve, g_XMBinNeg150); + float32x4_t result4 = vbslq_f32(comp, result3, g_XMZero); + + int32x4_t sign = vandq_s32(Ve, g_XMNegativeZero); + comp = vceqq_s32(sign, g_XMNegativeZero); + float32x4_t result5 = vbslq_f32(comp, result4, result2); + + int32x4_t t0 = vandq_s32(Ve, g_XMQNaNTest); + int32x4_t t1 = vandq_s32(Ve, g_XMInfinity); + t0 = vceqq_s32(t0, g_XMZero); + t1 = vceqq_s32(t1, g_XMInfinity); + int32x4_t isNaN = vbicq_s32(t1, t0); + + float32x4_t vResult = vbslq_f32(isNaN, g_XMQNaN, result5); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // expE(V) = exp2(vin*log2(e)) + __m128 Ve = _mm_mul_ps(g_XMLgE, 
V); + + __m128i itrunc = _mm_cvttps_epi32(Ve); + __m128 ftrunc = _mm_cvtepi32_ps(itrunc); + __m128 y = _mm_sub_ps(Ve, ftrunc); + + __m128 poly = XM_FMADD_PS(y, g_XMExpEst7, g_XMExpEst6); + poly = XM_FMADD_PS(poly, y, g_XMExpEst5); + poly = XM_FMADD_PS(poly, y, g_XMExpEst4); + poly = XM_FMADD_PS(poly, y, g_XMExpEst3); + poly = XM_FMADD_PS(poly, y, g_XMExpEst2); + poly = XM_FMADD_PS(poly, y, g_XMExpEst1); + poly = XM_FMADD_PS(poly, y, g_XMOne); + + __m128i biased = _mm_add_epi32(itrunc, g_XMExponentBias); + biased = _mm_slli_epi32(biased, 23); + __m128 result0 = _mm_div_ps(_mm_castsi128_ps(biased), poly); + + biased = _mm_add_epi32(itrunc, g_XM253); + biased = _mm_slli_epi32(biased, 23); + __m128 result1 = _mm_div_ps(_mm_castsi128_ps(biased), poly); + result1 = _mm_mul_ps(g_XMMinNormal.v, result1); + + // Use selection to handle the cases + // if (V is NaN) -> QNaN; + // else if (V sign bit set) + // if (V > -150) + // if (V.exponent < -126) -> result1 + // else -> result0 + // else -> +0 + // else + // if (V < 128) -> result0 + // else -> +inf + + __m128i comp = _mm_cmplt_epi32(_mm_castps_si128(Ve), g_XMBin128); + __m128i select0 = _mm_and_si128(comp, _mm_castps_si128(result0)); + __m128i select1 = _mm_andnot_si128(comp, g_XMInfinity); + __m128i result2 = _mm_or_si128(select0, select1); + + comp = _mm_cmplt_epi32(itrunc, g_XMSubnormalExponent); + select1 = _mm_and_si128(comp, _mm_castps_si128(result1)); + select0 = _mm_andnot_si128(comp, _mm_castps_si128(result0)); + __m128i result3 = _mm_or_si128(select0, select1); + + comp = _mm_cmplt_epi32(_mm_castps_si128(Ve), g_XMBinNeg150); + select0 = _mm_and_si128(comp, result3); + select1 = _mm_andnot_si128(comp, g_XMZero); + __m128i result4 = _mm_or_si128(select0, select1); + + __m128i sign = _mm_and_si128(_mm_castps_si128(Ve), g_XMNegativeZero); + comp = _mm_cmpeq_epi32(sign, g_XMNegativeZero); + select0 = _mm_and_si128(comp, result4); + select1 = _mm_andnot_si128(comp, result2); + __m128i result5 = _mm_or_si128(select0, 
select1); + + __m128i t0 = _mm_and_si128(_mm_castps_si128(Ve), g_XMQNaNTest); + __m128i t1 = _mm_and_si128(_mm_castps_si128(Ve), g_XMInfinity); + t0 = _mm_cmpeq_epi32(t0, g_XMZero); + t1 = _mm_cmpeq_epi32(t1, g_XMInfinity); + __m128i isNaN = _mm_andnot_si128(t0, t1); + + select0 = _mm_and_si128(isNaN, g_XMQNaN); + select1 = _mm_andnot_si128(isNaN, result5); + __m128i vResult = _mm_or_si128(select0, select1); + + return _mm_castsi128_ps(vResult); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorExp(FXMVECTOR V) noexcept +{ + return XMVectorExp2(V); +} + +//------------------------------------------------------------------------------ + +#if defined(_XM_SSE_INTRINSICS_) + +namespace Internal +{ + inline __m128i multi_sll_epi32(__m128i value, __m128i count) + { + __m128i v = _mm_shuffle_epi32(value, _MM_SHUFFLE(0, 0, 0, 0)); + __m128i c = _mm_shuffle_epi32(count, _MM_SHUFFLE(0, 0, 0, 0)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r0 = _mm_sll_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(1, 1, 1, 1)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(1, 1, 1, 1)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r1 = _mm_sll_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(2, 2, 2, 2)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(2, 2, 2, 2)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r2 = _mm_sll_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(3, 3, 3, 3)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(3, 3, 3, 3)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r3 = _mm_sll_epi32(v, c); + + // (r0,r0,r1,r1) + __m128 r01 = _mm_shuffle_ps(_mm_castsi128_ps(r0), _mm_castsi128_ps(r1), _MM_SHUFFLE(0, 0, 0, 0)); + // (r2,r2,r3,r3) + __m128 r23 = _mm_shuffle_ps(_mm_castsi128_ps(r2), _mm_castsi128_ps(r3), _MM_SHUFFLE(0, 0, 0, 0)); + // (r0,r1,r2,r3) + __m128 result = _mm_shuffle_ps(r01, r23, _MM_SHUFFLE(2, 0, 2, 0)); + return _mm_castps_si128(result); + } + + inline 
__m128i multi_srl_epi32(__m128i value, __m128i count) + { + __m128i v = _mm_shuffle_epi32(value, _MM_SHUFFLE(0, 0, 0, 0)); + __m128i c = _mm_shuffle_epi32(count, _MM_SHUFFLE(0, 0, 0, 0)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r0 = _mm_srl_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(1, 1, 1, 1)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(1, 1, 1, 1)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r1 = _mm_srl_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(2, 2, 2, 2)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(2, 2, 2, 2)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r2 = _mm_srl_epi32(v, c); + + v = _mm_shuffle_epi32(value, _MM_SHUFFLE(3, 3, 3, 3)); + c = _mm_shuffle_epi32(count, _MM_SHUFFLE(3, 3, 3, 3)); + c = _mm_and_si128(c, g_XMMaskX); + __m128i r3 = _mm_srl_epi32(v, c); + + // (r0,r0,r1,r1) + __m128 r01 = _mm_shuffle_ps(_mm_castsi128_ps(r0), _mm_castsi128_ps(r1), _MM_SHUFFLE(0, 0, 0, 0)); + // (r2,r2,r3,r3) + __m128 r23 = _mm_shuffle_ps(_mm_castsi128_ps(r2), _mm_castsi128_ps(r3), _MM_SHUFFLE(0, 0, 0, 0)); + // (r0,r1,r2,r3) + __m128 result = _mm_shuffle_ps(r01, r23, _MM_SHUFFLE(2, 0, 2, 0)); + return _mm_castps_si128(result); + } + + inline __m128i GetLeadingBit(const __m128i value) + { + static const XMVECTORI32 g_XM0000FFFF = { { { 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF } } }; + static const XMVECTORI32 g_XM000000FF = { { { 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF } } }; + static const XMVECTORI32 g_XM0000000F = { { { 0x0000000F, 0x0000000F, 0x0000000F, 0x0000000F } } }; + static const XMVECTORI32 g_XM00000003 = { { { 0x00000003, 0x00000003, 0x00000003, 0x00000003 } } }; + + __m128i v = value, r, c, b, s; + + c = _mm_cmpgt_epi32(v, g_XM0000FFFF); // c = (v > 0xFFFF) + b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0) + r = _mm_slli_epi32(b, 4); // r = (b << 4) + v = multi_srl_epi32(v, r); // v = (v >> r) + + c = _mm_cmpgt_epi32(v, g_XM000000FF); // c = (v > 0xFF) + b = _mm_srli_epi32(c, 31); // b = (c ? 
1 : 0) + s = _mm_slli_epi32(b, 3); // s = (b << 3) + v = multi_srl_epi32(v, s); // v = (v >> s) + r = _mm_or_si128(r, s); // r = (r | s) + + c = _mm_cmpgt_epi32(v, g_XM0000000F); // c = (v > 0xF) + b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0) + s = _mm_slli_epi32(b, 2); // s = (b << 2) + v = multi_srl_epi32(v, s); // v = (v >> s) + r = _mm_or_si128(r, s); // r = (r | s) + + c = _mm_cmpgt_epi32(v, g_XM00000003); // c = (v > 0x3) + b = _mm_srli_epi32(c, 31); // b = (c ? 1 : 0) + s = _mm_slli_epi32(b, 1); // s = (b << 1) + v = multi_srl_epi32(v, s); // v = (v >> s) + r = _mm_or_si128(r, s); // r = (r | s) + + s = _mm_srli_epi32(v, 1); + r = _mm_or_si128(r, s); + return r; + } +} // namespace Internal + +#endif // _XM_SSE_INTRINSICS_ + +#if defined(_XM_ARM_NEON_INTRINSICS_) + +namespace Internal +{ + inline int32x4_t GetLeadingBit(const int32x4_t value) + { + static const XMVECTORI32 g_XM0000FFFF = { { { 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF } } }; + static const XMVECTORI32 g_XM000000FF = { { { 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF } } }; + static const XMVECTORI32 g_XM0000000F = { { { 0x0000000F, 0x0000000F, 0x0000000F, 0x0000000F } } }; + static const XMVECTORI32 g_XM00000003 = { { { 0x00000003, 0x00000003, 0x00000003, 0x00000003 } } }; + + int32x4_t v = value, r, c, b, s; + + c = vcgtq_s32(v, g_XM0000FFFF); // c = (v > 0xFFFF) + b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0) + r = vshlq_n_s32(b, 4); // r = (b << 4) + r = vnegq_s32(r); + v = vshlq_u32(v, r); // v = (v >> r) + + c = vcgtq_s32(v, g_XM000000FF); // c = (v > 0xFF) + b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0) + s = vshlq_n_s32(b, 3); // s = (b << 3) + s = vnegq_s32(s); + v = vshlq_u32(v, s); // v = (v >> s) + r = vorrq_s32(r, s); // r = (r | s) + + c = vcgtq_s32(v, g_XM0000000F); // c = (v > 0xF) + b = vshrq_n_u32(c, 31); // b = (c ? 
1 : 0) + s = vshlq_n_s32(b, 2); // s = (b << 2) + s = vnegq_s32(s); + v = vshlq_u32(v, s); // v = (v >> s) + r = vorrq_s32(r, s); // r = (r | s) + + c = vcgtq_s32(v, g_XM00000003); // c = (v > 0x3) + b = vshrq_n_u32(c, 31); // b = (c ? 1 : 0) + s = vshlq_n_s32(b, 1); // s = (b << 1) + s = vnegq_s32(s); + v = vshlq_u32(v, s); // v = (v >> s) + r = vorrq_s32(r, s); // r = (r | s) + + s = vshrq_n_u32(v, 1); + r = vorrq_s32(r, s); + return r; + } + +} // namespace Internal + +#endif + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLog2(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + const float fScale = 1.4426950f; // (1.0f / logf(2.0f)); + + XMVECTORF32 Result = { { { + logf(V.vector4_f32[0]) * fScale, + logf(V.vector4_f32[1]) * fScale, + logf(V.vector4_f32[2]) * fScale, + logf(V.vector4_f32[3]) * fScale + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + int32x4_t rawBiased = vandq_s32(V, g_XMInfinity); + int32x4_t trailing = vandq_s32(V, g_XMQNaNTest); + int32x4_t isExponentZero = vceqq_s32(g_XMZero, rawBiased); + + // Compute exponent and significand for normals. + int32x4_t biased = vshrq_n_u32(rawBiased, 23); + int32x4_t exponentNor = vsubq_s32(biased, g_XMExponentBias); + int32x4_t trailingNor = trailing; + + // Compute exponent and significand for subnormals. + int32x4_t leading = Internal::GetLeadingBit(trailing); + int32x4_t shift = vsubq_s32(g_XMNumTrailing, leading); + int32x4_t exponentSub = vsubq_s32(g_XMSubnormalExponent, shift); + int32x4_t trailingSub = vshlq_u32(trailing, shift); + trailingSub = vandq_s32(trailingSub, g_XMQNaNTest); + int32x4_t e = vbslq_f32(isExponentZero, exponentSub, exponentNor); + int32x4_t t = vbslq_f32(isExponentZero, trailingSub, trailingNor); + + // Compute the approximation. 
+ int32x4_t tmp = vorrq_s32(g_XMOne, t); + float32x4_t y = vsubq_f32(tmp, g_XMOne); + + float32x4_t log2 = vmlaq_f32(g_XMLogEst6, g_XMLogEst7, y); + log2 = vmlaq_f32(g_XMLogEst5, log2, y); + log2 = vmlaq_f32(g_XMLogEst4, log2, y); + log2 = vmlaq_f32(g_XMLogEst3, log2, y); + log2 = vmlaq_f32(g_XMLogEst2, log2, y); + log2 = vmlaq_f32(g_XMLogEst1, log2, y); + log2 = vmlaq_f32(g_XMLogEst0, log2, y); + log2 = vmlaq_f32(vcvtq_f32_s32(e), log2, y); + + // if (x is NaN) -> QNaN + // else if (V is positive) + // if (V is infinite) -> +inf + // else -> log2(V) + // else + // if (V is zero) -> -inf + // else -> -QNaN + + int32x4_t isInfinite = vandq_s32((V), g_XMAbsMask); + isInfinite = vceqq_s32(isInfinite, g_XMInfinity); + + int32x4_t isGreaterZero = vcgtq_s32((V), g_XMZero); + int32x4_t isNotFinite = vcgtq_s32((V), g_XMInfinity); + int32x4_t isPositive = vbicq_s32(isGreaterZero, isNotFinite); + + int32x4_t isZero = vandq_s32((V), g_XMAbsMask); + isZero = vceqq_s32(isZero, g_XMZero); + + int32x4_t t0 = vandq_s32((V), g_XMQNaNTest); + int32x4_t t1 = vandq_s32((V), g_XMInfinity); + t0 = vceqq_s32(t0, g_XMZero); + t1 = vceqq_s32(t1, g_XMInfinity); + int32x4_t isNaN = vbicq_s32(t1, t0); + + float32x4_t result = vbslq_f32(isInfinite, g_XMInfinity, log2); + tmp = vbslq_f32(isZero, g_XMNegInfinity, g_XMNegQNaN); + result = vbslq_f32(isPositive, result, tmp); + result = vbslq_f32(isNaN, g_XMQNaN, result); + return result; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i rawBiased = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity); + __m128i trailing = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest); + __m128i isExponentZero = _mm_cmpeq_epi32(g_XMZero, rawBiased); + + // Compute exponent and significand for normals. + __m128i biased = _mm_srli_epi32(rawBiased, 23); + __m128i exponentNor = _mm_sub_epi32(biased, g_XMExponentBias); + __m128i trailingNor = trailing; + + // Compute exponent and significand for subnormals. 
+    // Subnormal inputs: renormalize the trailing bits and adjust the exponent.
+    __m128i leading = Internal::GetLeadingBit(trailing);
+    __m128i shift = _mm_sub_epi32(g_XMNumTrailing, leading);
+    __m128i exponentSub = _mm_sub_epi32(g_XMSubnormalExponent, shift);
+    __m128i trailingSub = Internal::multi_sll_epi32(trailing, shift);
+    trailingSub = _mm_and_si128(trailingSub, g_XMQNaNTest);
+
+    // Select normal vs. subnormal exponent/significand per lane.
+    __m128i select0 = _mm_and_si128(isExponentZero, exponentSub);
+    __m128i select1 = _mm_andnot_si128(isExponentZero, exponentNor);
+    __m128i e = _mm_or_si128(select0, select1);
+
+    select0 = _mm_and_si128(isExponentZero, trailingSub);
+    select1 = _mm_andnot_si128(isExponentZero, trailingNor);
+    __m128i t = _mm_or_si128(select0, select1);
+
+    // Compute the approximation.
+    __m128i tmp = _mm_or_si128(g_XMOne, t);
+    __m128 y = _mm_sub_ps(_mm_castsi128_ps(tmp), g_XMOne);
+
+    // Polynomial (Horner form) in y, then add the integer exponent.
+    __m128 log2 = XM_FMADD_PS(g_XMLogEst7, y, g_XMLogEst6);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst5);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst4);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst3);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst2);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst1);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst0);
+    log2 = XM_FMADD_PS(log2, y, _mm_cvtepi32_ps(e));
+
+    // if (x is NaN) -> QNaN
+    // else if (V is positive)
+    //     if (V is infinite) -> +inf
+    //     else -> log2(V)
+    // else
+    //     if (V is zero) -> -inf
+    //     else -> -QNaN
+
+    __m128i isInfinite = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
+    isInfinite = _mm_cmpeq_epi32(isInfinite, g_XMInfinity);
+
+    __m128i isGreaterZero = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMZero);
+    __m128i isNotFinite = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMInfinity);
+    __m128i isPositive = _mm_andnot_si128(isNotFinite, isGreaterZero);
+
+    __m128i isZero = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
+    isZero = _mm_cmpeq_epi32(isZero, g_XMZero);
+
+    // NaN = exponent all ones AND trailing bits non-zero.
+    __m128i t0 = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest);
+    __m128i t1 = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity);
+    t0 = _mm_cmpeq_epi32(t0, g_XMZero);
+    t1 = _mm_cmpeq_epi32(t1, g_XMInfinity);
+    __m128i isNaN = _mm_andnot_si128(t0, t1);
+
+    select0 = _mm_and_si128(isInfinite, g_XMInfinity);
+    select1 = _mm_andnot_si128(isInfinite, _mm_castps_si128(log2));
+    __m128i result = _mm_or_si128(select0, select1);
+
+    select0 = _mm_and_si128(isZero, g_XMNegInfinity);
+    select1 = _mm_andnot_si128(isZero, g_XMNegQNaN);
+    tmp = _mm_or_si128(select0, select1);
+
+    select0 = _mm_and_si128(isPositive, result);
+    select1 = _mm_andnot_si128(isPositive, tmp);
+    result = _mm_or_si128(select0, select1);
+
+    select0 = _mm_and_si128(isNaN, g_XMQNaN);
+    select1 = _mm_andnot_si128(isNaN, result);
+    result = _mm_or_si128(select0, select1);
+
+    return _mm_castsi128_ps(result);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component natural (base-e) logarithm of V; the scalar path uses logf.
+// SIMD paths evaluate a base-2 polynomial and rescale (see g_XMInvLgE below).
+inline XMVECTOR XM_CALLCONV XMVectorLogE(FXMVECTOR V) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMVECTORF32 Result = { { {
+            logf(V.vector4_f32[0]),
+            logf(V.vector4_f32[1]),
+            logf(V.vector4_f32[2]),
+            logf(V.vector4_f32[3])
+        } } };
+    return Result.v;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    // Extract the biased exponent and trailing significand bits.
+    int32x4_t rawBiased = vandq_s32(V, g_XMInfinity);
+    int32x4_t trailing = vandq_s32(V, g_XMQNaNTest);
+    int32x4_t isExponentZero = vceqq_s32(g_XMZero, rawBiased);
+
+    // Compute exponent and significand for normals.
+    int32x4_t biased = vshrq_n_u32(rawBiased, 23);
+    int32x4_t exponentNor = vsubq_s32(biased, g_XMExponentBias);
+    int32x4_t trailingNor = trailing;
+
+    // Compute exponent and significand for subnormals.
+    int32x4_t leading = Internal::GetLeadingBit(trailing);
+    int32x4_t shift = vsubq_s32(g_XMNumTrailing, leading);
+    int32x4_t exponentSub = vsubq_s32(g_XMSubnormalExponent, shift);
+    int32x4_t trailingSub = vshlq_u32(trailing, shift);
+    trailingSub = vandq_s32(trailingSub, g_XMQNaNTest);
+    int32x4_t e = vbslq_f32(isExponentZero, exponentSub, exponentNor);
+    int32x4_t t = vbslq_f32(isExponentZero, trailingSub, trailingNor);
+
+    // Compute the approximation.
+    int32x4_t tmp = vorrq_s32(g_XMOne, t);
+    float32x4_t y = vsubq_f32(tmp, g_XMOne);
+
+    float32x4_t log2 = vmlaq_f32(g_XMLogEst6, g_XMLogEst7, y);
+    log2 = vmlaq_f32(g_XMLogEst5, log2, y);
+    log2 = vmlaq_f32(g_XMLogEst4, log2, y);
+    log2 = vmlaq_f32(g_XMLogEst3, log2, y);
+    log2 = vmlaq_f32(g_XMLogEst2, log2, y);
+    log2 = vmlaq_f32(g_XMLogEst1, log2, y);
+    log2 = vmlaq_f32(g_XMLogEst0, log2, y);
+    log2 = vmlaq_f32(vcvtq_f32_s32(e), log2, y);
+
+    // Rescale the base-2 result to base e (g_XMInvLgE is presumably
+    // 1/log2(e) == ln 2 -- confirm against the constants table).
+    log2 = vmulq_f32(g_XMInvLgE, log2);
+
+    // if (x is NaN) -> QNaN
+    // else if (V is positive)
+    //     if (V is infinite) -> +inf
+    //     else -> log2(V)
+    // else
+    //     if (V is zero) -> -inf
+    //     else -> -QNaN
+
+    int32x4_t isInfinite = vandq_s32((V), g_XMAbsMask);
+    isInfinite = vceqq_s32(isInfinite, g_XMInfinity);
+
+    int32x4_t isGreaterZero = vcgtq_s32((V), g_XMZero);
+    int32x4_t isNotFinite = vcgtq_s32((V), g_XMInfinity);
+    int32x4_t isPositive = vbicq_s32(isGreaterZero, isNotFinite);
+
+    int32x4_t isZero = vandq_s32((V), g_XMAbsMask);
+    isZero = vceqq_s32(isZero, g_XMZero);
+
+    int32x4_t t0 = vandq_s32((V), g_XMQNaNTest);
+    int32x4_t t1 = vandq_s32((V), g_XMInfinity);
+    t0 = vceqq_s32(t0, g_XMZero);
+    t1 = vceqq_s32(t1, g_XMInfinity);
+    int32x4_t isNaN = vbicq_s32(t1, t0);
+
+    float32x4_t result = vbslq_f32(isInfinite, g_XMInfinity, log2);
+    tmp = vbslq_f32(isZero, g_XMNegInfinity, g_XMNegQNaN);
+    result = vbslq_f32(isPositive, result, tmp);
+    result = vbslq_f32(isNaN, g_XMQNaN, result);
+    return result;
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Extract the biased exponent and trailing significand bits.
+    __m128i rawBiased = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity);
+    __m128i trailing = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest);
+    __m128i isExponentZero = _mm_cmpeq_epi32(g_XMZero, rawBiased);
+
+    // Compute exponent and significand for normals.
+    __m128i biased = _mm_srli_epi32(rawBiased, 23);
+    __m128i exponentNor = _mm_sub_epi32(biased, g_XMExponentBias);
+    __m128i trailingNor = trailing;
+
+    // Compute exponent and significand for subnormals.
+    __m128i leading = Internal::GetLeadingBit(trailing);
+    __m128i shift = _mm_sub_epi32(g_XMNumTrailing, leading);
+    __m128i exponentSub = _mm_sub_epi32(g_XMSubnormalExponent, shift);
+    __m128i trailingSub = Internal::multi_sll_epi32(trailing, shift);
+    trailingSub = _mm_and_si128(trailingSub, g_XMQNaNTest);
+
+    __m128i select0 = _mm_and_si128(isExponentZero, exponentSub);
+    __m128i select1 = _mm_andnot_si128(isExponentZero, exponentNor);
+    __m128i e = _mm_or_si128(select0, select1);
+
+    select0 = _mm_and_si128(isExponentZero, trailingSub);
+    select1 = _mm_andnot_si128(isExponentZero, trailingNor);
+    __m128i t = _mm_or_si128(select0, select1);
+
+    // Compute the approximation.
+    __m128i tmp = _mm_or_si128(g_XMOne, t);
+    __m128 y = _mm_sub_ps(_mm_castsi128_ps(tmp), g_XMOne);
+
+    __m128 log2 = XM_FMADD_PS(g_XMLogEst7, y, g_XMLogEst6);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst5);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst4);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst3);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst2);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst1);
+    log2 = XM_FMADD_PS(log2, y, g_XMLogEst0);
+    log2 = XM_FMADD_PS(log2, y, _mm_cvtepi32_ps(e));
+
+    // Rescale the base-2 result to base e (see NEON path note).
+    log2 = _mm_mul_ps(g_XMInvLgE, log2);
+
+    // if (x is NaN) -> QNaN
+    // else if (V is positive)
+    //     if (V is infinite) -> +inf
+    //     else -> log2(V)
+    // else
+    //     if (V is zero) -> -inf
+    //     else -> -QNaN
+
+    __m128i isInfinite = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
+    isInfinite = _mm_cmpeq_epi32(isInfinite, g_XMInfinity);
+
+    __m128i isGreaterZero = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMZero);
+    __m128i isNotFinite = _mm_cmpgt_epi32(_mm_castps_si128(V), g_XMInfinity);
+    __m128i isPositive = _mm_andnot_si128(isNotFinite, isGreaterZero);
+
+    __m128i isZero = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask);
+    isZero = _mm_cmpeq_epi32(isZero, g_XMZero);
+
+    __m128i t0 = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest);
+    __m128i t1 = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity);
+    t0 = _mm_cmpeq_epi32(t0, g_XMZero);
+    t1 = _mm_cmpeq_epi32(t1, g_XMInfinity);
+    __m128i isNaN = _mm_andnot_si128(t0, t1);
+
+    select0 = _mm_and_si128(isInfinite, g_XMInfinity);
+    select1 = _mm_andnot_si128(isInfinite, _mm_castps_si128(log2));
+    __m128i result = _mm_or_si128(select0, select1);
+
+    select0 = _mm_and_si128(isZero, g_XMNegInfinity);
+    select1 = _mm_andnot_si128(isZero, g_XMNegQNaN);
+    tmp = _mm_or_si128(select0, select1);
+
+    select0 = _mm_and_si128(isPositive, result);
+    select1 = _mm_andnot_si128(isPositive, tmp);
+    result = _mm_or_si128(select0, select1);
+
+    select0 = _mm_and_si128(isNaN, g_XMQNaN);
+    select1 = _mm_andnot_si128(isNaN, result);
+    result = _mm_or_si128(select0, select1);
+
+    return _mm_castsi128_ps(result);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// NOTE: XMVectorLog is the base-2 logarithm -- it simply forwards to
+// XMVectorLog2. Use XMVectorLogE for the natural logarithm.
+inline XMVECTOR XM_CALLCONV XMVectorLog(FXMVECTOR V) noexcept
+{
+    return XMVectorLog2(V);
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component power V1^V2. All paths fall back to scalar powf; the SSE
+// path spills the operands to aligned arrays first.
+inline XMVECTOR XM_CALLCONV XMVectorPow
+(
+    FXMVECTOR V1,
+    FXMVECTOR V2
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMVECTORF32 Result = { { {
+            powf(V1.vector4_f32[0], V2.vector4_f32[0]),
+            powf(V1.vector4_f32[1], V2.vector4_f32[1]),
+            powf(V1.vector4_f32[2], V2.vector4_f32[2]),
+            powf(V1.vector4_f32[3], V2.vector4_f32[3])
+        } } };
+    return Result.v;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    XMVECTORF32 vResult = { { {
+            powf(vgetq_lane_f32(V1, 0), vgetq_lane_f32(V2, 0)),
+            powf(vgetq_lane_f32(V1, 1), vgetq_lane_f32(V2, 1)),
+            powf(vgetq_lane_f32(V1, 2), vgetq_lane_f32(V2, 2)),
+            powf(vgetq_lane_f32(V1, 3), vgetq_lane_f32(V2, 3))
+        } } };
+    return vResult.v;
+#elif defined(_XM_SSE_INTRINSICS_)
+    XM_ALIGNED_DATA(16) float a[4];
+    XM_ALIGNED_DATA(16) float b[4];
+    _mm_store_ps(a, V1);
+    _mm_store_ps(b, V2);
+    XMVECTOR vResult = _mm_setr_ps(
+        powf(a[0], b[0]),
+        powf(a[1], b[1]),
+        powf(a[2], b[2]),
+        powf(a[3], b[3]));
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component absolute value. The SSE path computes max(0 - V, V), which
+// also clears the sign of -0.0f.
+inline XMVECTOR XM_CALLCONV XMVectorAbs(FXMVECTOR V) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 vResult = { { {
+            fabsf(V.vector4_f32[0]),
+            fabsf(V.vector4_f32[1]),
+            fabsf(V.vector4_f32[2]),
+            fabsf(V.vector4_f32[3])
+        } } };
+    return vResult.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    return vabsq_f32(V);
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vResult = _mm_setzero_ps();
+    vResult = _mm_sub_ps(vResult, V);
+    vResult = _mm_max_ps(vResult, V);
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component floating-point remainder with the sign of V1.
+inline XMVECTOR XM_CALLCONV XMVectorMod
+(
+    FXMVECTOR V1,
+    FXMVECTOR V2
+) noexcept
+{
+    // V1 % V2 = V1 - V2 * truncate(V1 / V2)
+
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMVECTOR Quotient = XMVectorDivide(V1, V2);
+    Quotient = XMVectorTruncate(Quotient);
+    XMVECTOR Result = XMVectorNegativeMultiplySubtract(V2, Quotient, V1);
+    return Result;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    XMVECTOR vResult = XMVectorDivide(V1, V2);
+    vResult = XMVectorTruncate(vResult);
+    return vmlsq_f32(V1, vResult, V2);
+#elif defined(_XM_SSE_INTRINSICS_)
+    XMVECTOR vResult = _mm_div_ps(V1, V2);
+    vResult = XMVectorTruncate(vResult);
+    return XM_FNMADD_PS(vResult, V2, V1);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Wraps each angle into [-XM_PI, XM_PI) by subtracting the nearest multiple
+// of 2*pi (round-to-nearest, hence the half-open range centered on zero).
+inline XMVECTOR XM_CALLCONV XMVectorModAngles(FXMVECTOR Angles) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMVECTOR V;
+    XMVECTOR Result;
+
+    // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI
+    V = XMVectorMultiply(Angles, g_XMReciprocalTwoPi.v);
+    V = XMVectorRound(V);
+    Result = XMVectorNegativeMultiplySubtract(g_XMTwoPi.v, V, Angles);
+    return Result;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI
+    XMVECTOR vResult = vmulq_f32(Angles, g_XMReciprocalTwoPi);
+    // Use the inline function due to complexity for rounding
+    vResult = XMVectorRound(vResult);
+    return vmlsq_f32(Angles, vResult, g_XMTwoPi);
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI
+    XMVECTOR vResult = _mm_mul_ps(Angles, g_XMReciprocalTwoPi);
+    // Use the inline function due to complexity for rounding
+    vResult = XMVectorRound(vResult);
+    return XM_FNMADD_PS(vResult, g_XMTwoPi, Angles);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component sine via an 11-degree minimax polynomial after range
+// reduction to [-pi/2, pi/2] (using sin(pi - x) = sin(x)).
+inline XMVECTOR XM_CALLCONV XMVectorSin(FXMVECTOR V) noexcept
+{
+    // 11-degree minimax approximation
+
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 Result = { { {
+            sinf(V.vector4_f32[0]),
+            sinf(V.vector4_f32[1]),
+            sinf(V.vector4_f32[2]),
+            sinf(V.vector4_f32[3])
+        } } };
+    return Result.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    // Force the value within the bounds of pi
+    XMVECTOR x = XMVectorModAngles(V);
+
+    // Map in [-pi/2,pi/2] with sin(y) = sin(x).
+    uint32x4_t sign = vandq_u32(x, g_XMNegativeZero);
+    uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
+    float32x4_t absx = vabsq_f32(x);
+    float32x4_t rflx = vsubq_f32(c, x);
+    uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi);
+    // Keep x where |x| <= pi/2, otherwise reflect: x := (+/-pi) - x.
+    x = vbslq_f32(comp, x, rflx);
+
+    float32x4_t x2 = vmulq_f32(x, x);
+
+    // Compute polynomial approximation
+    const XMVECTOR SC1 = g_XMSinCoefficients1;
+    const XMVECTOR SC0 = g_XMSinCoefficients0;
+    XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SC0), 1);
+    XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(SC1), 0);
+
+    vConstants = vdupq_lane_f32(vget_high_f32(SC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(SC0), 1);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(SC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    Result = vmlaq_f32(g_XMOne, Result, x2);
+    Result = vmulq_f32(Result, x);
+    return Result;
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Force the value within the bounds of pi
+    XMVECTOR x = XMVectorModAngles(V);
+
+    // Map in [-pi/2,pi/2] with sin(y) = sin(x).
+    __m128 sign = _mm_and_ps(x, g_XMNegativeZero);
+    __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
+    __m128 absx = _mm_andnot_ps(sign, x); // |x|
+    __m128 rflx = _mm_sub_ps(c, x);
+    __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
+    // Keep x where |x| <= pi/2, otherwise reflect: x := (+/-pi) - x.
+    __m128 select0 = _mm_and_ps(comp, x);
+    __m128 select1 = _mm_andnot_ps(comp, rflx);
+    x = _mm_or_ps(select0, select1);
+
+    __m128 x2 = _mm_mul_ps(x, x);
+
+    // Compute polynomial approximation
+    const XMVECTOR SC1 = g_XMSinCoefficients1;
+    __m128 vConstantsB = XM_PERMUTE_PS(SC1, _MM_SHUFFLE(0, 0, 0, 0));
+    const XMVECTOR SC0 = g_XMSinCoefficients0;
+    __m128 vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(3, 3, 3, 3));
+    __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(2, 2, 2, 2));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(1, 1, 1, 1));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(0, 0, 0, 0));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    Result = XM_FMADD_PS(Result, x2, g_XMOne);
+    Result = _mm_mul_ps(Result, x);
+    return Result;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component cosine via a 10-degree minimax polynomial after range
+// reduction to [-pi/2, pi/2] (using cos(pi - x) = -cos(x), hence the sign).
+inline XMVECTOR XM_CALLCONV XMVectorCos(FXMVECTOR V) noexcept
+{
+    // 10-degree minimax approximation
+
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 Result = { { {
+            cosf(V.vector4_f32[0]),
+            cosf(V.vector4_f32[1]),
+            cosf(V.vector4_f32[2]),
+            cosf(V.vector4_f32[3])
+        } } };
+    return Result.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    // Map V to x in [-pi,pi].
+    XMVECTOR x = XMVectorModAngles(V);
+
+    // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
+    uint32x4_t sign = vandq_u32(x, g_XMNegativeZero);
+    uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
+    float32x4_t absx = vabsq_f32(x);
+    float32x4_t rflx = vsubq_f32(c, x);
+    uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi);
+    x = vbslq_f32(comp, x, rflx);
+    // sign is +1 for the unreflected lanes, -1 for the reflected ones.
+    sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne);
+
+    float32x4_t x2 = vmulq_f32(x, x);
+
+    // Compute polynomial approximation
+    const XMVECTOR CC1 = g_XMCosCoefficients1;
+    const XMVECTOR CC0 = g_XMCosCoefficients0;
+    XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(CC0), 1);
+    XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(CC1), 0);
+
+    vConstants = vdupq_lane_f32(vget_high_f32(CC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(CC0), 1);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(CC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    Result = vmlaq_f32(g_XMOne, Result, x2);
+    Result = vmulq_f32(Result, sign);
+    return Result;
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Map V to x in [-pi,pi].
+    XMVECTOR x = XMVectorModAngles(V);
+
+    // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
+    XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero);
+    __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
+    __m128 absx = _mm_andnot_ps(sign, x); // |x|
+    __m128 rflx = _mm_sub_ps(c, x);
+    __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
+    __m128 select0 = _mm_and_ps(comp, x);
+    __m128 select1 = _mm_andnot_ps(comp, rflx);
+    x = _mm_or_ps(select0, select1);
+    // sign is +1 for the unreflected lanes, -1 for the reflected ones.
+    select0 = _mm_and_ps(comp, g_XMOne);
+    select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
+    sign = _mm_or_ps(select0, select1);
+
+    __m128 x2 = _mm_mul_ps(x, x);
+
+    // Compute polynomial approximation
+    const XMVECTOR CC1 = g_XMCosCoefficients1;
+    __m128 vConstantsB = XM_PERMUTE_PS(CC1, _MM_SHUFFLE(0, 0, 0, 0));
+    const XMVECTOR CC0 = g_XMCosCoefficients0;
+    __m128 vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(3, 3, 3, 3));
+    __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(2, 2, 2, 2));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(1, 1, 1, 1));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(0, 0, 0, 0));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    Result = XM_FMADD_PS(Result, x2, g_XMOne);
+    Result = _mm_mul_ps(Result, sign);
+    return Result;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Computes sine and cosine of each component in one pass, sharing the
+// range reduction between both polynomials. Both output pointers must be
+// non-null.
+_Use_decl_annotations_
+inline void XM_CALLCONV XMVectorSinCos
+(
+    XMVECTOR* pSin,
+    XMVECTOR* pCos,
+    FXMVECTOR V
+) noexcept
+{
+    assert(pSin != nullptr);
+    assert(pCos != nullptr);
+
+    // 11/10-degree minimax approximation
+
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 Sin = { { {
+            sinf(V.vector4_f32[0]),
+            sinf(V.vector4_f32[1]),
+            sinf(V.vector4_f32[2]),
+            sinf(V.vector4_f32[3])
+        } } };
+
+    XMVECTORF32 Cos = { { {
+            cosf(V.vector4_f32[0]),
+            cosf(V.vector4_f32[1]),
+            cosf(V.vector4_f32[2]),
+            cosf(V.vector4_f32[3])
+        } } };
+
+    *pSin = Sin.v;
+    *pCos = Cos.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    // Force the value within the bounds of pi
+    XMVECTOR x = XMVectorModAngles(V);
+
+    // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
+    uint32x4_t sign = vandq_u32(x, g_XMNegativeZero);
+    uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
+    float32x4_t absx = vabsq_f32(x);
+    float32x4_t rflx = vsubq_f32(c, x);
+    uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi);
+    x = vbslq_f32(comp, x, rflx);
+    sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne);
+
+    float32x4_t x2 = vmulq_f32(x, x);
+
+    // Compute polynomial approximation for sine
+    const XMVECTOR SC1 = g_XMSinCoefficients1;
+    const XMVECTOR SC0 = g_XMSinCoefficients0;
+    XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SC0), 1);
+    XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(SC1), 0);
+
+    vConstants = vdupq_lane_f32(vget_high_f32(SC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(SC0), 1);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(SC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    Result = vmlaq_f32(g_XMOne, Result, x2);
+    *pSin = vmulq_f32(Result, x);
+
+    // Compute polynomial approximation for cosine
+    const XMVECTOR CC1 = g_XMCosCoefficients1;
+    const XMVECTOR CC0 = g_XMCosCoefficients0;
+    vConstants = vdupq_lane_f32(vget_high_f32(CC0), 1);
+    Result = vmlaq_lane_f32(vConstants, x2, vget_low_f32(CC1), 0);
+
+    vConstants = vdupq_lane_f32(vget_high_f32(CC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(CC0), 1);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(CC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    Result = vmlaq_f32(g_XMOne, Result, x2);
+    *pCos = vmulq_f32(Result, sign);
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Force the value within the bounds of pi
+    XMVECTOR x = XMVectorModAngles(V);
+
+    // Map in [-pi/2,pi/2] with sin(y) = sin(x), cos(y) = sign*cos(x).
+    XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero);
+    __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0
+    __m128 absx = _mm_andnot_ps(sign, x); // |x|
+    __m128 rflx = _mm_sub_ps(c, x);
+    __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
+    __m128 select0 = _mm_and_ps(comp, x);
+    __m128 select1 = _mm_andnot_ps(comp, rflx);
+    x = _mm_or_ps(select0, select1);
+    select0 = _mm_and_ps(comp, g_XMOne);
+    select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
+    sign = _mm_or_ps(select0, select1);
+
+    __m128 x2 = _mm_mul_ps(x, x);
+
+    // Compute polynomial approximation of sine
+    const XMVECTOR SC1 = g_XMSinCoefficients1;
+    __m128 vConstantsB = XM_PERMUTE_PS(SC1, _MM_SHUFFLE(0, 0, 0, 0));
+    const XMVECTOR SC0 = g_XMSinCoefficients0;
+    __m128 vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(3, 3, 3, 3));
+    __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(2, 2, 2, 2));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(1, 1, 1, 1));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(SC0, _MM_SHUFFLE(0, 0, 0, 0));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    Result = XM_FMADD_PS(Result, x2, g_XMOne);
+    Result = _mm_mul_ps(Result, x);
+    *pSin = Result;
+
+    // Compute polynomial approximation of cosine
+    const XMVECTOR CC1 = g_XMCosCoefficients1;
+    vConstantsB = XM_PERMUTE_PS(CC1, _MM_SHUFFLE(0, 0, 0, 0));
+    const XMVECTOR CC0 = g_XMCosCoefficients0;
+    vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(3, 3, 3, 3));
+    Result = XM_FMADD_PS(vConstantsB, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(2, 2, 2, 2));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(1, 1, 1, 1));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(CC0, _MM_SHUFFLE(0, 0, 0, 0));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    Result =
XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, sign); + *pCos = Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorTan(FXMVECTOR V) noexcept +{ + // Cody and Waite algorithm to compute tangent. + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + tanf(V.vector4_f32[0]), + tanf(V.vector4_f32[1]), + tanf(V.vector4_f32[2]), + tanf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_SSE_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + static const XMVECTORF32 TanCoefficients0 = { { { 1.0f, -4.667168334e-1f, 2.566383229e-2f, -3.118153191e-4f } } }; + static const XMVECTORF32 TanCoefficients1 = { { { 4.981943399e-7f, -1.333835001e-1f, 3.424887824e-3f, -1.786170734e-5f } } }; + static const XMVECTORF32 TanConstants = { { { 1.570796371f, 6.077100628e-11f, 0.000244140625f, 0.63661977228f /*2 / Pi*/ } } }; + static const XMVECTORU32 Mask = { { { 0x1, 0x1, 0x1, 0x1 } } }; + + XMVECTOR TwoDivPi = XMVectorSplatW(TanConstants.v); + + XMVECTOR Zero = XMVectorZero(); + + XMVECTOR C0 = XMVectorSplatX(TanConstants.v); + XMVECTOR C1 = XMVectorSplatY(TanConstants.v); + XMVECTOR Epsilon = XMVectorSplatZ(TanConstants.v); + + XMVECTOR VA = XMVectorMultiply(V, TwoDivPi); + + VA = XMVectorRound(VA); + + XMVECTOR VC = XMVectorNegativeMultiplySubtract(VA, C0, V); + + XMVECTOR VB = XMVectorAbs(VA); + + VC = XMVectorNegativeMultiplySubtract(VA, C1, VC); + +#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + VB = vcvtq_u32_f32(VB); +#elif defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_) + reinterpret_cast<__m128i*>(&VB)[0] = _mm_cvttps_epi32(VB); +#else + for (size_t i = 0; i < 4; i++) + { + VB.vector4_u32[i] = static_cast(VB.vector4_f32[i]); + } +#endif + + XMVECTOR VC2 = XMVectorMultiply(VC, VC); + + XMVECTOR T7 = XMVectorSplatW(TanCoefficients1.v); + XMVECTOR T6 = XMVectorSplatZ(TanCoefficients1.v); + XMVECTOR T4 = 
XMVectorSplatX(TanCoefficients1.v); + XMVECTOR T3 = XMVectorSplatW(TanCoefficients0.v); + XMVECTOR T5 = XMVectorSplatY(TanCoefficients1.v); + XMVECTOR T2 = XMVectorSplatZ(TanCoefficients0.v); + XMVECTOR T1 = XMVectorSplatY(TanCoefficients0.v); + XMVECTOR T0 = XMVectorSplatX(TanCoefficients0.v); + + XMVECTOR VBIsEven = XMVectorAndInt(VB, Mask.v); + VBIsEven = XMVectorEqualInt(VBIsEven, Zero); + + XMVECTOR N = XMVectorMultiplyAdd(VC2, T7, T6); + XMVECTOR D = XMVectorMultiplyAdd(VC2, T4, T3); + N = XMVectorMultiplyAdd(VC2, N, T5); + D = XMVectorMultiplyAdd(VC2, D, T2); + N = XMVectorMultiply(VC2, N); + D = XMVectorMultiplyAdd(VC2, D, T1); + N = XMVectorMultiplyAdd(VC, N, VC); + XMVECTOR VCNearZero = XMVectorInBounds(VC, Epsilon); + D = XMVectorMultiplyAdd(VC2, D, T0); + + N = XMVectorSelect(N, VC, VCNearZero); + D = XMVectorSelect(D, g_XMOne.v, VCNearZero); + + XMVECTOR R0 = XMVectorNegate(N); + XMVECTOR R1 = XMVectorDivide(N, D); + R0 = XMVectorDivide(D, R0); + + XMVECTOR VIsZero = XMVectorEqual(V, Zero); + + XMVECTOR Result = XMVectorSelect(R0, R1, VBIsEven); + + Result = XMVectorSelect(Result, Zero, VIsZero); + + return Result; + +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSinH(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + sinhf(V.vector4_f32[0]), + sinhf(V.vector4_f32[1]), + sinhf(V.vector4_f32[2]), + sinhf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f) + + XMVECTOR V1 = vmlaq_f32(g_XMNegativeOne.v, V, Scale.v); + XMVECTOR V2 = vmlsq_f32(g_XMNegativeOne.v, V, Scale.v); + XMVECTOR E1 = XMVectorExp(V1); + XMVECTOR E2 = XMVectorExp(V2); + + return vsubq_f32(E1, E2); +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 Scale = { { { 
1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f)
+
+    // V1 = x/ln2 - 1 and V2 = -x/ln2 - 1; with a base-2 XMVectorExp these
+    // yield e^x/2 and e^-x/2 (assumption -- confirm XMVectorExp is base-2).
+    XMVECTOR V1 = XM_FMADD_PS(V, Scale, g_XMNegativeOne);
+    XMVECTOR V2 = XM_FNMADD_PS(V, Scale, g_XMNegativeOne);
+    XMVECTOR E1 = XMVectorExp(V1);
+    XMVECTOR E2 = XMVectorExp(V2);
+
+    return _mm_sub_ps(E1, E2);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component hyperbolic cosine: same construction as XMVectorSinH, but
+// summing the two exponentials instead of subtracting.
+inline XMVECTOR XM_CALLCONV XMVectorCosH(FXMVECTOR V) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 Result = { { {
+            coshf(V.vector4_f32[0]),
+            coshf(V.vector4_f32[1]),
+            coshf(V.vector4_f32[2]),
+            coshf(V.vector4_f32[3])
+        } } };
+    return Result.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f)
+
+    XMVECTOR V1 = vmlaq_f32(g_XMNegativeOne.v, V, Scale.v);
+    XMVECTOR V2 = vmlsq_f32(g_XMNegativeOne.v, V, Scale.v);
+    XMVECTOR E1 = XMVectorExp(V1);
+    XMVECTOR E2 = XMVectorExp(V2);
+    return vaddq_f32(E1, E2);
+#elif defined(_XM_SSE_INTRINSICS_)
+    static const XMVECTORF32 Scale = { { { 1.442695040888963f, 1.442695040888963f, 1.442695040888963f, 1.442695040888963f } } }; // 1.0f / ln(2.0f)
+
+    XMVECTOR V1 = XM_FMADD_PS(V, Scale.v, g_XMNegativeOne.v);
+    XMVECTOR V2 = XM_FNMADD_PS(V, Scale.v, g_XMNegativeOne.v);
+    XMVECTOR E1 = XMVectorExp(V1);
+    XMVECTOR E2 = XMVectorExp(V2);
+    return _mm_add_ps(E1, E2);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component hyperbolic tangent, via the identity
+// tanh(x) = 1 - 2/(e^(2x) + 1); Scale is 2/ln(2) so XMVectorExp(x*Scale)
+// produces e^(2x) (assumes base-2 XMVectorExp -- confirm).
+inline XMVECTOR XM_CALLCONV XMVectorTanH(FXMVECTOR V) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 Result = { { {
+            tanhf(V.vector4_f32[0]),
+            tanhf(V.vector4_f32[1]),
+            tanhf(V.vector4_f32[2]),
+            tanhf(V.vector4_f32[3])
+        } } };
+    return Result.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    static const XMVECTORF32 Scale = { { { 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f } } }; // 2.0f / ln(2.0f)
+
+    XMVECTOR E = vmulq_f32(V, Scale.v);
+    E = XMVectorExp(E);
+    E = vmlaq_f32(g_XMOneHalf.v, E, g_XMOneHalf.v);
+    E = XMVectorReciprocal(E);
+    return vsubq_f32(g_XMOne.v, E);
+#elif defined(_XM_SSE_INTRINSICS_)
+    static const XMVECTORF32 Scale = { { { 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f, 2.8853900817779268f } } }; // 2.0f / ln(2.0f)
+
+    XMVECTOR E = _mm_mul_ps(V, Scale.v);
+    E = XMVectorExp(E);
+    E = XM_FMADD_PS(E, g_XMOneHalf.v, g_XMOneHalf.v);
+    // NOTE(review): this path uses an exact divide where NEON uses
+    // XMVectorReciprocal; results may differ slightly between platforms.
+    E = _mm_div_ps(g_XMOne.v, E);
+    return _mm_sub_ps(g_XMOne.v, E);
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component arcsine via a 7-degree minimax polynomial scaled by
+// sqrt(1 - |V|), folded through asin(x) = pi/2 - acos(x).
+inline XMVECTOR XM_CALLCONV XMVectorASin(FXMVECTOR V) noexcept
+{
+    // 7-degree minimax approximation
+
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 Result = { { {
+            asinf(V.vector4_f32[0]),
+            asinf(V.vector4_f32[1]),
+            asinf(V.vector4_f32[2]),
+            asinf(V.vector4_f32[3])
+        } } };
+    return Result.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero);
+    float32x4_t x = vabsq_f32(V);
+
+    // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
+    float32x4_t oneMValue = vsubq_f32(g_XMOne, x);
+    float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue);
+    float32x4_t root = XMVectorSqrt(clampOneMValue);
+
+    // Compute polynomial approximation
+    const XMVECTOR AC1 = g_XMArcCoefficients1;
+    XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AC1), 0);
+    XMVECTOR t0 = vmlaq_lane_f32(vConstants, x, vget_high_f32(AC1), 1);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(AC1), 1);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(AC1), 0);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    const XMVECTOR AC0 = g_XMArcCoefficients0;
+    vConstants = vdupq_lane_f32(vget_high_f32(AC0), 1);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    vConstants = vdupq_lane_f32(vget_high_f32(AC0), 0);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(AC0), 1);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(AC0), 0);
+    t0 = vmlaq_f32(vConstants, t0, x);
+    t0 = vmulq_f32(t0, root);
+
+    // t0 now approximates acos(|V|); mirror for negative inputs, then
+    // convert to asin via pi/2 - acos.
+    float32x4_t t1 = vsubq_f32(g_XMPi, t0);
+    t0 = vbslq_f32(nonnegative, t0, t1);
+    t0 = vsubq_f32(g_XMHalfPi, t0);
+    return t0;
+#elif defined(_XM_SSE_INTRINSICS_)
+    __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero);
+    __m128 mvalue = _mm_sub_ps(g_XMZero, V);
+    __m128 x = _mm_max_ps(V, mvalue);  // |V|
+
+    // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
+    __m128 oneMValue = _mm_sub_ps(g_XMOne, x);
+    __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue);
+    __m128 root = _mm_sqrt_ps(clampOneMValue);  // sqrt(1-|V|)
+
+    // Compute polynomial approximation
+    const XMVECTOR AC1 = g_XMArcCoefficients1;
+    __m128 vConstantsB = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(3, 3, 3, 3));
+    __m128 vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(2, 2, 2, 2));
+    __m128 t0 = XM_FMADD_PS(vConstantsB, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(1, 1, 1, 1));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(0, 0, 0, 0));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    const XMVECTOR AC0 = g_XMArcCoefficients0;
+    vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(3, 3, 3, 3));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(2, 2, 2, 2));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(1, 1, 1, 1));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(0, 0, 0, 0));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+    t0 = _mm_mul_ps(t0, root);
+
+    // t0 now approximates acos(|V|); mirror for negative inputs, then
+    // convert to asin via pi/2 - acos.
+    __m128 t1 = _mm_sub_ps(g_XMPi, t0);
+    t0 = _mm_and_ps(nonnegative, t0);
+    t1 = _mm_andnot_ps(nonnegative, t1);
+    t0 = _mm_or_ps(t0, t1);
+    t0 = _mm_sub_ps(g_XMHalfPi, t0);
+    return t0;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component arccosine; same 7-degree minimax core as XMVectorASin but
+// without the final pi/2 - x fold.
+inline XMVECTOR XM_CALLCONV XMVectorACos(FXMVECTOR V) noexcept
+{
+    // 7-degree minimax approximation
+
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 Result = { { {
+            acosf(V.vector4_f32[0]),
+            acosf(V.vector4_f32[1]),
+            acosf(V.vector4_f32[2]),
+            acosf(V.vector4_f32[3])
+        } } };
+    return Result.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero);
+    float32x4_t x = vabsq_f32(V);
+
+    // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
+    float32x4_t oneMValue = vsubq_f32(g_XMOne, x);
+    float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue);
+    float32x4_t root = XMVectorSqrt(clampOneMValue);
+
+    // Compute polynomial approximation
+    const XMVECTOR AC1 = g_XMArcCoefficients1;
+    XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AC1), 0);
+    XMVECTOR t0 = vmlaq_lane_f32(vConstants, x, vget_high_f32(AC1), 1);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(AC1), 1);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(AC1), 0);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    const XMVECTOR AC0 = g_XMArcCoefficients0;
+    vConstants = vdupq_lane_f32(vget_high_f32(AC0), 1);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    vConstants = vdupq_lane_f32(vget_high_f32(AC0), 0);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(AC0), 1);
+    t0 = vmlaq_f32(vConstants, t0, x);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(AC0), 0);
+    t0 = vmlaq_f32(vConstants, t0, x);
+    t0 = vmulq_f32(t0, root);
+
+    // Mirror for negative inputs: acos(-x) = pi - acos(x).
+    float32x4_t t1 = vsubq_f32(g_XMPi, t0);
+    t0 = vbslq_f32(nonnegative, t0, t1);
+    return t0;
+#elif defined(_XM_SSE_INTRINSICS_)
+    __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero);
+    __m128 mvalue = _mm_sub_ps(g_XMZero, V);
+    __m128 x = _mm_max_ps(V, mvalue);  // |V|
+
+    // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
+    __m128 oneMValue = _mm_sub_ps(g_XMOne, x);
+    __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue);
+    __m128 root = _mm_sqrt_ps(clampOneMValue);  // sqrt(1-|V|)
+
+    // Compute polynomial approximation
+    const XMVECTOR AC1 = g_XMArcCoefficients1;
+    __m128 vConstantsB = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(3, 3, 3, 3));
+    __m128 vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(2, 2, 2, 2));
+    __m128 t0 = XM_FMADD_PS(vConstantsB, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(1, 1, 1, 1));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC1, _MM_SHUFFLE(0, 0, 0, 0));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    const XMVECTOR AC0 = g_XMArcCoefficients0;
+    vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(3, 3, 3, 3));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(2, 2, 2, 2));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(1, 1, 1, 1));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+
+    vConstants = XM_PERMUTE_PS(AC0, _MM_SHUFFLE(0, 0, 0, 0));
+    t0 = XM_FMADD_PS(t0, x, vConstants);
+    t0 = _mm_mul_ps(t0, root);
+
+    // Mirror for negative inputs: acos(-x) = pi - acos(x).
+    __m128 t1 = _mm_sub_ps(g_XMPi, t0);
+    t0 = _mm_and_ps(nonnegative, t0);
+    t1 = _mm_andnot_ps(nonnegative, t1);
+    t0 = _mm_or_ps(t0, t1);
+    return t0;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component arctangent via a 17-degree minimax polynomial. For |V| > 1
+// the polynomial is evaluated on 1/V and the result folded back through
+// atan(x) = sign(x)*pi/2 - atan(1/x).
+inline XMVECTOR XM_CALLCONV XMVectorATan(FXMVECTOR V) noexcept
+{
+    // 17-degree minimax approximation
+
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 Result = { { {
+            atanf(V.vector4_f32[0]),
+            atanf(V.vector4_f32[1]),
+            atanf(V.vector4_f32[2]),
+            atanf(V.vector4_f32[3])
+        } } };
+    return Result.v;
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x4_t absV = vabsq_f32(V);
+    float32x4_t invV = XMVectorReciprocal(V);
+    uint32x4_t comp = vcgtq_f32(V, g_XMOne);
+    uint32x4_t sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne);
+    comp = vcleq_f32(absV, g_XMOne);
+    // sign is 0 where |V| <= 1 (no fold needed), else +/-1.
+    sign = vbslq_f32(comp, g_XMZero, sign);
+    uint32x4_t x = vbslq_f32(comp, V, invV);
+
+    float32x4_t x2 = vmulq_f32(x, x);
+
+    // Compute polynomial approximation
+    const XMVECTOR TC1 = g_XMATanCoefficients1;
+    XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(TC1), 0);
+    XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(TC1), 1);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(TC1), 1);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(TC1), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    const XMVECTOR TC0 = g_XMATanCoefficients0;
+    vConstants = vdupq_lane_f32(vget_high_f32(TC0), 1);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_high_f32(TC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(TC0), 1);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    vConstants = vdupq_lane_f32(vget_low_f32(TC0), 0);
+    Result = vmlaq_f32(vConstants, Result, x2);
+
+    Result = vmlaq_f32(g_XMOne, Result, x2);
+    Result = vmulq_f32(Result, x);
+
+    // Fold the reciprocal-argument lanes: sign*pi/2 - atan(1/V).
+    float32x4_t result1 = vmulq_f32(sign, g_XMHalfPi);
+    result1 = vsubq_f32(result1, Result);
+
+    comp = vceqq_f32(sign, g_XMZero);
+    Result = vbslq_f32(comp, Result, result1);
+    return Result;
+#elif defined(_XM_SSE_INTRINSICS_)
+    __m128 absV = XMVectorAbs(V);
+    __m128 invV = _mm_div_ps(g_XMOne, V);
+    __m128 comp = _mm_cmpgt_ps(V, g_XMOne);
+    __m128 select0 = _mm_and_ps(comp, g_XMOne);
+    __m128 select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
+    __m128 sign = _mm_or_ps(select0, select1);
+    comp = _mm_cmple_ps(absV, g_XMOne);
+    // sign is 0 where |V| <= 1 (no fold needed), else +/-1.
+    select0 = _mm_and_ps(comp, g_XMZero);
+    select1 = _mm_andnot_ps(comp, sign);
+    sign = _mm_or_ps(select0, select1);
+    select0 = _mm_and_ps(comp, V);
+    select1 = _mm_andnot_ps(comp, invV);
+    __m128 x = _mm_or_ps(select0, select1);
+
+    __m128 x2 = _mm_mul_ps(x, x);
+
+    // Compute polynomial approximation
+    const XMVECTOR TC1 = g_XMATanCoefficients1;
+    __m128 vConstantsB = XM_PERMUTE_PS(TC1, _MM_SHUFFLE(3, 3, 3, 3));
+    __m128 vConstants = XM_PERMUTE_PS(TC1, _MM_SHUFFLE(2, 2, 2, 2));
+    __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(TC1, _MM_SHUFFLE(1, 1, 1, 1));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(TC1, _MM_SHUFFLE(0, 0, 0, 0));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    const XMVECTOR TC0 = g_XMATanCoefficients0;
+    vConstants = XM_PERMUTE_PS(TC0, _MM_SHUFFLE(3, 3, 3, 3));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(TC0, _MM_SHUFFLE(2, 2, 2, 2));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(TC0, _MM_SHUFFLE(1, 1, 1, 1));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    vConstants = XM_PERMUTE_PS(TC0, _MM_SHUFFLE(0, 0, 0, 0));
+    Result = XM_FMADD_PS(Result, x2, vConstants);
+
+    Result = XM_FMADD_PS(Result, x2, g_XMOne);
+
+    Result = _mm_mul_ps(Result, x);
+    // Fold the reciprocal-argument lanes: sign*pi/2 - atan(1/V).
+    __m128 result1 = _mm_mul_ps(sign, g_XMHalfPi);
+    result1 = _mm_sub_ps(result1, Result);
+
+    comp = _mm_cmpeq_ps(sign, g_XMZero);
+    select0 = _mm_and_ps(comp, Result);
+    select1 = _mm_andnot_ps(comp, result1);
+    Result = _mm_or_ps(select0, select1);
+    return Result;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+// Per-component four-quadrant arctangent of Y/X (radians, in [-pi, pi]).
+inline XMVECTOR XM_CALLCONV XMVectorATan2
+(
+    FXMVECTOR Y,
+    FXMVECTOR X
+) noexcept
+{
+#if defined(_XM_NO_INTRINSICS_)
+    XMVECTORF32 Result = { { {
+            atan2f(Y.vector4_f32[0], X.vector4_f32[0]),
+            atan2f(Y.vector4_f32[1], X.vector4_f32[1]),
+            atan2f(Y.vector4_f32[2], X.vector4_f32[2]),
+            atan2f(Y.vector4_f32[3], X.vector4_f32[3])
+        } } };
+    return Result.v;
+#else
+
+    // Return the inverse tangent of Y / X in the range of -Pi to Pi with the following exceptions:
+
+    //     Y == 0 and X is Negative         -> Pi with the sign of Y
+    //     y == 0 and x is positive         -> 0 with the sign of y
+    //     Y != 0 and X == 0                -> Pi / 2 with the sign of Y
+    //     Y != 0 and X is Negative         -> atan(y/x) + (PI with the sign of Y)
+    //     X == -Infinity and Finite Y      -> Pi
with the sign of Y + // X == +Infinity and Finite Y -> 0 with the sign of Y + // Y == Infinity and X is Finite -> Pi / 2 with the sign of Y + // Y == Infinity and X == -Infinity -> 3Pi / 4 with the sign of Y + // Y == Infinity and X == +Infinity -> Pi / 4 with the sign of Y + + static const XMVECTORF32 ATan2Constants = { { { XM_PI, XM_PIDIV2, XM_PIDIV4, XM_PI * 3.0f / 4.0f } } }; + + XMVECTOR Zero = XMVectorZero(); + XMVECTOR ATanResultValid = XMVectorTrueInt(); + + XMVECTOR Pi = XMVectorSplatX(ATan2Constants); + XMVECTOR PiOverTwo = XMVectorSplatY(ATan2Constants); + XMVECTOR PiOverFour = XMVectorSplatZ(ATan2Constants); + XMVECTOR ThreePiOverFour = XMVectorSplatW(ATan2Constants); + + XMVECTOR YEqualsZero = XMVectorEqual(Y, Zero); + XMVECTOR XEqualsZero = XMVectorEqual(X, Zero); + XMVECTOR XIsPositive = XMVectorAndInt(X, g_XMNegativeZero.v); + XIsPositive = XMVectorEqualInt(XIsPositive, Zero); + XMVECTOR YEqualsInfinity = XMVectorIsInfinite(Y); + XMVECTOR XEqualsInfinity = XMVectorIsInfinite(X); + + XMVECTOR YSign = XMVectorAndInt(Y, g_XMNegativeZero.v); + Pi = XMVectorOrInt(Pi, YSign); + PiOverTwo = XMVectorOrInt(PiOverTwo, YSign); + PiOverFour = XMVectorOrInt(PiOverFour, YSign); + ThreePiOverFour = XMVectorOrInt(ThreePiOverFour, YSign); + + XMVECTOR R1 = XMVectorSelect(Pi, YSign, XIsPositive); + XMVECTOR R2 = XMVectorSelect(ATanResultValid, PiOverTwo, XEqualsZero); + XMVECTOR R3 = XMVectorSelect(R2, R1, YEqualsZero); + XMVECTOR R4 = XMVectorSelect(ThreePiOverFour, PiOverFour, XIsPositive); + XMVECTOR R5 = XMVectorSelect(PiOverTwo, R4, XEqualsInfinity); + XMVECTOR Result = XMVectorSelect(R3, R5, YEqualsInfinity); + ATanResultValid = XMVectorEqualInt(Result, ATanResultValid); + + XMVECTOR V = XMVectorDivide(Y, X); + + XMVECTOR R0 = XMVectorATan(V); + + R1 = XMVectorSelect(Pi, g_XMNegativeZero, XIsPositive); + R2 = XMVectorAdd(R0, R1); + + return XMVectorSelect(Result, R2, ATanResultValid); + +#endif +} + 
+//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorSinEst(FXMVECTOR V) noexcept +{ + // 7-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + sinf(V.vector4_f32[0]), + sinf(V.vector4_f32[1]), + sinf(V.vector4_f32[2]), + sinf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with sin(y) = sin(x). + uint32x4_t sign = vandq_u32(x, g_XMNegativeZero); + uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + float32x4_t absx = vabsq_f32(x); + float32x4_t rflx = vsubq_f32(c, x); + uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi); + x = vbslq_f32(comp, x, rflx); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation + const XMVECTOR SEC = g_XMSinCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SEC), 0); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(SEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(SEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + Result = vmulq_f32(Result, x); + return Result; +#elif defined(_XM_SSE_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with sin(y) = sin(x). 
+ __m128 sign = _mm_and_ps(x, g_XMNegativeZero); + __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + __m128 absx = _mm_andnot_ps(sign, x); // |x| + __m128 rflx = _mm_sub_ps(c, x); + __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi); + __m128 select0 = _mm_and_ps(comp, x); + __m128 select1 = _mm_andnot_ps(comp, rflx); + x = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation + const XMVECTOR SEC = g_XMSinCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, x); + return Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorCosEst(FXMVECTOR V) noexcept +{ + // 6-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + cosf(V.vector4_f32[0]), + cosf(V.vector4_f32[1]), + cosf(V.vector4_f32[2]), + cosf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Map V to x in [-pi,pi]. + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x). 
+ uint32x4_t sign = vandq_u32(x, g_XMNegativeZero); + uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + float32x4_t absx = vabsq_f32(x); + float32x4_t rflx = vsubq_f32(c, x); + uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi); + x = vbslq_f32(comp, x, rflx); + sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation + const XMVECTOR CEC = g_XMCosCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(CEC), 0); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(CEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(CEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + Result = vmulq_f32(Result, sign); + return Result; +#elif defined(_XM_SSE_INTRINSICS_) + // Map V to x in [-pi,pi]. + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x). + XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero); + __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + __m128 absx = _mm_andnot_ps(sign, x); // |x| + __m128 rflx = _mm_sub_ps(c, x); + __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi); + __m128 select0 = _mm_and_ps(comp, x); + __m128 select1 = _mm_andnot_ps(comp, rflx); + x = _mm_or_ps(select0, select1); + select0 = _mm_and_ps(comp, g_XMOne); + select1 = _mm_andnot_ps(comp, g_XMNegativeOne); + sign = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation + const XMVECTOR CEC = g_XMCosCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, sign); + return Result; +#endif +} + 
+//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline void XM_CALLCONV XMVectorSinCosEst +( + XMVECTOR* pSin, + XMVECTOR* pCos, + FXMVECTOR V +) noexcept +{ + assert(pSin != nullptr); + assert(pCos != nullptr); + + // 7/6-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Sin = { { { + sinf(V.vector4_f32[0]), + sinf(V.vector4_f32[1]), + sinf(V.vector4_f32[2]), + sinf(V.vector4_f32[3]) + } } }; + + XMVECTORF32 Cos = { { { + cosf(V.vector4_f32[0]), + cosf(V.vector4_f32[1]), + cosf(V.vector4_f32[2]), + cosf(V.vector4_f32[3]) + } } }; + + *pSin = Sin.v; + *pCos = Cos.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x). + uint32x4_t sign = vandq_u32(x, g_XMNegativeZero); + uint32x4_t c = vorrq_u32(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + float32x4_t absx = vabsq_f32(x); + float32x4_t rflx = vsubq_f32(c, x); + uint32x4_t comp = vcleq_f32(absx, g_XMHalfPi); + x = vbslq_f32(comp, x, rflx); + sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation for sine + const XMVECTOR SEC = g_XMSinCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(SEC), 0); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(SEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(SEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + *pSin = vmulq_f32(Result, x); + + // Compute polynomial approximation + const XMVECTOR CEC = g_XMCosCoefficients1; + vConstants = vdupq_lane_f32(vget_high_f32(CEC), 0); + Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(CEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(CEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + Result = vmlaq_f32(g_XMOne, Result, x2); + *pCos = vmulq_f32(Result, 
sign); +#elif defined(_XM_SSE_INTRINSICS_) + // Force the value within the bounds of pi + XMVECTOR x = XMVectorModAngles(V); + + // Map in [-pi/2,pi/2] with sin(y) = sin(x), cos(y) = sign*cos(x). + XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero); + __m128 c = _mm_or_ps(g_XMPi, sign); // pi when x >= 0, -pi when x < 0 + __m128 absx = _mm_andnot_ps(sign, x); // |x| + __m128 rflx = _mm_sub_ps(c, x); + __m128 comp = _mm_cmple_ps(absx, g_XMHalfPi); + __m128 select0 = _mm_and_ps(comp, x); + __m128 select1 = _mm_andnot_ps(comp, rflx); + x = _mm_or_ps(select0, select1); + select0 = _mm_and_ps(comp, g_XMOne); + select1 = _mm_andnot_ps(comp, g_XMNegativeOne); + sign = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation for sine + const XMVECTOR SEC = g_XMSinCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(SEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, x); + *pSin = Result; + + // Compute polynomial approximation for cosine + const XMVECTOR CEC = g_XMCosCoefficients1; + vConstantsB = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(3, 3, 3, 3)); + vConstants = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(2, 2, 2, 2)); + Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(CEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + Result = XM_FMADD_PS(Result, x2, g_XMOne); + Result = _mm_mul_ps(Result, sign); + *pCos = Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorTanEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + tanf(V.vector4_f32[0]), + tanf(V.vector4_f32[1]), + 
tanf(V.vector4_f32[2]), + tanf(V.vector4_f32[3]) + } } }; + return Result.v; +#else + + XMVECTOR OneOverPi = XMVectorSplatW(g_XMTanEstCoefficients.v); + + XMVECTOR V1 = XMVectorMultiply(V, OneOverPi); + V1 = XMVectorRound(V1); + + V1 = XMVectorNegativeMultiplySubtract(g_XMPi.v, V1, V); + + XMVECTOR T0 = XMVectorSplatX(g_XMTanEstCoefficients.v); + XMVECTOR T1 = XMVectorSplatY(g_XMTanEstCoefficients.v); + XMVECTOR T2 = XMVectorSplatZ(g_XMTanEstCoefficients.v); + + XMVECTOR V2T2 = XMVectorNegativeMultiplySubtract(V1, V1, T2); + XMVECTOR V2 = XMVectorMultiply(V1, V1); + XMVECTOR V1T0 = XMVectorMultiply(V1, T0); + XMVECTOR V1T1 = XMVectorMultiply(V1, T1); + + XMVECTOR D = XMVectorReciprocalEst(V2T2); + XMVECTOR N = XMVectorMultiplyAdd(V2, V1T1, V1T0); + + return XMVectorMultiply(N, D); + +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorASinEst(FXMVECTOR V) noexcept +{ + // 3-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result; + Result.f[0] = asinf(V.vector4_f32[0]); + Result.f[1] = asinf(V.vector4_f32[1]); + Result.f[2] = asinf(V.vector4_f32[2]); + Result.f[3] = asinf(V.vector4_f32[3]); + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero); + float32x4_t x = vabsq_f32(V); + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. 
+ float32x4_t oneMValue = vsubq_f32(g_XMOne, x); + float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue); + float32x4_t root = XMVectorSqrt(clampOneMValue); + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMArcEstCoefficients; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AEC), 0); + XMVECTOR t0 = vmlaq_lane_f32(vConstants, x, vget_high_f32(AEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 0); + t0 = vmlaq_f32(vConstants, t0, x); + t0 = vmulq_f32(t0, root); + + float32x4_t t1 = vsubq_f32(g_XMPi, t0); + t0 = vbslq_f32(nonnegative, t0, t1); + t0 = vsubq_f32(g_XMHalfPi, t0); + return t0; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero); + __m128 mvalue = _mm_sub_ps(g_XMZero, V); + __m128 x = _mm_max_ps(V, mvalue); // |V| + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. + __m128 oneMValue = _mm_sub_ps(g_XMOne, x); + __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue); + __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|) + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMArcEstCoefficients; + __m128 vConstantsB = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 t0 = XM_FMADD_PS(vConstantsB, x, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(1, 1, 1, 1)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(0, 0, 0, 0)); + t0 = XM_FMADD_PS(t0, x, vConstants); + t0 = _mm_mul_ps(t0, root); + + __m128 t1 = _mm_sub_ps(g_XMPi, t0); + t0 = _mm_and_ps(nonnegative, t0); + t1 = _mm_andnot_ps(nonnegative, t1); + t0 = _mm_or_ps(t0, t1); + t0 = _mm_sub_ps(g_XMHalfPi, t0); + return t0; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorACosEst(FXMVECTOR V) noexcept +{ + 
// 3-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + acosf(V.vector4_f32[0]), + acosf(V.vector4_f32[1]), + acosf(V.vector4_f32[2]), + acosf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t nonnegative = vcgeq_f32(V, g_XMZero); + float32x4_t x = vabsq_f32(V); + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. + float32x4_t oneMValue = vsubq_f32(g_XMOne, x); + float32x4_t clampOneMValue = vmaxq_f32(g_XMZero, oneMValue); + float32x4_t root = XMVectorSqrt(clampOneMValue); + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMArcEstCoefficients; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AEC), 0); + XMVECTOR t0 = vmlaq_lane_f32(vConstants, x, vget_high_f32(AEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 1); + t0 = vmlaq_f32(vConstants, t0, x); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 0); + t0 = vmlaq_f32(vConstants, t0, x); + t0 = vmulq_f32(t0, root); + + float32x4_t t1 = vsubq_f32(g_XMPi, t0); + t0 = vbslq_f32(nonnegative, t0, t1); + return t0; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 nonnegative = _mm_cmpge_ps(V, g_XMZero); + __m128 mvalue = _mm_sub_ps(g_XMZero, V); + __m128 x = _mm_max_ps(V, mvalue); // |V| + + // Compute (1-|V|), clamp to zero to avoid sqrt of negative number. 
+ __m128 oneMValue = _mm_sub_ps(g_XMOne, x); + __m128 clampOneMValue = _mm_max_ps(g_XMZero, oneMValue); + __m128 root = _mm_sqrt_ps(clampOneMValue); // sqrt(1-|V|) + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMArcEstCoefficients; + __m128 vConstantsB = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 t0 = XM_FMADD_PS(vConstantsB, x, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(1, 1, 1, 1)); + t0 = XM_FMADD_PS(t0, x, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(0, 0, 0, 0)); + t0 = XM_FMADD_PS(t0, x, vConstants); + t0 = _mm_mul_ps(t0, root); + + __m128 t1 = _mm_sub_ps(g_XMPi, t0); + t0 = _mm_and_ps(nonnegative, t0); + t1 = _mm_andnot_ps(nonnegative, t1); + t0 = _mm_or_ps(t0, t1); + return t0; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorATanEst(FXMVECTOR V) noexcept +{ + // 9-degree minimax approximation + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + atanf(V.vector4_f32[0]), + atanf(V.vector4_f32[1]), + atanf(V.vector4_f32[2]), + atanf(V.vector4_f32[3]) + } } }; + return Result.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t absV = vabsq_f32(V); + float32x4_t invV = XMVectorReciprocalEst(V); + uint32x4_t comp = vcgtq_f32(V, g_XMOne); + uint32x4_t sign = vbslq_f32(comp, g_XMOne, g_XMNegativeOne); + comp = vcleq_f32(absV, g_XMOne); + sign = vbslq_f32(comp, g_XMZero, sign); + uint32x4_t x = vbslq_f32(comp, V, invV); + + float32x4_t x2 = vmulq_f32(x, x); + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMATanEstCoefficients1; + XMVECTOR vConstants = vdupq_lane_f32(vget_high_f32(AEC), 0); + XMVECTOR Result = vmlaq_lane_f32(vConstants, x2, vget_high_f32(AEC), 1); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 1); + Result = vmlaq_f32(vConstants, Result, x2); + + vConstants = vdupq_lane_f32(vget_low_f32(AEC), 0); + 
Result = vmlaq_f32(vConstants, Result, x2); + + // ATanEstCoefficients0 is already splatted + Result = vmlaq_f32(g_XMATanEstCoefficients0, Result, x2); + Result = vmulq_f32(Result, x); + + float32x4_t result1 = vmulq_f32(sign, g_XMHalfPi); + result1 = vsubq_f32(result1, Result); + + comp = vceqq_f32(sign, g_XMZero); + Result = vbslq_f32(comp, Result, result1); + return Result; +#elif defined(_XM_SSE_INTRINSICS_) + __m128 absV = XMVectorAbs(V); + __m128 invV = _mm_div_ps(g_XMOne, V); + __m128 comp = _mm_cmpgt_ps(V, g_XMOne); + __m128 select0 = _mm_and_ps(comp, g_XMOne); + __m128 select1 = _mm_andnot_ps(comp, g_XMNegativeOne); + __m128 sign = _mm_or_ps(select0, select1); + comp = _mm_cmple_ps(absV, g_XMOne); + select0 = _mm_and_ps(comp, g_XMZero); + select1 = _mm_andnot_ps(comp, sign); + sign = _mm_or_ps(select0, select1); + select0 = _mm_and_ps(comp, V); + select1 = _mm_andnot_ps(comp, invV); + __m128 x = _mm_or_ps(select0, select1); + + __m128 x2 = _mm_mul_ps(x, x); + + // Compute polynomial approximation + const XMVECTOR AEC = g_XMATanEstCoefficients1; + __m128 vConstantsB = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Result = XM_FMADD_PS(vConstantsB, x2, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(1, 1, 1, 1)); + Result = XM_FMADD_PS(Result, x2, vConstants); + + vConstants = XM_PERMUTE_PS(AEC, _MM_SHUFFLE(0, 0, 0, 0)); + Result = XM_FMADD_PS(Result, x2, vConstants); + // ATanEstCoefficients0 is already splatted + Result = XM_FMADD_PS(Result, x2, g_XMATanEstCoefficients0); + Result = _mm_mul_ps(Result, x); + __m128 result1 = _mm_mul_ps(sign, g_XMHalfPi); + result1 = _mm_sub_ps(result1, Result); + + comp = _mm_cmpeq_ps(sign, g_XMZero); + select0 = _mm_and_ps(comp, Result); + select1 = _mm_andnot_ps(comp, result1); + Result = _mm_or_ps(select0, select1); + return Result; +#endif +} + +//------------------------------------------------------------------------------ + +inline 
XMVECTOR XM_CALLCONV XMVectorATan2Est +( + FXMVECTOR Y, + FXMVECTOR X +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 Result = { { { + atan2f(Y.vector4_f32[0], X.vector4_f32[0]), + atan2f(Y.vector4_f32[1], X.vector4_f32[1]), + atan2f(Y.vector4_f32[2], X.vector4_f32[2]), + atan2f(Y.vector4_f32[3], X.vector4_f32[3]), + } } }; + return Result.v; +#else + + static const XMVECTORF32 ATan2Constants = { { { XM_PI, XM_PIDIV2, XM_PIDIV4, 2.3561944905f /* Pi*3/4 */ } } }; + + const XMVECTOR Zero = XMVectorZero(); + XMVECTOR ATanResultValid = XMVectorTrueInt(); + + XMVECTOR Pi = XMVectorSplatX(ATan2Constants); + XMVECTOR PiOverTwo = XMVectorSplatY(ATan2Constants); + XMVECTOR PiOverFour = XMVectorSplatZ(ATan2Constants); + XMVECTOR ThreePiOverFour = XMVectorSplatW(ATan2Constants); + + XMVECTOR YEqualsZero = XMVectorEqual(Y, Zero); + XMVECTOR XEqualsZero = XMVectorEqual(X, Zero); + XMVECTOR XIsPositive = XMVectorAndInt(X, g_XMNegativeZero.v); + XIsPositive = XMVectorEqualInt(XIsPositive, Zero); + XMVECTOR YEqualsInfinity = XMVectorIsInfinite(Y); + XMVECTOR XEqualsInfinity = XMVectorIsInfinite(X); + + XMVECTOR YSign = XMVectorAndInt(Y, g_XMNegativeZero.v); + Pi = XMVectorOrInt(Pi, YSign); + PiOverTwo = XMVectorOrInt(PiOverTwo, YSign); + PiOverFour = XMVectorOrInt(PiOverFour, YSign); + ThreePiOverFour = XMVectorOrInt(ThreePiOverFour, YSign); + + XMVECTOR R1 = XMVectorSelect(Pi, YSign, XIsPositive); + XMVECTOR R2 = XMVectorSelect(ATanResultValid, PiOverTwo, XEqualsZero); + XMVECTOR R3 = XMVectorSelect(R2, R1, YEqualsZero); + XMVECTOR R4 = XMVectorSelect(ThreePiOverFour, PiOverFour, XIsPositive); + XMVECTOR R5 = XMVectorSelect(PiOverTwo, R4, XEqualsInfinity); + XMVECTOR Result = XMVectorSelect(R3, R5, YEqualsInfinity); + ATanResultValid = XMVectorEqualInt(Result, ATanResultValid); + + XMVECTOR Reciprocal = XMVectorReciprocalEst(X); + XMVECTOR V = XMVectorMultiply(Y, Reciprocal); + XMVECTOR R0 = XMVectorATanEst(V); + + R1 = XMVectorSelect(Pi, g_XMNegativeZero, 
XIsPositive); + R2 = XMVectorAdd(R0, R1); + + Result = XMVectorSelect(Result, R2, ATanResultValid); + + return Result; + +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLerp +( + FXMVECTOR V0, + FXMVECTOR V1, + float t +) noexcept +{ + // V0 + t * (V1 - V0) + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Scale = XMVectorReplicate(t); + XMVECTOR Length = XMVectorSubtract(V1, V0); + return XMVectorMultiplyAdd(Length, Scale, V0); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR L = vsubq_f32(V1, V0); + return vmlaq_n_f32(V0, L, t); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR L = _mm_sub_ps(V1, V0); + XMVECTOR S = _mm_set_ps1(t); + return XM_FMADD_PS(L, S, V0); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorLerpV +( + FXMVECTOR V0, + FXMVECTOR V1, + FXMVECTOR T +) noexcept +{ + // V0 + T * (V1 - V0) + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Length = XMVectorSubtract(V1, V0); + return XMVectorMultiplyAdd(Length, T, V0); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR L = vsubq_f32(V1, V0); + return vmlaq_f32(V0, L, T); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR Length = _mm_sub_ps(V1, V0); + return XM_FMADD_PS(Length, T, V0); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorHermite +( + FXMVECTOR Position0, + FXMVECTOR Tangent0, + FXMVECTOR Position1, + GXMVECTOR Tangent1, + float t +) noexcept +{ + // Result = (2 * t^3 - 3 * t^2 + 1) * Position0 + + // (t^3 - 2 * t^2 + t) * Tangent0 + + // (-2 * t^3 + 3 * t^2) * Position1 + + // (t^3 - t^2) * Tangent1 + +#if defined(_XM_NO_INTRINSICS_) + + float t2 = t * t; + float t3 = t * t2; + + XMVECTOR P0 = XMVectorReplicate(2.0f * t3 - 3.0f * t2 + 1.0f); + XMVECTOR T0 = XMVectorReplicate(t3 - 2.0f * t2 + t); + XMVECTOR P1 = XMVectorReplicate(-2.0f * t3 
+ 3.0f * t2); + XMVECTOR T1 = XMVectorReplicate(t3 - t2); + + XMVECTOR Result = XMVectorMultiply(P0, Position0); + Result = XMVectorMultiplyAdd(T0, Tangent0, Result); + Result = XMVectorMultiplyAdd(P1, Position1, Result); + Result = XMVectorMultiplyAdd(T1, Tangent1, Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float t2 = t * t; + float t3 = t * t2; + + float p0 = 2.0f * t3 - 3.0f * t2 + 1.0f; + float t0 = t3 - 2.0f * t2 + t; + float p1 = -2.0f * t3 + 3.0f * t2; + float t1 = t3 - t2; + + XMVECTOR vResult = vmulq_n_f32(Position0, p0); + vResult = vmlaq_n_f32(vResult, Tangent0, t0); + vResult = vmlaq_n_f32(vResult, Position1, p1); + vResult = vmlaq_n_f32(vResult, Tangent1, t1); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + float t2 = t * t; + float t3 = t * t2; + + XMVECTOR P0 = _mm_set_ps1(2.0f * t3 - 3.0f * t2 + 1.0f); + XMVECTOR T0 = _mm_set_ps1(t3 - 2.0f * t2 + t); + XMVECTOR P1 = _mm_set_ps1(-2.0f * t3 + 3.0f * t2); + XMVECTOR T1 = _mm_set_ps1(t3 - t2); + + XMVECTOR vResult = _mm_mul_ps(P0, Position0); + vResult = XM_FMADD_PS(Tangent0, T0, vResult); + vResult = XM_FMADD_PS(Position1, P1, vResult); + vResult = XM_FMADD_PS(Tangent1, T1, vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVectorHermiteV +( + FXMVECTOR Position0, + FXMVECTOR Tangent0, + FXMVECTOR Position1, + GXMVECTOR Tangent1, + HXMVECTOR T +) noexcept +{ + // Result = (2 * t^3 - 3 * t^2 + 1) * Position0 + + // (t^3 - 2 * t^2 + t) * Tangent0 + + // (-2 * t^3 + 3 * t^2) * Position1 + + // (t^3 - t^2) * Tangent1 + +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR T2 = XMVectorMultiply(T, T); + XMVECTOR T3 = XMVectorMultiply(T, T2); + + XMVECTOR P0 = XMVectorReplicate(2.0f * T3.vector4_f32[0] - 3.0f * T2.vector4_f32[0] + 1.0f); + XMVECTOR T0 = XMVectorReplicate(T3.vector4_f32[1] - 2.0f * T2.vector4_f32[1] + T.vector4_f32[1]); + XMVECTOR P1 = XMVectorReplicate(-2.0f 
* T3.vector4_f32[2] + 3.0f * T2.vector4_f32[2]); + XMVECTOR T1 = XMVectorReplicate(T3.vector4_f32[3] - T2.vector4_f32[3]); + + XMVECTOR Result = XMVectorMultiply(P0, Position0); + Result = XMVectorMultiplyAdd(T0, Tangent0, Result); + Result = XMVectorMultiplyAdd(P1, Position1, Result); + Result = XMVectorMultiplyAdd(T1, Tangent1, Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 CatMulT2 = { { { -3.0f, -2.0f, 3.0f, -1.0f } } }; + static const XMVECTORF32 CatMulT3 = { { { 2.0f, 1.0f, -2.0f, 1.0f } } }; + + XMVECTOR T2 = vmulq_f32(T, T); + XMVECTOR T3 = vmulq_f32(T, T2); + // Mul by the constants against t^2 + T2 = vmulq_f32(T2, CatMulT2); + // Mul by the constants against t^3 + T3 = vmlaq_f32(T2, T3, CatMulT3); + // T3 now has the pre-result. + // I need to add t.y only + T2 = vandq_u32(T, g_XMMaskY); + T3 = vaddq_f32(T3, T2); + // Add 1.0f to x + T3 = vaddq_f32(T3, g_XMIdentityR0); + // Now, I have the constants created + // Mul the x constant to Position0 + XMVECTOR vResult = vmulq_lane_f32(Position0, vget_low_f32(T3), 0); // T3[0] + // Mul the y constant to Tangent0 + vResult = vmlaq_lane_f32(vResult, Tangent0, vget_low_f32(T3), 1); // T3[1] + // Mul the z constant to Position1 + vResult = vmlaq_lane_f32(vResult, Position1, vget_high_f32(T3), 0); // T3[2] + // Mul the w constant to Tangent1 + vResult = vmlaq_lane_f32(vResult, Tangent1, vget_high_f32(T3), 1); // T3[3] + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 CatMulT2 = { { { -3.0f, -2.0f, 3.0f, -1.0f } } }; + static const XMVECTORF32 CatMulT3 = { { { 2.0f, 1.0f, -2.0f, 1.0f } } }; + + XMVECTOR T2 = _mm_mul_ps(T, T); + XMVECTOR T3 = _mm_mul_ps(T, T2); + // Mul by the constants against t^2 + T2 = _mm_mul_ps(T2, CatMulT2); + // Mul by the constants against t^3 + T3 = XM_FMADD_PS(T3, CatMulT3, T2); + // T3 now has the pre-result. 
    // --- tail of a Hermite-spline evaluator (function header lies before this chunk) ---
    // SSE path: T3 holds the four Hermite basis weights; splat each lane and
    // accumulate weight * {Position0, Tangent0, Position1, Tangent1} via FMAs.
    // I need to add t.y only
    T2 = _mm_and_ps(T, g_XMMaskY);
    T3 = _mm_add_ps(T3, T2);
    // Add 1.0f to x
    T3 = _mm_add_ps(T3, g_XMIdentityR0);
    // Now, I have the constants created
    // Mul the x constant to Position0
    XMVECTOR vResult = XM_PERMUTE_PS(T3, _MM_SHUFFLE(0, 0, 0, 0));
    vResult = _mm_mul_ps(vResult, Position0);
    // Mul the y constant to Tangent0
    T2 = XM_PERMUTE_PS(T3, _MM_SHUFFLE(1, 1, 1, 1));
    vResult = XM_FMADD_PS(T2, Tangent0, vResult);
    // Mul the z constant to Position1
    T2 = XM_PERMUTE_PS(T3, _MM_SHUFFLE(2, 2, 2, 2));
    vResult = XM_FMADD_PS(T2, Position1, vResult);
    // Mul the w constant to Tangent1
    T3 = XM_PERMUTE_PS(T3, _MM_SHUFFLE(3, 3, 3, 3));
    vResult = XM_FMADD_PS(T3, Tangent1, vResult);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Catmull-Rom spline interpolation of four control points at scalar parameter t.
// All four lanes of the result use the same t.
inline XMVECTOR XM_CALLCONV XMVectorCatmullRom
(
    FXMVECTOR Position0,
    FXMVECTOR Position1,
    FXMVECTOR Position2,
    GXMVECTOR Position3,
    float t
) noexcept
{
    // Result = ((-t^3 + 2 * t^2 - t) * Position0 +
    //           (3 * t^3 - 5 * t^2 + 2) * Position1 +
    //           (-3 * t^3 + 4 * t^2 + t) * Position2 +
    //           (t^3 - t^2) * Position3) * 0.5

#if defined(_XM_NO_INTRINSICS_)

    float t2 = t * t;
    float t3 = t * t2;

    // Scalar path: build the four splatted basis weights, then accumulate.
    XMVECTOR P0 = XMVectorReplicate((-t3 + 2.0f * t2 - t) * 0.5f);
    XMVECTOR P1 = XMVectorReplicate((3.0f * t3 - 5.0f * t2 + 2.0f) * 0.5f);
    XMVECTOR P2 = XMVectorReplicate((-3.0f * t3 + 4.0f * t2 + t) * 0.5f);
    XMVECTOR P3 = XMVectorReplicate((t3 - t2) * 0.5f);

    XMVECTOR Result = XMVectorMultiply(P0, Position0);
    Result = XMVectorMultiplyAdd(P1, Position1, Result);
    Result = XMVectorMultiplyAdd(P2, Position2, Result);
    Result = XMVectorMultiplyAdd(P3, Position3, Result);

    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    float t2 = t * t;
    float t3 = t * t2;

    // Basis weights computed in scalar, applied with vector-by-scalar multiply-accumulate.
    float p0 = (-t3 + 2.0f * t2 - t) * 0.5f;
    float p1 = (3.0f * t3 - 5.0f * t2 + 2.0f) * 0.5f;
    float p2 = (-3.0f * t3 + 4.0f * t2 + t) * 0.5f;
    float p3 = (t3 - t2) * 0.5f;

    XMVECTOR P1 = vmulq_n_f32(Position1, p1);
    XMVECTOR P0 = vmlaq_n_f32(P1, Position0, p0);
    XMVECTOR P3 = vmulq_n_f32(Position3, p3);
    XMVECTOR P2 = vmlaq_n_f32(P3, Position2, p2);
    P0 = vaddq_f32(P0, P2);
    return P0;
#elif defined(_XM_SSE_INTRINSICS_)
    float t2 = t * t;
    float t3 = t * t2;

    XMVECTOR P0 = _mm_set_ps1((-t3 + 2.0f * t2 - t) * 0.5f);
    XMVECTOR P1 = _mm_set_ps1((3.0f * t3 - 5.0f * t2 + 2.0f) * 0.5f);
    XMVECTOR P2 = _mm_set_ps1((-3.0f * t3 + 4.0f * t2 + t) * 0.5f);
    XMVECTOR P3 = _mm_set_ps1((t3 - t2) * 0.5f);

    P1 = _mm_mul_ps(Position1, P1);
    P0 = XM_FMADD_PS(Position0, P0, P1);
    P3 = _mm_mul_ps(Position3, P3);
    P2 = XM_FMADD_PS(Position2, P2, P3);
    P0 = _mm_add_ps(P0, P2);
    return P0;
#endif
}

//------------------------------------------------------------------------------

// Per-component Catmull-Rom: T supplies an independent parameter for each lane,
// so x is interpolated at T.x, y at T.y, etc.
inline XMVECTOR XM_CALLCONV XMVectorCatmullRomV
(
    FXMVECTOR Position0,
    FXMVECTOR Position1,
    FXMVECTOR Position2,
    GXMVECTOR Position3,
    HXMVECTOR T
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    float fx = T.vector4_f32[0];
    float fy = T.vector4_f32[1];
    float fz = T.vector4_f32[2];
    float fw = T.vector4_f32[3];
    // Each lane evaluates the full Catmull-Rom polynomial with its own parameter.
    XMVECTORF32 vResult = { { {
            0.5f * ((-fx * fx * fx + 2 * fx * fx - fx) * Position0.vector4_f32[0]
            + (3 * fx * fx * fx - 5 * fx * fx + 2) * Position1.vector4_f32[0]
            + (-3 * fx * fx * fx + 4 * fx * fx + fx) * Position2.vector4_f32[0]
            + (fx * fx * fx - fx * fx) * Position3.vector4_f32[0]),

            0.5f * ((-fy * fy * fy + 2 * fy * fy - fy) * Position0.vector4_f32[1]
            + (3 * fy * fy * fy - 5 * fy * fy + 2) * Position1.vector4_f32[1]
            + (-3 * fy * fy * fy + 4 * fy * fy + fy) * Position2.vector4_f32[1]
            + (fy * fy * fy - fy * fy) * Position3.vector4_f32[1]),

            0.5f * ((-fz * fz * fz + 2 * fz * fz - fz) * Position0.vector4_f32[2]
            + (3 * fz * fz * fz - 5 * fz * fz + 2) * Position1.vector4_f32[2]
            + (-3 * fz * fz * fz + 4 * fz * fz + fz) * Position2.vector4_f32[2]
            + (fz * fz * fz - fz * fz) * Position3.vector4_f32[2]),

            0.5f * ((-fw * fw * fw + 2 * fw * fw - fw) * Position0.vector4_f32[3]
            + (3 * fw * fw * fw - 5 * fw * fw + 2) * Position1.vector4_f32[3]
            + (-3 * fw * fw * fw + 4 * fw * fw + fw) * Position2.vector4_f32[3]
            + (fw * fw * fw - fw * fw) * Position3.vector4_f32[3])
        } } };
    return vResult.v;
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    static const XMVECTORF32 Catmul2 = { { { 2.0f, 2.0f, 2.0f, 2.0f } } };
    static const XMVECTORF32 Catmul3 = { { { 3.0f, 3.0f, 3.0f, 3.0f } } };
    static const XMVECTORF32 Catmul4 = { { { 4.0f, 4.0f, 4.0f, 4.0f } } };
    static const XMVECTORF32 Catmul5 = { { { 5.0f, 5.0f, 5.0f, 5.0f } } };
    // Cache T^2 and T^3
    XMVECTOR T2 = vmulq_f32(T, T);
    XMVECTOR T3 = vmulq_f32(T, T2);
    // Perform the Position0 term: (2*T^2 - T - T^3)
    XMVECTOR vResult = vaddq_f32(T2, T2);
    vResult = vsubq_f32(vResult, T);
    vResult = vsubq_f32(vResult, T3);
    vResult = vmulq_f32(vResult, Position0);
    // Perform the Position1 term and add: (3*T^3 - 5*T^2 + 2)
    XMVECTOR vTemp = vmulq_f32(T3, Catmul3);
    vTemp = vmlsq_f32(vTemp, T2, Catmul5);
    vTemp = vaddq_f32(vTemp, Catmul2);
    vResult = vmlaq_f32(vResult, vTemp, Position1);
    // Perform the Position2 term and add: (4*T^2 - 3*T^3 + T)
    vTemp = vmulq_f32(T2, Catmul4);
    vTemp = vmlsq_f32(vTemp, T3, Catmul3);
    vTemp = vaddq_f32(vTemp, T);
    vResult = vmlaq_f32(vResult, vTemp, Position2);
    // Position3 is the last term: (T^3 - T^2)
    T3 = vsubq_f32(T3, T2);
    vResult = vmlaq_f32(vResult, T3, Position3);
    // Multiply by 0.5f and exit
    vResult = vmulq_f32(vResult, g_XMOneHalf);
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    static const XMVECTORF32 Catmul2 = { { { 2.0f, 2.0f, 2.0f, 2.0f } } };
    static const XMVECTORF32 Catmul3 = { { { 3.0f, 3.0f, 3.0f, 3.0f } } };
    static const XMVECTORF32 Catmul4 = { { { 4.0f, 4.0f, 4.0f, 4.0f } } };
    static const XMVECTORF32 Catmul5 = { { { 5.0f, 5.0f, 5.0f, 5.0f } } };
    // Cache T^2 and T^3
    XMVECTOR T2 = _mm_mul_ps(T, T);
    XMVECTOR T3 = _mm_mul_ps(T, T2);
    // Perform the Position0 term: (2*T^2 - T - T^3)
    XMVECTOR vResult = _mm_add_ps(T2, T2);
    vResult = _mm_sub_ps(vResult, T);
    vResult = _mm_sub_ps(vResult, T3);
    vResult = _mm_mul_ps(vResult, Position0);
    // Perform the Position1 term and add: (3*T^3 - 5*T^2 + 2)
    XMVECTOR vTemp = _mm_mul_ps(T3, Catmul3);
    vTemp = XM_FNMADD_PS(T2, Catmul5, vTemp);
    vTemp = _mm_add_ps(vTemp, Catmul2);
    vResult = XM_FMADD_PS(vTemp, Position1, vResult);
    // Perform the Position2 term and add: (4*T^2 - 3*T^3 + T)
    vTemp = _mm_mul_ps(T2, Catmul4);
    vTemp = XM_FNMADD_PS(T3, Catmul3, vTemp);
    vTemp = _mm_add_ps(vTemp, T);
    vResult = XM_FMADD_PS(vTemp, Position2, vResult);
    // Position3 is the last term: (T^3 - T^2)
    T3 = _mm_sub_ps(T3, T2);
    vResult = XM_FMADD_PS(T3, Position3, vResult);
    // Multiply by 0.5f and exit
    vResult = _mm_mul_ps(vResult, g_XMOneHalf);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Barycentric combination of three points with scalar weights f and g.
inline XMVECTOR XM_CALLCONV XMVectorBaryCentric
(
    FXMVECTOR Position0,
    FXMVECTOR Position1,
    FXMVECTOR Position2,
    float f,
    float g
) noexcept
{
    // Result = Position0 + f * (Position1 - Position0) + g * (Position2 - Position0)

#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR P10 = XMVectorSubtract(Position1, Position0);
    XMVECTOR ScaleF = XMVectorReplicate(f);

    XMVECTOR P20 = XMVectorSubtract(Position2, Position0);
    XMVECTOR ScaleG = XMVectorReplicate(g);

    XMVECTOR Result = XMVectorMultiplyAdd(P10, ScaleF, Position0);
    Result = XMVectorMultiplyAdd(P20, ScaleG, Result);

    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    XMVECTOR R1 = vsubq_f32(Position1, Position0);
    XMVECTOR R2 = vsubq_f32(Position2, Position0);
    R1 = vmlaq_n_f32(Position0, R1, f);
    return vmlaq_n_f32(R1, R2, g);
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR R1 = _mm_sub_ps(Position1, Position0);
    XMVECTOR R2 = _mm_sub_ps(Position2, Position0);
    XMVECTOR SF = _mm_set_ps1(f);
    R1 = XM_FMADD_PS(R1, SF, Position0);
    XMVECTOR SG = _mm_set_ps1(g);
    return XM_FMADD_PS(R2, SG, R1);
#endif
}
//------------------------------------------------------------------------------

// Barycentric combination with per-lane vector weights F and G.
inline XMVECTOR XM_CALLCONV XMVectorBaryCentricV
(
    FXMVECTOR Position0,
    FXMVECTOR Position1,
    FXMVECTOR Position2,
    GXMVECTOR F,
    HXMVECTOR G
) noexcept
{
    // Result = Position0 + f * (Position1 - Position0) + g * (Position2 - Position0)

#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR P10 = XMVectorSubtract(Position1, Position0);
    XMVECTOR P20 = XMVectorSubtract(Position2, Position0);

    XMVECTOR Result = XMVectorMultiplyAdd(P10, F, Position0);
    Result = XMVectorMultiplyAdd(P20, G, Result);

    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    XMVECTOR R1 = vsubq_f32(Position1, Position0);
    XMVECTOR R2 = vsubq_f32(Position2, Position0);
    R1 = vmlaq_f32(Position0, R1, F);
    return vmlaq_f32(R1, R2, G);
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR R1 = _mm_sub_ps(Position1, Position0);
    XMVECTOR R2 = _mm_sub_ps(Position2, Position0);
    R1 = XM_FMADD_PS(R1, F, Position0);
    return XM_FMADD_PS(R2, G, R1);
#endif
}

/****************************************************************************
 *
 * 2D Vector
 *
 ****************************************************************************/

 //------------------------------------------------------------------------------
 // Comparison operations
 //------------------------------------------------------------------------------

 //------------------------------------------------------------------------------

// True when both x and y compare exactly equal; z and w are ignored.
inline bool XM_CALLCONV XMVector2Equal
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1])) != 0);
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vceq_f32(vget_low_f32(V1), vget_low_f32(V2));
    // Both 32-bit lanes must be all-ones for x and y to both match.
    return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2);
    // z and w are don't care
    return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0);
#endif
}


//------------------------------------------------------------------------------

// As XMVector2Equal, but returns a CR6-style record: CR6TRUE when both lanes
// are equal, CR6FALSE when both differ, 0 when mixed.
inline uint32_t XM_CALLCONV XMVector2EqualR
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    uint32_t CR = 0;
    if ((V1.vector4_f32[0] == V2.vector4_f32[0]) &&
        (V1.vector4_f32[1] == V2.vector4_f32[1]))
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) &&
        (V1.vector4_f32[1] != V2.vector4_f32[1]))
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vceq_f32(vget_low_f32(V1), vget_low_f32(V2));
    uint64_t r = vget_lane_u64(vTemp, 0);
    uint32_t CR = 0;
    if (r == 0xFFFFFFFFFFFFFFFFU)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!r)
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2);
    // z and w are don't care
    int iTest = _mm_movemask_ps(vTemp) & 3;
    uint32_t CR = 0;
    if (iTest == 3)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!iTest)
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;
#endif
}

//------------------------------------------------------------------------------

// Bitwise (integer) equality of the x and y lanes; z and w ignored.
inline bool XM_CALLCONV XMVector2EqualInt
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1])) != 0);
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // NOTE(review): vget_low_u32 applied to XMVECTOR (a float vector) relies on
    // MSVC's interchangeable NEON vector types — verify on GCC/Clang targets.
    uint32x2_t vTemp = vceq_u32(vget_low_u32(V1), vget_low_u32(V2));
    return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
    return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 3) == 3) != 0);
#endif
}

//------------------------------------------------------------------------------

// CR6 record for bitwise equality of x and y (see XMVector2EqualR).
inline uint32_t XM_CALLCONV XMVector2EqualIntR
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    uint32_t CR = 0;
    if ((V1.vector4_u32[0] == V2.vector4_u32[0]) &&
        (V1.vector4_u32[1] == V2.vector4_u32[1]))
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if ((V1.vector4_u32[0] != V2.vector4_u32[0]) &&
        (V1.vector4_u32[1] != V2.vector4_u32[1]))
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vceq_u32(vget_low_u32(V1), vget_low_u32(V2));
    uint64_t r = vget_lane_u64(vTemp, 0);
    uint32_t CR = 0;
    if (r == 0xFFFFFFFFFFFFFFFFU)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!r)
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;
#elif defined(_XM_SSE_INTRINSICS_)
    __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
    int iTest = _mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 3;
    uint32_t CR = 0;
    if (iTest == 3)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!iTest)
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;
#endif
}

//------------------------------------------------------------------------------

// True when |V1 - V2| <= Epsilon component-wise for x and y.
inline bool XM_CALLCONV XMVector2NearEqual
(
    FXMVECTOR V1,
    FXMVECTOR V2,
    FXMVECTOR Epsilon
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    float dx = fabsf(V1.vector4_f32[0] - V2.vector4_f32[0]);
    float dy = fabsf(V1.vector4_f32[1] - V2.vector4_f32[1]);
    return ((dx <= Epsilon.vector4_f32[0]) &&
        (dy <= Epsilon.vector4_f32[1]));
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // NOTE(review): vget_low_u32 on float operands here — current upstream
    // DirectXMath uses vget_low_f32; likely a type error on non-MSVC compilers.
    float32x2_t vDelta = vsub_f32(vget_low_u32(V1), vget_low_u32(V2));
#ifdef _MSC_VER
    // vacle_f32 compares absolute values directly.
    uint32x2_t vTemp = vacle_f32(vDelta, vget_low_u32(Epsilon));
#else
    uint32x2_t vTemp = vcle_f32(vabs_f32(vDelta), vget_low_u32(Epsilon));
#endif
    uint64_t r = vget_lane_u64(vTemp, 0);
    return (r == 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    // Get the difference
    XMVECTOR vDelta = _mm_sub_ps(V1, V2);
    // Get the absolute value of the difference via max(-delta, delta)
    XMVECTOR vTemp = _mm_setzero_ps();
    vTemp = _mm_sub_ps(vTemp, vDelta);
    vTemp = _mm_max_ps(vTemp, vDelta);
    vTemp = _mm_cmple_ps(vTemp, Epsilon);
    // z and w are don't care
    return (((_mm_movemask_ps(vTemp) & 3) == 0x3) != 0);
#endif
}

//------------------------------------------------------------------------------

// True when x or y differ (logical negation of XMVector2Equal).
inline bool XM_CALLCONV XMVector2NotEqual
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1])) != 0);
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vceq_f32(vget_low_f32(V1), vget_low_f32(V2));
    return (vget_lane_u64(vTemp, 0) != 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2);
    // z and w are don't care
    return (((_mm_movemask_ps(vTemp) & 3) != 3) != 0);
#endif
}

//------------------------------------------------------------------------------

// Bitwise inequality of x or y (negation of XMVector2EqualInt).
inline bool XM_CALLCONV XMVector2NotEqualInt
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1])) != 0);
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vceq_u32(vget_low_u32(V1), vget_low_u32(V2));
    return (vget_lane_u64(vTemp, 0) != 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
    return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 3) != 3) != 0);
#endif
}

//------------------------------------------------------------------------------

// True when both x and y of V1 are strictly greater than those of V2.
inline bool XM_CALLCONV XMVector2Greater
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1])) != 0);
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vcgt_f32(vget_low_f32(V1), vget_low_f32(V2));
    return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2);
    // z and w are don't care
    return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0);
#endif
}

//------------------------------------------------------------------------------

// CR6 record for strict greater-than on x and y (see XMVector2EqualR).
inline uint32_t XM_CALLCONV XMVector2GreaterR
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    uint32_t CR = 0;
    if ((V1.vector4_f32[0] > V2.vector4_f32[0]) &&
        (V1.vector4_f32[1] > V2.vector4_f32[1]))
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if ((V1.vector4_f32[0] <= V2.vector4_f32[0]) &&
        (V1.vector4_f32[1] <= V2.vector4_f32[1]))
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vcgt_f32(vget_low_f32(V1), vget_low_f32(V2));
    uint64_t r = vget_lane_u64(vTemp, 0);
    uint32_t CR = 0;
    if (r == 0xFFFFFFFFFFFFFFFFU)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!r)
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2);
    int iTest = _mm_movemask_ps(vTemp) & 3;
    uint32_t CR = 0;
    if (iTest == 3)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!iTest)
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;
#endif
}

//------------------------------------------------------------------------------

// True when both x and y of V1 are >= those of V2.
inline bool XM_CALLCONV XMVector2GreaterOrEqual
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1])) != 0);
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vcge_f32(vget_low_f32(V1), vget_low_f32(V2));
    return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmpge_ps(V1, V2);
    return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0);
#endif
}

//------------------------------------------------------------------------------

// CR6 record for >= on x and y (see XMVector2EqualR).
inline uint32_t XM_CALLCONV XMVector2GreaterOrEqualR
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    uint32_t CR = 0;
    if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) &&
        (V1.vector4_f32[1] >= V2.vector4_f32[1]))
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) &&
        (V1.vector4_f32[1] < V2.vector4_f32[1]))
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vcge_f32(vget_low_f32(V1), vget_low_f32(V2));
    uint64_t r = vget_lane_u64(vTemp, 0);
    uint32_t CR = 0;
    if (r == 0xFFFFFFFFFFFFFFFFU)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!r)
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmpge_ps(V1, V2);
    int iTest = _mm_movemask_ps(vTemp) & 3;
    uint32_t CR = 0;
    if (iTest == 3)
    {
        CR = XM_CRMASK_CR6TRUE;
    }
    else if (!iTest)
    {
        CR = XM_CRMASK_CR6FALSE;
    }
    return CR;
#endif
}

//------------------------------------------------------------------------------

// True when both x and y of V1 are strictly less than those of V2.
inline bool XM_CALLCONV XMVector2Less
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1])) != 0);
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vclt_f32(vget_low_f32(V1), vget_low_f32(V2));
    return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmplt_ps(V1, V2);
    return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0);
#endif
}

//------------------------------------------------------------------------------

// True when both x and y of V1 are <= those of V2.
inline bool XM_CALLCONV XMVector2LessOrEqual
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1])) != 0);
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    uint32x2_t vTemp = vcle_f32(vget_low_f32(V1), vget_low_f32(V2));
    return (vget_lane_u64(vTemp, 0) == 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR vTemp = _mm_cmple_ps(V1, V2);
    return (((_mm_movemask_ps(vTemp) & 3) == 3) != 0);
#endif
}

//------------------------------------------------------------------------------

// True when -Bounds <= V <= Bounds holds for both x and y.
inline bool XM_CALLCONV XMVector2InBounds
(
    FXMVECTOR V,
    FXMVECTOR Bounds
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) &&
        (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1])) != 0);
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    float32x2_t VL = vget_low_f32(V);
    float32x2_t B = vget_low_f32(Bounds);
    // Test if less than or equal
    uint32x2_t ivTemp1 = vcle_f32(VL, B);
    // Negate the bounds
    float32x2_t vTemp2 = vneg_f32(B);
    // Test if greater or equal (Reversed)
    uint32x2_t ivTemp2 = vcle_f32(vTemp2, VL);
    // Blend answers
    ivTemp1 = vand_u32(ivTemp1, ivTemp2);
    // x and y in bounds?
    return (vget_lane_u64(ivTemp1, 0) == 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    // Test if less than or equal
    XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds);
    // Negate the bounds
    XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne);
    // Test if greater or equal (Reversed)
    vTemp2 = _mm_cmple_ps(vTemp2, V);
    // Blend answers
    vTemp1 = _mm_and_ps(vTemp1, vTemp2);
    // x and y in bounds? (z and w are don't care)
    return (((_mm_movemask_ps(vTemp1) & 0x3) == 0x3) != 0);
#endif
}

//------------------------------------------------------------------------------

// Force precise FP on MSVC so the self-compare NaN test is not optimized away.
#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma float_control(push)
#pragma float_control(precise, on)
#endif

// True when x or y is NaN (NaN is the only value not equal to itself).
inline bool XM_CALLCONV XMVector2IsNaN(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    return (XMISNAN(V.vector4_f32[0]) ||
        XMISNAN(V.vector4_f32[1]));
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    float32x2_t VL = vget_low_f32(V);
    // Test against itself. NaN is always not equal
    uint32x2_t vTempNan = vceq_f32(VL, VL);
    // If x or y are NaN, the mask is zero
    return (vget_lane_u64(vTempNan, 0) != 0xFFFFFFFFFFFFFFFFU);
#elif defined(_XM_SSE_INTRINSICS_)
    // Test against itself. NaN is always not equal
    XMVECTOR vTempNan = _mm_cmpneq_ps(V, V);
    // If x or y are NaN, the mask is non-zero
    return ((_mm_movemask_ps(vTempNan) & 3) != 0);
#endif
}

#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma float_control(pop)
#endif

//------------------------------------------------------------------------------

// True when x or y is +/- infinity (sign stripped before comparing).
inline bool XM_CALLCONV XMVector2IsInfinite(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    return (XMISINF(V.vector4_f32[0]) ||
        XMISINF(V.vector4_f32[1]));
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Mask off the sign bit
    // NOTE(review): vand_u32 fed float32x2_t operands — upstream uses
    // vreinterpret casts here; verify this compiles outside MSVC.
    uint32x2_t vTemp = vand_u32(vget_low_f32(V), vget_low_f32(g_XMAbsMask));
    // Compare to infinity
    vTemp = vceq_f32(vTemp, vget_low_f32(g_XMInfinity));
    // If any are infinity, the signs are true.
    return vget_lane_u64(vTemp, 0) != 0;
#elif defined(_XM_SSE_INTRINSICS_)
    // Mask off the sign bit
    __m128 vTemp = _mm_and_ps(V, g_XMAbsMask);
    // Compare to infinity
    vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity);
    // If x or z are infinity, the signs are true.
    return ((_mm_movemask_ps(vTemp) & 3) != 0);
#endif
}

//------------------------------------------------------------------------------
// Computation operations
//------------------------------------------------------------------------------

//------------------------------------------------------------------------------

// 2D dot product of x,y lanes; the scalar result is splatted to all four lanes.
inline XMVECTOR XM_CALLCONV XMVector2Dot
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result;
    Result.f[0] =
        Result.f[1] =
        Result.f[2] =
        Result.f[3] = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1];
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Perform the dot product on x and y
    float32x2_t vTemp = vmul_f32(vget_low_f32(V1), vget_low_f32(V2));
    vTemp = vpadd_f32(vTemp, vTemp);
    return vcombine_f32(vTemp, vTemp);
#elif defined(_XM_SSE4_INTRINSICS_)
    // Mask 0x3f: multiply x,y lanes only, broadcast the sum to all lanes.
    return _mm_dp_ps(V1, V2, 0x3f);
#elif defined(_XM_SSE3_INTRINSICS_)
    XMVECTOR vDot = _mm_mul_ps(V1, V2);
    vDot = _mm_hadd_ps(vDot, vDot);
    vDot = _mm_moveldup_ps(vDot);
    return vDot;
#elif defined(_XM_SSE_INTRINSICS_)
    // Perform the dot product on x and y
    XMVECTOR vLengthSq = _mm_mul_ps(V1, V2);
    // vTemp has y splatted
    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
    // x+y
    vLengthSq = _mm_add_ss(vLengthSq, vTemp);
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
    return vLengthSq;
#endif
}

//------------------------------------------------------------------------------

// 2D cross product (a scalar: V1.x*V2.y - V1.y*V2.x), splatted to all lanes.
inline XMVECTOR XM_CALLCONV XMVector2Cross
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
    // [ V1.x*V2.y - V1.y*V2.x, V1.x*V2.y - V1.y*V2.x ]

#if defined(_XM_NO_INTRINSICS_)
    float fCross = (V1.vector4_f32[0] * V2.vector4_f32[1]) - (V1.vector4_f32[1] * V2.vector4_f32[0]);
    XMVECTORF32 vResult;
    vResult.f[0] =
        vResult.f[1] =
        vResult.f[2] =
        vResult.f[3] = fCross;
    return vResult.v;
#elif defined(_XM_ARM_NEON_INTRINSICS_)
    static const XMVECTORF32 Negate = { { { 1.f, -1.f, 0, 0 } } };

    // vrev64 swaps V2.x/V2.y; Negate flips the sign of the second product.
    float32x2_t vTemp = vmul_f32(vget_low_f32(V1), vrev64_f32(vget_low_f32(V2)));
    vTemp = vmul_f32(vTemp, vget_low_f32(Negate));
    vTemp = vpadd_f32(vTemp, vTemp);
    return vcombine_f32(vTemp, vTemp);
#elif defined(_XM_SSE_INTRINSICS_)
    // Swap x and y
    XMVECTOR vResult = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 1, 0, 1));
    // Perform the muls
    vResult = _mm_mul_ps(vResult, V1);
    // Splat y
    XMVECTOR vTemp = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(1, 1, 1, 1));
    // Sub the values
    vResult = _mm_sub_ss(vResult, vTemp);
    // Splat the cross product
    vResult = XM_PERMUTE_PS(vResult, _MM_SHUFFLE(0, 0, 0, 0));
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Squared 2D length, splatted (dot of V with itself).
inline XMVECTOR XM_CALLCONV XMVector2LengthSq(FXMVECTOR V) noexcept
{
    return XMVector2Dot(V, V);
}

//------------------------------------------------------------------------------

// Fast approximate 1/length of the 2D vector (hardware rsqrt estimate, no
// Newton-Raphson refinement).
inline XMVECTOR XM_CALLCONV XMVector2ReciprocalLengthEst(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR Result;
    Result = XMVector2LengthSq(V);
    Result = XMVectorReciprocalSqrtEst(Result);
    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    float32x2_t VL = vget_low_f32(V);
    // Dot2
    float32x2_t vTemp = vmul_f32(VL, VL);
    vTemp = vpadd_f32(vTemp, vTemp);
    // Reciprocal sqrt (estimate)
    vTemp = vrsqrte_f32(vTemp);
    return vcombine_f32(vTemp, vTemp);
#elif defined(_XM_SSE4_INTRINSICS_)
    XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f);
    return _mm_rsqrt_ps(vTemp);
#elif defined(_XM_SSE3_INTRINSICS_)
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_rsqrt_ss(vTemp);
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
    return vLengthSq;
#elif defined(_XM_SSE_INTRINSICS_)
    // Perform the dot product on x and y
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    // vTemp has y splatted
    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
    // x+y
    vLengthSq = _mm_add_ss(vLengthSq, vTemp);
    vLengthSq = _mm_rsqrt_ss(vLengthSq);
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
    return vLengthSq;
#endif
}

//------------------------------------------------------------------------------

// Full-precision 1/length of the 2D vector (NEON: two Newton-Raphson steps;
// SSE: true sqrt followed by divide).
inline XMVECTOR XM_CALLCONV XMVector2ReciprocalLength(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR Result;
    Result = XMVector2LengthSq(V);
    Result = XMVectorReciprocalSqrt(Result);
    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    float32x2_t VL = vget_low_f32(V);
    // Dot2
    float32x2_t vTemp = vmul_f32(VL, VL);
    vTemp = vpadd_f32(vTemp, vTemp);
    // Reciprocal sqrt: estimate refined by two Newton-Raphson iterations.
    float32x2_t S0 = vrsqrte_f32(vTemp);
    float32x2_t P0 = vmul_f32(vTemp, S0);
    float32x2_t R0 = vrsqrts_f32(P0, S0);
    float32x2_t S1 = vmul_f32(S0, R0);
    float32x2_t P1 = vmul_f32(vTemp, S1);
    float32x2_t R1 = vrsqrts_f32(P1, S1);
    float32x2_t Result = vmul_f32(S1, R1);
    return vcombine_f32(Result, Result);
#elif defined(_XM_SSE4_INTRINSICS_)
    XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f);
    XMVECTOR vLengthSq = _mm_sqrt_ps(vTemp);
    return _mm_div_ps(g_XMOne, vLengthSq);
#elif defined(_XM_SSE3_INTRINSICS_)
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_sqrt_ss(vTemp);
    vLengthSq = _mm_div_ss(g_XMOne, vLengthSq);
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
    return vLengthSq;
#elif defined(_XM_SSE_INTRINSICS_)
    // Perform the dot product on x and y
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    // vTemp has y splatted
    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
    // x+y
    vLengthSq = _mm_add_ss(vLengthSq, vTemp);
    vLengthSq = _mm_sqrt_ss(vLengthSq);
    vLengthSq = _mm_div_ss(g_XMOne, vLengthSq);
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
    return vLengthSq;
#endif
}
+//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2LengthEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector2LengthSq(V); + Result = XMVectorSqrtEst(Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + // Dot2 + float32x2_t vTemp = vmul_f32(VL, VL); + vTemp = vpadd_f32(vTemp, vTemp); + const float32x2_t zero = vdup_n_f32(0); + uint32x2_t VEqualsZero = vceq_f32(vTemp, zero); + // Sqrt (estimate) + float32x2_t Result = vrsqrte_f32(vTemp); + Result = vmul_f32(vTemp, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ss(vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x and y + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has y splatted + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1)); + // x+y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = _mm_sqrt_ss(vLengthSq); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Length(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector2LengthSq(V); + Result = XMVectorSqrt(Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + // Dot2 + float32x2_t vTemp = vmul_f32(VL, VL); + vTemp = vpadd_f32(vTemp, vTemp); + const float32x2_t zero = vdup_n_f32(0); + 
uint32x2_t VEqualsZero = vceq_f32(vTemp, zero); + // Sqrt + float32x2_t S0 = vrsqrte_f32(vTemp); + float32x2_t P0 = vmul_f32(vTemp, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(vTemp, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t Result = vmul_f32(S1, R1); + Result = vmul_f32(vTemp, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + XMVECTOR vTemp = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ss(vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x and y + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has y splatted + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1)); + // x+y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ +// XMVector2NormalizeEst uses a reciprocal estimate and +// returns QNaN on zero and infinite vectors. 
+
+inline XMVECTOR XM_CALLCONV XMVector2NormalizeEst(FXMVECTOR V) noexcept
+{
+    // Estimated 2D normalize: V * (approximate 1/length). Fast but low precision;
+    // per the note above, zero and infinite vectors return QNaN (no special-casing
+    // is done in the estimate paths below).
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMVECTOR Result;
+    Result = XMVector2ReciprocalLength(V);
+    Result = XMVectorMultiply(V, Result);
+    return Result;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x2_t VL = vget_low_f32(V);
+    // Dot2: pairwise add puts x*x + y*y in both lanes
+    float32x2_t vTemp = vmul_f32(VL, VL);
+    vTemp = vpadd_f32(vTemp, vTemp);
+    // Reciprocal sqrt (estimate only -- no Newton-Raphson refinement)
+    vTemp = vrsqrte_f32(vTemp);
+    // Normalize
+    float32x2_t Result = vmul_f32(VL, vTemp);
+    return vcombine_f32(Result, Result);
+#elif defined(_XM_SSE4_INTRINSICS_)
+    // 0x3f: multiply only x and y, broadcast the dot product to all four lanes
+    XMVECTOR vTemp = _mm_dp_ps(V, V, 0x3f);
+    XMVECTOR vResult = _mm_rsqrt_ps(vTemp);
+    return _mm_mul_ps(vResult, V);
+#elif defined(_XM_SSE3_INTRINSICS_)
+    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
+    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
+    vLengthSq = _mm_rsqrt_ss(vLengthSq);
+    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
+    vLengthSq = _mm_mul_ps(vLengthSq, V);
+    return vLengthSq;
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Perform the dot product on x and y
+    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
+    // vTemp has y splatted
+    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
+    // x+y
+    vLengthSq = _mm_add_ss(vLengthSq, vTemp);
+    vLengthSq = _mm_rsqrt_ss(vLengthSq);
+    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
+    vLengthSq = _mm_mul_ps(vLengthSq, V);
+    return vLengthSq;
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+inline XMVECTOR XM_CALLCONV XMVector2Normalize(FXMVECTOR V) noexcept
+{
+    // Full-precision 2D normalize. Degenerate inputs are handled explicitly in
+    // every path: a zero-length vector normalizes to the zero vector, and an
+    // infinite-length vector produces QNaN.
+#if defined(_XM_NO_INTRINSICS_)
+
+    XMVECTOR vResult = XMVector2Length(V);
+    float fLength = vResult.vector4_f32[0];
+
+    // Prevent divide by zero: fLength stays 0 so the products below are all zero
+    if (fLength > 0)
+    {
+        fLength = 1.0f / fLength;
+    }
+
+    vResult.vector4_f32[0] = V.vector4_f32[0] * fLength;
+    vResult.vector4_f32[1] = V.vector4_f32[1] * fLength;
+    vResult.vector4_f32[2] = V.vector4_f32[2] * fLength;
+    vResult.vector4_f32[3] = V.vector4_f32[3] * fLength;
+    return vResult;
+
+#elif defined(_XM_ARM_NEON_INTRINSICS_)
+    float32x2_t VL = vget_low_f32(V);
+    // Dot2
+    float32x2_t vTemp = vmul_f32(VL, VL);
+    vTemp = vpadd_f32(vTemp, vTemp);
+    // Record the degenerate cases (length^2 == 0 or == +inf) before refining
+    uint32x2_t VEqualsZero = vceq_f32(vTemp, vdup_n_f32(0));
+    uint32x2_t VEqualsInf = vceq_f32(vTemp, vget_low_f32(g_XMInfinity));
+    // Reciprocal sqrt (2 iterations of Newton-Raphson)
+    float32x2_t S0 = vrsqrte_f32(vTemp);
+    float32x2_t P0 = vmul_f32(vTemp, S0);
+    float32x2_t R0 = vrsqrts_f32(P0, S0);
+    float32x2_t S1 = vmul_f32(S0, R0);
+    float32x2_t P1 = vmul_f32(vTemp, S1);
+    float32x2_t R1 = vrsqrts_f32(P1, S1);
+    vTemp = vmul_f32(S1, R1);
+    // Normalize
+    float32x2_t Result = vmul_f32(VL, vTemp);
+    // Patch degenerate results: zero in -> zero out, infinite in -> QNaN out
+    Result = vbsl_f32(VEqualsZero, vdup_n_f32(0), Result);
+    Result = vbsl_f32(VEqualsInf, vget_low_f32(g_XMQNaN), Result);
+    return vcombine_f32(Result, Result);
+#elif defined(_XM_SSE4_INTRINSICS_)
+    // 0x3f: dot product of x and y, broadcast to all four lanes
+    XMVECTOR vLengthSq = _mm_dp_ps(V, V, 0x3f);
+    // Prepare for the division
+    XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
+    // Create zero with a single instruction
+    XMVECTOR vZeroMask = _mm_setzero_ps();
+    // Test for a divide by zero (Must be FP to detect -0.0)
+    vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
+    // Failsafe on zero (Or epsilon) length planes
+    // If the length is infinity, set the elements to zero
+    vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity);
+    // Reciprocal mul to perform the normalization
+    vResult = _mm_div_ps(V, vResult);
+    // Any that are infinity, set to zero
+    vResult = _mm_and_ps(vResult, vZeroMask);
+    // Select qnan or result based on infinite length
+    XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN);
+    XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq);
+    vResult = _mm_or_ps(vTemp1, vTemp2);
+    return vResult;
+#elif defined(_XM_SSE3_INTRINSICS_)
+    // Perform the dot product on x and y only
+    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
+    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
+    vLengthSq = _mm_moveldup_ps(vLengthSq);
+    // Prepare for the division
+    XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
+    // Create zero with a single instruction
+    XMVECTOR vZeroMask = _mm_setzero_ps();
+    // Test for a divide by zero (Must be FP to detect -0.0)
+    vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
+    // Failsafe on zero (Or epsilon) length planes
+    // If the length is infinity, set the elements to zero
+    vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity);
+    // Reciprocal mul to perform the normalization
+    vResult = _mm_div_ps(V, vResult);
+    // Any that are infinity, set to zero
+    vResult = _mm_and_ps(vResult, vZeroMask);
+    // Select qnan or result based on infinite length
+    XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN);
+    XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq);
+    vResult = _mm_or_ps(vTemp1, vTemp2);
+    return vResult;
+#elif defined(_XM_SSE_INTRINSICS_)
+    // Perform the dot product on x and y only
+    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
+    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
+    vLengthSq = _mm_add_ss(vLengthSq, vTemp);
+    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
+    // Prepare for the division
+    XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
+    // Create zero with a single instruction
+    XMVECTOR vZeroMask = _mm_setzero_ps();
+    // Test for a divide by zero (Must be FP to detect -0.0)
+    vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
+    // Failsafe on zero (Or epsilon) length planes
+    // If the length is infinity, set the elements to zero
+    vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity);
+    // Reciprocal mul to perform the normalization
+    vResult = _mm_div_ps(V, vResult);
+    // Any that are infinity, set to zero
+    vResult = _mm_and_ps(vResult, vZeroMask);
+    // Select qnan or result based on infinite length
+    XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN);
+    XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq);
+    vResult = _mm_or_ps(vTemp1, vTemp2);
+    return vResult;
+#endif
+}
+
+//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2ClampLength +( + FXMVECTOR V, + float LengthMin, + float LengthMax +) noexcept +{ + XMVECTOR ClampMax = XMVectorReplicate(LengthMax); + XMVECTOR ClampMin = XMVectorReplicate(LengthMin); + return XMVector2ClampLengthV(V, ClampMin, ClampMax); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2ClampLengthV +( + FXMVECTOR V, + FXMVECTOR LengthMin, + FXMVECTOR LengthMax +) noexcept +{ + assert((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin))); + assert((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax))); + assert(XMVector2GreaterOrEqual(LengthMin, g_XMZero)); + assert(XMVector2GreaterOrEqual(LengthMax, g_XMZero)); + assert(XMVector2GreaterOrEqual(LengthMax, LengthMin)); + + XMVECTOR LengthSq = XMVector2LengthSq(V); + + const XMVECTOR Zero = XMVectorZero(); + + XMVECTOR RcpLength = XMVectorReciprocalSqrt(LengthSq); + + XMVECTOR InfiniteLength = XMVectorEqualInt(LengthSq, g_XMInfinity.v); + XMVECTOR ZeroLength = XMVectorEqual(LengthSq, Zero); + + XMVECTOR Length = XMVectorMultiply(LengthSq, RcpLength); + + XMVECTOR Normal = XMVectorMultiply(V, RcpLength); + + XMVECTOR Select = XMVectorEqualInt(InfiniteLength, ZeroLength); + Length = XMVectorSelect(LengthSq, Length, Select); + Normal = XMVectorSelect(LengthSq, Normal, Select); + + XMVECTOR ControlMax = XMVectorGreater(Length, LengthMax); + XMVECTOR ControlMin = XMVectorLess(Length, LengthMin); + + XMVECTOR ClampLength = XMVectorSelect(Length, LengthMax, ControlMax); + ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin); + + XMVECTOR Result = XMVectorMultiply(Normal, ClampLength); + + // Preserve the original vector (with no precision loss) if the length falls within the given range + XMVECTOR Control = XMVectorEqualInt(ControlMax, ControlMin); + Result = XMVectorSelect(Result, V, Control); + + return Result; 
+} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Reflect +( + FXMVECTOR Incident, + FXMVECTOR Normal +) noexcept +{ + // Result = Incident - (2 * dot(Incident, Normal)) * Normal + + XMVECTOR Result; + Result = XMVector2Dot(Incident, Normal); + Result = XMVectorAdd(Result, Result); + Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Refract +( + FXMVECTOR Incident, + FXMVECTOR Normal, + float RefractionIndex +) noexcept +{ + XMVECTOR Index = XMVectorReplicate(RefractionIndex); + return XMVector2RefractV(Incident, Normal, Index); +} + +//------------------------------------------------------------------------------ + +// Return the refraction of a 2D vector +inline XMVECTOR XM_CALLCONV XMVector2RefractV +( + FXMVECTOR Incident, + FXMVECTOR Normal, + FXMVECTOR RefractionIndex +) noexcept +{ + // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) + + // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal)))) + +#if defined(_XM_NO_INTRINSICS_) + + float IDotN = (Incident.vector4_f32[0] * Normal.vector4_f32[0]) + (Incident.vector4_f32[1] * Normal.vector4_f32[1]); + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + float RY = 1.0f - (IDotN * IDotN); + float RX = 1.0f - (RY * RefractionIndex.vector4_f32[0] * RefractionIndex.vector4_f32[0]); + RY = 1.0f - (RY * RefractionIndex.vector4_f32[1] * RefractionIndex.vector4_f32[1]); + if (RX >= 0.0f) + { + RX = (RefractionIndex.vector4_f32[0] * Incident.vector4_f32[0]) - (Normal.vector4_f32[0] * ((RefractionIndex.vector4_f32[0] * IDotN) + sqrtf(RX))); + } + else + { + RX = 0.0f; + } + if (RY >= 0.0f) + { + RY = (RefractionIndex.vector4_f32[1] * Incident.vector4_f32[1]) - 
(Normal.vector4_f32[1] * ((RefractionIndex.vector4_f32[1] * IDotN) + sqrtf(RY))); + } + else + { + RY = 0.0f; + } + + XMVECTOR vResult; + vResult.vector4_f32[0] = RX; + vResult.vector4_f32[1] = RY; + vResult.vector4_f32[2] = 0.0f; + vResult.vector4_f32[3] = 0.0f; + return vResult; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t IL = vget_low_f32(Incident); + float32x2_t NL = vget_low_f32(Normal); + float32x2_t RIL = vget_low_f32(RefractionIndex); + // Get the 2D Dot product of Incident-Normal + float32x2_t vTemp = vmul_f32(IL, NL); + float32x2_t IDotN = vpadd_f32(vTemp, vTemp); + // vTemp = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + vTemp = vmls_f32(vget_low_f32(g_XMOne), IDotN, IDotN); + vTemp = vmul_f32(vTemp, RIL); + vTemp = vmls_f32(vget_low_f32(g_XMOne), vTemp, RIL); + // If any terms are <=0, sqrt() will fail, punt to zero + uint32x2_t vMask = vcgt_f32(vTemp, vget_low_f32(g_XMZero)); + // Sqrt(vTemp) + float32x2_t S0 = vrsqrte_f32(vTemp); + float32x2_t P0 = vmul_f32(vTemp, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(vTemp, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t S2 = vmul_f32(S1, R1); + vTemp = vmul_f32(vTemp, S2); + // R = RefractionIndex * IDotN + sqrt(R) + vTemp = vmla_f32(vTemp, RIL, IDotN); + // Result = RefractionIndex * Incident - Normal * R + float32x2_t vResult = vmul_f32(RIL, IL); + vResult = vmls_f32(vResult, vTemp, NL); + vResult = vand_u32(vResult, vMask); + return vcombine_f32(vResult, vResult); +#elif defined(_XM_SSE_INTRINSICS_) + // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) + + // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal)))) + // Get the 2D Dot product of Incident-Normal + XMVECTOR IDotN = XMVector2Dot(Incident, Normal); + // vTemp = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + XMVECTOR vTemp = 
XM_FNMADD_PS(IDotN, IDotN, g_XMOne); + vTemp = _mm_mul_ps(vTemp, RefractionIndex); + vTemp = XM_FNMADD_PS(vTemp, RefractionIndex, g_XMOne); + // If any terms are <=0, sqrt() will fail, punt to zero + XMVECTOR vMask = _mm_cmpgt_ps(vTemp, g_XMZero); + // R = RefractionIndex * IDotN + sqrt(R) + vTemp = _mm_sqrt_ps(vTemp); + vTemp = XM_FMADD_PS(RefractionIndex, IDotN, vTemp); + // Result = RefractionIndex * Incident - Normal * R + XMVECTOR vResult = _mm_mul_ps(RefractionIndex, Incident); + vResult = XM_FNMADD_PS(vTemp, Normal, vResult); + vResult = _mm_and_ps(vResult, vMask); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Orthogonal(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result = { { { + -V.vector4_f32[1], + V.vector4_f32[0], + 0.f, + 0.f + } } }; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + static const XMVECTORF32 Negate = { { { -1.f, 1.f, 0, 0 } } }; + const float32x2_t zero = vdup_n_f32(0); + + float32x2_t VL = vget_low_f32(V); + float32x2_t Result = vmul_f32(vrev64_f32(VL), vget_low_f32(Negate)); + return vcombine_f32(Result, zero); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 2, 0, 1)); + vResult = _mm_mul_ps(vResult, g_XMNegateX); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormalsEst +( + FXMVECTOR N1, + FXMVECTOR N2 +) noexcept +{ + XMVECTOR Result = XMVector2Dot(N1, N2); + Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v); + Result = XMVectorACosEst(Result); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormals +( + FXMVECTOR N1, + FXMVECTOR N2 +) noexcept +{ + XMVECTOR Result = XMVector2Dot(N1, N2); + Result = 
XMVectorClamp(Result, g_XMNegativeOne, g_XMOne); + Result = XMVectorACos(Result); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2AngleBetweenVectors +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + XMVECTOR L1 = XMVector2ReciprocalLength(V1); + XMVECTOR L2 = XMVector2ReciprocalLength(V2); + + XMVECTOR Dot = XMVector2Dot(V1, V2); + + L1 = XMVectorMultiply(L1, L2); + + XMVECTOR CosAngle = XMVectorMultiply(Dot, L1); + CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v); + + return XMVectorACos(CosAngle); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2LinePointDistance +( + FXMVECTOR LinePoint1, + FXMVECTOR LinePoint2, + FXMVECTOR Point +) noexcept +{ + // Given a vector PointVector from LinePoint1 to Point and a vector + // LineVector from LinePoint1 to LinePoint2, the scaled distance + // PointProjectionScale from LinePoint1 to the perpendicular projection + // of PointVector onto the line is defined as: + // + // PointProjectionScale = dot(PointVector, LineVector) / LengthSq(LineVector) + + XMVECTOR PointVector = XMVectorSubtract(Point, LinePoint1); + XMVECTOR LineVector = XMVectorSubtract(LinePoint2, LinePoint1); + + XMVECTOR LengthSq = XMVector2LengthSq(LineVector); + + XMVECTOR PointProjectionScale = XMVector2Dot(PointVector, LineVector); + PointProjectionScale = XMVectorDivide(PointProjectionScale, LengthSq); + + XMVECTOR DistanceVector = XMVectorMultiply(LineVector, PointProjectionScale); + DistanceVector = XMVectorSubtract(PointVector, DistanceVector); + + return XMVector2Length(DistanceVector); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2IntersectLine +( + FXMVECTOR Line1Point1, + FXMVECTOR Line1Point2, + FXMVECTOR Line2Point1, + GXMVECTOR Line2Point2 +) noexcept +{ +#if 
defined(_XM_NO_INTRINSICS_) || defined(_XM_ARM_NEON_INTRINSICS_) + + XMVECTOR V1 = XMVectorSubtract(Line1Point2, Line1Point1); + XMVECTOR V2 = XMVectorSubtract(Line2Point2, Line2Point1); + XMVECTOR V3 = XMVectorSubtract(Line1Point1, Line2Point1); + + XMVECTOR C1 = XMVector2Cross(V1, V2); + XMVECTOR C2 = XMVector2Cross(V2, V3); + + XMVECTOR Result; + const XMVECTOR Zero = XMVectorZero(); + if (XMVector2NearEqual(C1, Zero, g_XMEpsilon.v)) + { + if (XMVector2NearEqual(C2, Zero, g_XMEpsilon.v)) + { + // Coincident + Result = g_XMInfinity.v; + } + else + { + // Parallel + Result = g_XMQNaN.v; + } + } + else + { + // Intersection point = Line1Point1 + V1 * (C2 / C1) + XMVECTOR Scale = XMVectorReciprocal(C1); + Scale = XMVectorMultiply(C2, Scale); + Result = XMVectorMultiplyAdd(V1, Scale, Line1Point1); + } + + return Result; + +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR V1 = _mm_sub_ps(Line1Point2, Line1Point1); + XMVECTOR V2 = _mm_sub_ps(Line2Point2, Line2Point1); + XMVECTOR V3 = _mm_sub_ps(Line1Point1, Line2Point1); + // Generate the cross products + XMVECTOR C1 = XMVector2Cross(V1, V2); + XMVECTOR C2 = XMVector2Cross(V2, V3); + // If C1 is not close to epsilon, use the calculated value + XMVECTOR vResultMask = _mm_setzero_ps(); + vResultMask = _mm_sub_ps(vResultMask, C1); + vResultMask = _mm_max_ps(vResultMask, C1); + // 0xFFFFFFFF if the calculated value is to be used + vResultMask = _mm_cmpgt_ps(vResultMask, g_XMEpsilon); + // If C1 is close to epsilon, which fail type is it? INFINITY or NAN? 
+ XMVECTOR vFailMask = _mm_setzero_ps(); + vFailMask = _mm_sub_ps(vFailMask, C2); + vFailMask = _mm_max_ps(vFailMask, C2); + vFailMask = _mm_cmple_ps(vFailMask, g_XMEpsilon); + XMVECTOR vFail = _mm_and_ps(vFailMask, g_XMInfinity); + vFailMask = _mm_andnot_ps(vFailMask, g_XMQNaN); + // vFail is NAN or INF + vFail = _mm_or_ps(vFail, vFailMask); + // Intersection point = Line1Point1 + V1 * (C2 / C1) + XMVECTOR vResult = _mm_div_ps(C2, C1); + vResult = XM_FMADD_PS(vResult, V1, Line1Point1); + // Use result, or failure value + vResult = _mm_and_ps(vResult, vResultMask); + vResultMask = _mm_andnot_ps(vResultMask, vFail); + vResult = _mm_or_ps(vResult, vResultMask); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2Transform +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiplyAdd(Y, M.r[1], M.r[3]); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + float32x4_t Result = vmlaq_lane_f32(M.r[3], M.r[1], VL, 1); // Y + return vmlaq_lane_f32(Result, M.r[0], VL, 0); // X +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y + vResult = XM_FMADD_PS(vResult, M.r[1], M.r[3]); + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X + vResult = XM_FMADD_PS(vTemp, M.r[0], vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMFLOAT4* XM_CALLCONV XMVector2TransformStream +( + XMFLOAT4* pOutputStream, + size_t OutputStride, + const XMFLOAT2* pInputStream, + size_t InputStride, + size_t VectorCount, + FXMMATRIX M +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != 
nullptr); + + assert(InputStride >= sizeof(XMFLOAT2)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT2)); + + assert(OutputStride >= sizeof(XMFLOAT4)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT4)); + +#if defined(_XM_NO_INTRINSICS_) + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat2(reinterpret_cast(pInputVector)); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiplyAdd(Y, row1, row3); + Result = XMVectorMultiplyAdd(X, row0, Result); + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015, "PREfast noise: Esp:1307" ) +#endif + + XMStoreFloat4(reinterpret_cast(pOutputVector), Result); + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT2)) && (OutputStride == sizeof(XMFLOAT4))) + { + for (size_t j = 0; j < four; ++j) + { + float32x4x2_t V = vld2q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + float32x2_t r3 = vget_low_f32(row3); + float32x2_t r = vget_low_f32(row0); + XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M + XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N + + XM_PREFETCH(pInputVector); + + r3 = vget_high_f32(row3); + r = vget_high_f32(row0); + XMVECTOR vResult2 = 
vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Cx+O + XMVECTOR vResult3 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(row1); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(row1); + vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy+O + vResult3 = vmlaq_lane_f32(vResult3, V.val[1], r, 1); // Dx+Hy+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + float32x4x4_t R; + R.val[0] = vResult0; + R.val[1] = vResult1; + R.val[2] = vResult2; + R.val[3] = vResult3; + + vst4q_f32(reinterpret_cast(pOutputVector), R); + pOutputVector += sizeof(XMFLOAT4) * 4; + + i += 4; + } + } + } + + for (; i < VectorCount; i++) + { + float32x2_t V = vld1_f32(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vResult = vmlaq_lane_f32(row3, row0, V, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, V, 1); // Y + + vst1q_f32(reinterpret_cast(pOutputVector), vResult); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_AVX2_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + __m256 row0 = _mm256_broadcast_ps(&M.r[0]); + __m256 row1 = _mm256_broadcast_ps(&M.r[1]); + __m256 row3 = _mm256_broadcast_ps(&M.r[3]); + + if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT4)) + { + if (!(reinterpret_cast(pOutputStream) & 0x1F)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = 
_mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + X1 = _mm256_insertf128_ps(vTempA, _mm256_castps256_ps128(vTempA2), 1); + XM256_STREAM_PS(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT4) * 2; + + X2 = _mm256_insertf128_ps(vTempA2, _mm256_extractf128_ps(vTempA, 1), 0); + XM256_STREAM_PS(reinterpret_cast(pOutputVector), X2); + pOutputVector += sizeof(XMFLOAT4) * 2; + + i += 4; + } + } + else + { + // Packed input, packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + X1 = _mm256_insertf128_ps(vTempA, _mm256_castps256_ps128(vTempA2), 1); + _mm256_storeu_ps(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT4) * 2; + + X2 = _mm256_insertf128_ps(vTempA2, _mm256_extractf128_ps(vTempA, 1), 0); + _mm256_storeu_ps(reinterpret_cast(pOutputVector), X2); + pOutputVector += sizeof(XMFLOAT4) * 2; + + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; 
j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_castps256_ps128(vTempA)); + pOutputVector += OutputStride; + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_castps256_ps128(vTempA2)); + pOutputVector += OutputStride; + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_extractf128_ps(vTempA, 1)); + pOutputVector += OutputStride; + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_extractf128_ps(vTempA2, 1)); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + if (i < VectorCount) + { + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = 
M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t two = VectorCount >> 1; + if (two > 0) + { + if (InputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF) && !(OutputStride & 0xF)) + { + // Packed input, aligned output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 2; + } + } + else + { + // Packed input, unaligned output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 2; + } + } + } + } + + if (!(reinterpret_cast(pInputVector) & 0xF) && 
!(InputStride & 0xF)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF) && !(OutputStride & 0xF)) + { + // Aligned input, aligned output + for (; i < VectorCount; i++) + { + XMVECTOR V = _mm_castsi128_ps(_mm_loadl_epi64(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + else + { + // Aligned input, unaligned output + for (; i < VectorCount; i++) + { + XMVECTOR V = _mm_castsi128_ps(_mm_loadl_epi64(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + } + else + { + // Unaligned input + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2TransformCoord +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = 
XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiplyAdd(Y, M.r[1], M.r[3]); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + + XMVECTOR W = XMVectorSplatW(Result); + return XMVectorDivide(Result, W); +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMFLOAT2* XM_CALLCONV XMVector2TransformCoordStream +( + XMFLOAT2* pOutputStream, + size_t OutputStride, + const XMFLOAT2* pInputStream, + size_t InputStride, + size_t VectorCount, + FXMMATRIX M +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT2)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT2)); + + assert(OutputStride >= sizeof(XMFLOAT2)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT2)); + +#if defined(_XM_NO_INTRINSICS_) + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat2(reinterpret_cast(pInputVector)); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiplyAdd(Y, row1, row3); + Result = XMVectorMultiplyAdd(X, row0, Result); + + XMVECTOR W = XMVectorSplatW(Result); + + Result = XMVectorDivide(Result, W); + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015, "PREfast noise: Esp:1307" ) +#endif + + XMStoreFloat2(reinterpret_cast(pOutputVector), Result); + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + 
size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT2)) && (OutputStride == sizeof(XMFLOAT2))) + { + for (size_t j = 0; j < four; ++j) + { + float32x4x2_t V = vld2q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + float32x2_t r3 = vget_low_f32(row3); + float32x2_t r = vget_low_f32(row0); + XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M + XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N + + XM_PREFETCH(pInputVector); + + r3 = vget_high_f32(row3); + r = vget_high_f32(row0); + XMVECTOR W = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(row1); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(row1); + W = vmlaq_lane_f32(W, V.val[1], r, 1); // Dx+Hy+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + V.val[0] = vdivq_f32(vResult0, W); + V.val[1] = vdivq_f32(vResult1, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + V.val[0] = vmulq_f32(vResult0, Reciprocal); + V.val[1] = vmulq_f32(vResult1, Reciprocal); +#endif + + vst2q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + } + + for (; i < VectorCount; i++) + { + float32x2_t V = vld1_f32(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vResult = vmlaq_lane_f32(row3, row0, V, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, V, 1); // Y 
+ + V = vget_high_f32(vResult); + float32x2_t W = vdup_lane_f32(V, 1); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + V = vget_low_f32(vResult); + V = vdiv_f32(V, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal for W + float32x2_t Reciprocal = vrecpe_f32(W); + float32x2_t S = vrecps_f32(Reciprocal, W); + Reciprocal = vmul_f32(S, Reciprocal); + S = vrecps_f32(Reciprocal, W); + Reciprocal = vmul_f32(S, Reciprocal); + + V = vget_low_f32(vResult); + V = vmul_f32(V, Reciprocal); +#endif + + vst1_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_AVX2_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + __m256 row0 = _mm256_broadcast_ps(&M.r[0]); + __m256 row1 = _mm256_broadcast_ps(&M.r[1]); + __m256 row3 = _mm256_broadcast_ps(&M.r[3]); + + if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0x1F)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + __m256 W = _mm256_shuffle_ps(vTempA, vTempA, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA = _mm256_div_ps(vTempA, W); + + W 
= _mm256_shuffle_ps(vTempA2, vTempA2, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA2 = _mm256_div_ps(vTempA2, W); + + X1 = _mm256_shuffle_ps(vTempA, vTempA2, 0x44); + XM256_STREAM_PS(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + else + { + // Packed input, packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); + __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + __m256 W = _mm256_shuffle_ps(vTempA, vTempA, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA = _mm256_div_ps(vTempA, W); + + W = _mm256_shuffle_ps(vTempA2, vTempA2, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA2 = _mm256_div_ps(vTempA2, W); + + X1 = _mm256_shuffle_ps(vTempA, vTempA2, 0x44); + _mm256_storeu_ps(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempB = _mm256_fmadd_ps(Y1, row1, row3); + __m256 vTempB2 = _mm256_fmadd_ps(Y2, row1, row3); + __m256 vTempA = _mm256_mul_ps(X1, row0); 
+ __m256 vTempA2 = _mm256_mul_ps(X2, row0); + vTempA = _mm256_add_ps(vTempA, vTempB); + vTempA2 = _mm256_add_ps(vTempA2, vTempB2); + + __m256 W = _mm256_shuffle_ps(vTempA, vTempA, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA = _mm256_div_ps(vTempA, W); + + W = _mm256_shuffle_ps(vTempA2, vTempA2, _MM_SHUFFLE(3, 3, 3, 3)); + vTempA2 = _mm256_div_ps(vTempA2, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_castps256_ps128(vTempA))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_castps256_ps128(vTempA2))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_extractf128_ps(vTempA, 1))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_extractf128_ps(vTempA2, 1))); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + if (i < VectorCount) + { + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t two = VectorCount >> 1; + if (two > 0) + { 
+ if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + XMVECTOR V1 = _mm_div_ps(vTemp, W); + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + XMVECTOR V2 = _mm_div_ps(vTemp, W); + + vTemp = _mm_movelh_ps(V1, V2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += sizeof(XMFLOAT2) * 2; + + i += 2; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + XMVECTOR V1 = _mm_div_ps(vTemp, W); + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + 
XMVECTOR V2 = _mm_div_ps(vTemp, W); + + vTemp = _mm_movelh_ps(V1, V2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += sizeof(XMFLOAT2) * 2; + + i += 2; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = XM_FMADD_PS(Y, row1, row3); + vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + + i += 2; + } + } + } + } + + if (!(reinterpret_cast(pInputVector) & 0xF) && !(InputStride & 0xF)) + { + // Aligned input + for (; i < VectorCount; i++) + { + XMVECTOR V = _mm_castsi128_ps(_mm_loadl_epi64(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + else + 
{ + // Unaligned input + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Y, row1, row3); + XMVECTOR vTemp2 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector2TransformNormal +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiply(Y, M.r[1]); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + float32x4_t Result = vmulq_lane_f32(M.r[1], VL, 1); // Y + return vmlaq_lane_f32(Result, M.r[0], VL, 0); // X +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y + vResult = _mm_mul_ps(vResult, M.r[1]); + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X + vResult = XM_FMADD_PS(vTemp, M.r[0], vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline XMFLOAT2* XM_CALLCONV XMVector2TransformNormalStream +( + XMFLOAT2* pOutputStream, + size_t OutputStride, + const XMFLOAT2* pInputStream, + size_t InputStride, + size_t VectorCount, + FXMMATRIX M +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= 
sizeof(XMFLOAT2)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT2)); + + assert(OutputStride >= sizeof(XMFLOAT2)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT2)); + +#if defined(_XM_NO_INTRINSICS_) + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat2(reinterpret_cast(pInputVector)); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiply(Y, row1); + Result = XMVectorMultiplyAdd(X, row0, Result); + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015, "PREfast noise: Esp:1307" ) +#endif + + XMStoreFloat2(reinterpret_cast(pOutputVector), Result); + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT2)) && (OutputStride == sizeof(XMFLOAT2))) + { + for (size_t j = 0; j < four; ++j) + { + float32x4x2_t V = vld2q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + float32x2_t r = vget_low_f32(row0); + XMVECTOR vResult0 = vmulq_lane_f32(V.val[0], r, 0); // Ax + XMVECTOR vResult1 = vmulq_lane_f32(V.val[0], r, 1); // Bx + + XM_PREFETCH(pInputVector); + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(row1); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + XM_PREFETCH(pInputVector + 
(XM_CACHE_LINE_SIZE * 3)); + + V.val[0] = vResult0; + V.val[1] = vResult1; + + vst2q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + } + + for (; i < VectorCount; i++) + { + float32x2_t V = vld1_f32(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vResult = vmulq_lane_f32(row0, V, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, V, 1); // Y + + V = vget_low_f32(vResult); + vst1_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_AVX2_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + __m256 row0 = _mm256_broadcast_ps(&M.r[0]); + __m256 row1 = _mm256_broadcast_ps(&M.r[1]); + + if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0x1F)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempA = _mm256_mul_ps(Y1, row1); + __m256 vTempB = _mm256_mul_ps(Y2, row1); + vTempA = _mm256_fmadd_ps(X1, row0, vTempA); + vTempB = _mm256_fmadd_ps(X2, row0, vTempB); + + X1 = _mm256_shuffle_ps(vTempA, vTempB, 0x44); + XM256_STREAM_PS(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + else + { + // Packed input, packed output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += 
sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempA = _mm256_mul_ps(Y1, row1); + __m256 vTempB = _mm256_mul_ps(Y2, row1); + vTempA = _mm256_fmadd_ps(X1, row0, vTempA); + vTempB = _mm256_fmadd_ps(X2, row0, vTempB); + + X1 = _mm256_shuffle_ps(vTempA, vTempB, 0x44); + _mm256_storeu_ps(reinterpret_cast(pOutputVector), X1); + pOutputVector += sizeof(XMFLOAT2) * 4; + + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 4; + + __m256 Y2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + __m256 X2 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 Y1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 X1 = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + + __m256 vTempA = _mm256_mul_ps(Y1, row1); + __m256 vTempB = _mm256_mul_ps(Y2, row1); + vTempA = _mm256_fmadd_ps(X1, row0, vTempA); + vTempB = _mm256_fmadd_ps(X2, row0, vTempB); + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_castps256_ps128(vTempA))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_castps256_ps128(vTempB))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_extractf128_ps(vTempA, 1))); + pOutputVector += OutputStride; + + _mm_store_sd(reinterpret_cast(pOutputVector), + _mm_castps_pd(_mm256_extractf128_ps(vTempB, 1))); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + if (i < VectorCount) + { + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + + for (; i < VectorCount; i++) + { + __m128 xy = 
_mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + vTemp = XM_FMADD_PS(X, row0, vTemp); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + + size_t i = 0; + size_t two = VectorCount >> 1; + if (two > 0) + { + if (InputStride == sizeof(XMFLOAT2)) + { + if (OutputStride == sizeof(XMFLOAT2)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + XMVECTOR V1 = XM_FMADD_PS(X, row0, vTemp); + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = _mm_mul_ps(Y, row1); + XMVECTOR V2 = XM_FMADD_PS(X, row0, vTemp); + + vTemp = _mm_movelh_ps(V1, V2); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += sizeof(XMFLOAT2) * 2; + + i += 2; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + XMVECTOR 
V1 = XM_FMADD_PS(X, row0, vTemp); + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = _mm_mul_ps(Y, row1); + XMVECTOR V2 = XM_FMADD_PS(X, row0, vTemp); + + vTemp = _mm_movelh_ps(V1, V2); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += sizeof(XMFLOAT2) * 2; + + i += 2; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < two; ++j) + { + XMVECTOR V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT2) * 2; + + // Result 1 + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + vTemp = XM_FMADD_PS(X, row0, vTemp); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + + // Result 2 + Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + X = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + + vTemp = _mm_mul_ps(Y, row1); + vTemp = XM_FMADD_PS(X, row0, vTemp); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + + i += 2; + } + } + } + } + + if (!(reinterpret_cast(pInputVector) & 0xF) && !(InputStride & 0xF)) + { + // Aligned input + for (; i < VectorCount; i++) + { + XMVECTOR V = _mm_castsi128_ps(_mm_loadl_epi64(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + vTemp = XM_FMADD_PS(X, row0, vTemp); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + else + { + // Unaligned input + for (; i < VectorCount; i++) + { + __m128 xy = _mm_castpd_ps(_mm_load_sd(reinterpret_cast(pInputVector))); + pInputVector += InputStride; + + XMVECTOR Y = XM_PERMUTE_PS(xy, _MM_SHUFFLE(1, 1, 1, 1)); 
+ XMVECTOR X = XM_PERMUTE_PS(xy, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Y, row1); + vTemp = XM_FMADD_PS(X, row0, vTemp); + + _mm_store_sd(reinterpret_cast(pOutputVector), _mm_castps_pd(vTemp)); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +/**************************************************************************** + * + * 3D Vector + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Comparison operations + //------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3Equal +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1]) && (V1.vector4_f32[2] == V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector3EqualR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if ((V1.vector4_f32[0] == V2.vector4_f32[0]) && + (V1.vector4_f32[1] == V2.vector4_f32[1]) && + (V1.vector4_f32[2] == V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) && + (V1.vector4_f32[1] != V2.vector4_f32[1]) && + 
(V1.vector4_f32[2] != V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU; + + uint32_t CR = 0; + if (r == 0xFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + int iTest = _mm_movemask_ps(vTemp) & 7; + uint32_t CR = 0; + if (iTest == 7) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3EqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1]) && (V1.vector4_u32[2] == V2.vector4_u32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector3EqualIntR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if ((V1.vector4_u32[0] == V2.vector4_u32[0]) && + (V1.vector4_u32[1] == V2.vector4_u32[1]) && + (V1.vector4_u32[2] == 
V2.vector4_u32[2])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_u32[0] != V2.vector4_u32[0]) && + (V1.vector4_u32[1] != V2.vector4_u32[1]) && + (V1.vector4_u32[2] != V2.vector4_u32[2])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU; + + uint32_t CR = 0; + if (r == 0xFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + int iTemp = _mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 7; + uint32_t CR = 0; + if (iTemp == 7) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTemp) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3NearEqual +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR Epsilon +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float dx, dy, dz; + + dx = fabsf(V1.vector4_f32[0] - V2.vector4_f32[0]); + dy = fabsf(V1.vector4_f32[1] - V2.vector4_f32[1]); + dz = fabsf(V1.vector4_f32[2] - V2.vector4_f32[2]); + return (((dx <= Epsilon.vector4_f32[0]) && + (dy <= Epsilon.vector4_f32[1]) && + (dz <= Epsilon.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vDelta = vsubq_f32(V1, V2); +#ifdef _MSC_VER + uint32x4_t vResult = vacleq_f32(vDelta, Epsilon); +#else + uint32x4_t vResult = vcleq_f32(vabsq_f32(vDelta), Epsilon); +#endif + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif 
defined(_XM_SSE_INTRINSICS_) + // Get the difference + XMVECTOR vDelta = _mm_sub_ps(V1, V2); + // Get the absolute value of the difference + XMVECTOR vTemp = _mm_setzero_ps(); + vTemp = _mm_sub_ps(vTemp, vDelta); + vTemp = _mm_max_ps(vTemp, vDelta); + vTemp = _mm_cmple_ps(vTemp, Epsilon); + // w is don't care + return (((_mm_movemask_ps(vTemp) & 7) == 0x7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3NotEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1]) || (V1.vector4_f32[2] != V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) != 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) != 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3NotEqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1]) || (V1.vector4_u32[2] != V2.vector4_u32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) != 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 7) != 
7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3Greater +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1]) && (V1.vector4_f32[2] > V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgtq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector3GreaterR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if ((V1.vector4_f32[0] > V2.vector4_f32[0]) && + (V1.vector4_f32[1] > V2.vector4_f32[1]) && + (V1.vector4_f32[2] > V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] <= V2.vector4_f32[0]) && + (V1.vector4_f32[1] <= V2.vector4_f32[1]) && + (V1.vector4_f32[2] <= V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgtq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU; + + uint32_t CR = 0; + if (r == 0xFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + uint32_t CR = 0; + int iTest = _mm_movemask_ps(vTemp) & 7; + if (iTest == 7) + { + CR = 
XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3GreaterOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1]) && (V1.vector4_f32[2] >= V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgeq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector3GreaterOrEqualR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + uint32_t CR = 0; + if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) && + (V1.vector4_f32[1] >= V2.vector4_f32[1]) && + (V1.vector4_f32[2] >= V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) && + (V1.vector4_f32[1] < V2.vector4_f32[1]) && + (V1.vector4_f32[2] < V2.vector4_f32[2])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgeq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU; + + uint32_t CR = 0; + if (r == 0xFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = 
_mm_cmpge_ps(V1, V2); + uint32_t CR = 0; + int iTest = _mm_movemask_ps(vTemp) & 7; + if (iTest == 7) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3Less +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1]) && (V1.vector4_f32[2] < V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcltq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmplt_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3LessOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1]) && (V1.vector4_f32[2] <= V2.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcleq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmple_ps(V1, V2); + return (((_mm_movemask_ps(vTemp) & 7) == 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3InBounds +( + FXMVECTOR V, + FXMVECTOR Bounds +) noexcept +{ +#if 
defined(_XM_NO_INTRINSICS_) + return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) && + (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) && + (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test if less than or equal + uint32x4_t ivTemp1 = vcleq_f32(V, Bounds); + // Negate the bounds + float32x4_t vTemp2 = vnegq_f32(Bounds); + // Test if greater or equal (Reversed) + uint32x4_t ivTemp2 = vcleq_f32(vTemp2, V); + // Blend answers + ivTemp1 = vandq_u32(ivTemp1, ivTemp2); + // in bounds? + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(ivTemp1), vget_high_u8(ivTemp1)); + uint16x4x2_t vTemp3 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp3.val[1], 1) & 0xFFFFFFU) == 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Test if less than or equal + XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds); + // Negate the bounds + XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne); + // Test if greater or equal (Reversed) + vTemp2 = _mm_cmple_ps(vTemp2, V); + // Blend answers + vTemp1 = _mm_and_ps(vTemp1, vTemp2); + // x,y and z in bounds? (w is don't care) + return (((_mm_movemask_ps(vTemp1) & 0x7) == 0x7) != 0); +#else + return XMComparisonAllInBounds(XMVector3InBoundsR(V, Bounds)); +#endif +} + +//------------------------------------------------------------------------------ + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(push) +#pragma float_control(precise, on) +#endif + +inline bool XM_CALLCONV XMVector3IsNaN(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + return (XMISNAN(V.vector4_f32[0]) || + XMISNAN(V.vector4_f32[1]) || + XMISNAN(V.vector4_f32[2])); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test against itself. 
NaN is always not equal + uint32x4_t vTempNan = vceqq_f32(V, V); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempNan), vget_high_u8(vTempNan)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + // If x or y or z are NaN, the mask is zero + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) != 0xFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Test against itself. NaN is always not equal + XMVECTOR vTempNan = _mm_cmpneq_ps(V, V); + // If x or y or z are NaN, the mask is non-zero + return ((_mm_movemask_ps(vTempNan) & 7) != 0); +#endif +} + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(pop) +#endif + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector3IsInfinite(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (XMISINF(V.vector4_f32[0]) || + XMISINF(V.vector4_f32[1]) || + XMISINF(V.vector4_f32[2])); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Mask off the sign bit + uint32x4_t vTempInf = vandq_u32(V, g_XMAbsMask); + // Compare to infinity + vTempInf = vceqq_f32(vTempInf, g_XMInfinity); + // If any are infinity, the signs are true. + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempInf), vget_high_u8(vTempInf)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return ((vget_lane_u32(vTemp2.val[1], 1) & 0xFFFFFFU) != 0); +#elif defined(_XM_SSE_INTRINSICS_) + // Mask off the sign bit + __m128 vTemp = _mm_and_ps(V, g_XMAbsMask); + // Compare to infinity + vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity); + // If x,y or z are infinity, the signs are true. 
+ return ((_mm_movemask_ps(vTemp) & 7) != 0); +#endif +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Dot +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float fValue = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1] + V1.vector4_f32[2] * V2.vector4_f32[2]; + XMVECTORF32 vResult; + vResult.f[0] = + vResult.f[1] = + vResult.f[2] = + vResult.f[3] = fValue; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vTemp = vmulq_f32(V1, V2); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + return vcombine_f32(v1, v1); +#elif defined(_XM_SSE4_INTRINSICS_) + return _mm_dp_ps(V1, V2, 0x7f); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vTemp = _mm_mul_ps(V1, V2); + vTemp = _mm_and_ps(vTemp, g_XMMask3); + vTemp = _mm_hadd_ps(vTemp, vTemp); + return _mm_hadd_ps(vTemp, vTemp); +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product + XMVECTOR vDot = _mm_mul_ps(V1, V2); + // x=Dot.vector4_f32[1], y=Dot.vector4_f32[2] + XMVECTOR vTemp = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(2, 1, 2, 1)); + // Result.vector4_f32[0] = x+y + vDot = _mm_add_ss(vDot, vTemp); + // x=Dot.vector4_f32[2] + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // Result.vector4_f32[0] = (x+y)+z + vDot = _mm_add_ss(vDot, vTemp); + // Splat x + return XM_PERMUTE_PS(vDot, _MM_SHUFFLE(0, 0, 0, 0)); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Cross +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + // [ V1.y*V2.z - V1.z*V2.y, V1.z*V2.x - V1.x*V2.z, 
V1.x*V2.y - V1.y*V2.x ] + +#if defined(_XM_NO_INTRINSICS_) + XMVECTORF32 vResult = { { { + (V1.vector4_f32[1] * V2.vector4_f32[2]) - (V1.vector4_f32[2] * V2.vector4_f32[1]), + (V1.vector4_f32[2] * V2.vector4_f32[0]) - (V1.vector4_f32[0] * V2.vector4_f32[2]), + (V1.vector4_f32[0] * V2.vector4_f32[1]) - (V1.vector4_f32[1] * V2.vector4_f32[0]), + 0.0f + } } }; + return vResult.v; +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t v1xy = vget_low_f32(V1); + float32x2_t v2xy = vget_low_f32(V2); + + float32x2_t v1yx = vrev64_f32(v1xy); + float32x2_t v2yx = vrev64_f32(v2xy); + + float32x2_t v1zz = vdup_lane_f32(vget_high_f32(V1), 0); + float32x2_t v2zz = vdup_lane_f32(vget_high_f32(V2), 0); + + XMVECTOR vResult = vmulq_f32(vcombine_f32(v1yx, v1xy), vcombine_f32(v2zz, v2yx)); + vResult = vmlsq_f32(vResult, vcombine_f32(v1zz, v1yx), vcombine_f32(v2yx, v2xy)); + vResult = veorq_u32(vResult, g_XMFlipY); + return vandq_u32(vResult, g_XMMask3); +#elif defined(_XM_SSE_INTRINSICS_) + // y1,z1,x1,w1 + XMVECTOR vTemp1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(3, 0, 2, 1)); + // z2,x2,y2,w2 + XMVECTOR vTemp2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(3, 1, 0, 2)); + // Perform the left operation + XMVECTOR vResult = _mm_mul_ps(vTemp1, vTemp2); + // z1,x1,y1,w1 + vTemp1 = XM_PERMUTE_PS(vTemp1, _MM_SHUFFLE(3, 0, 2, 1)); + // y2,z2,x2,w2 + vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(3, 1, 0, 2)); + // Perform the right operation + vResult = XM_FNMADD_PS(vTemp1, vTemp2, vResult); + // Set w to zero + return _mm_and_ps(vResult, g_XMMask3); +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3LengthSq(FXMVECTOR V) noexcept +{ + return XMVector3Dot(V, V); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3ReciprocalLengthEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector3LengthSq(V); + Result = 
XMVectorReciprocalSqrtEst(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + // Reciprocal sqrt (estimate) + v2 = vrsqrte_f32(v1); + return vcombine_f32(v2, v2); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + return _mm_rsqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_rsqrt_ps(vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and y + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 2, 1, 2)); + // x+z, y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // y,y,y,y + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // x+z+y,??,??,?? 
+ vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // Splat the length squared + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the reciprocal + vLengthSq = _mm_rsqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3ReciprocalLength(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector3LengthSq(V); + Result = XMVectorReciprocalSqrt(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + // Reciprocal sqrt + float32x2_t S0 = vrsqrte_f32(v1); + float32x2_t P0 = vmul_f32(v1, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(v1, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t Result = vmul_f32(S1, R1); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + XMVECTOR vLengthSq = _mm_sqrt_ps(vTemp); + return _mm_div_ps(g_XMOne, vLengthSq); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vDot = _mm_mul_ps(V, V); + vDot = _mm_and_ps(vDot, g_XMMask3); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_sqrt_ps(vDot); + vDot = _mm_div_ps(g_XMOne, vDot); + return vDot; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product + XMVECTOR vDot = _mm_mul_ps(V, V); + // x=Dot.y, y=Dot.z + XMVECTOR vTemp = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(2, 1, 2, 1)); + // Result.x = x+y + vDot = _mm_add_ss(vDot, vTemp); + // x=Dot.z + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // Result.x = (x+y)+z + vDot = _mm_add_ss(vDot, vTemp); + // Splat x + vDot = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(0, 0, 0, 0)); + // Get 
the reciprocal + vDot = _mm_sqrt_ps(vDot); + // Get the reciprocal + vDot = _mm_div_ps(g_XMOne, vDot); + return vDot; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3LengthEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector3LengthSq(V); + Result = XMVectorSqrtEst(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + const float32x2_t zero = vdup_n_f32(0); + uint32x2_t VEqualsZero = vceq_f32(v1, zero); + // Sqrt (estimate) + float32x2_t Result = vrsqrte_f32(v1); + Result = vmul_f32(v1, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and y + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 2, 1, 2)); + // x+z, y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // y,y,y,y + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // x+z+y,??,??,?? 
+ vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // Splat the length squared + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the length + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Length(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + + Result = XMVector3LengthSq(V); + Result = XMVectorSqrt(Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + const float32x2_t zero = vdup_n_f32(0); + uint32x2_t VEqualsZero = vceq_f32(v1, zero); + // Sqrt + float32x2_t S0 = vrsqrte_f32(v1); + float32x2_t P0 = vmul_f32(v1, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(v1, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + float32x2_t Result = vmul_f32(S1, R1); + Result = vmul_f32(v1, Result); + Result = vbsl_f32(VEqualsZero, zero, Result); + return vcombine_f32(Result, Result); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + return _mm_sqrt_ps(vTemp); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + // vTemp has z and y + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 2, 1, 2)); + // x+z, y + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // y,y,y,y + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // 
x+z+y,??,??,?? + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + // Splat the length squared + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the length + vLengthSq = _mm_sqrt_ps(vLengthSq); + return vLengthSq; +#endif +} + +//------------------------------------------------------------------------------ +// XMVector3NormalizeEst uses a reciprocal estimate and +// returns QNaN on zero and infinite vectors. + +inline XMVECTOR XM_CALLCONV XMVector3NormalizeEst(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Result; + Result = XMVector3ReciprocalLength(V); + Result = XMVectorMultiply(V, Result); + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + // Reciprocal sqrt (estimate) + v2 = vrsqrte_f32(v1); + // Normalize + return vmulq_f32(V, vcombine_f32(v2, v2)); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vTemp = _mm_dp_ps(V, V, 0x7f); + XMVECTOR vResult = _mm_rsqrt_ps(vTemp); + return _mm_mul_ps(vResult, V); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vDot = _mm_mul_ps(V, V); + vDot = _mm_and_ps(vDot, g_XMMask3); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_hadd_ps(vDot, vDot); + vDot = _mm_rsqrt_ps(vDot); + vDot = _mm_mul_ps(vDot, V); + return vDot; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product + XMVECTOR vDot = _mm_mul_ps(V, V); + // x=Dot.y, y=Dot.z + XMVECTOR vTemp = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(2, 1, 2, 1)); + // Result.x = x+y + vDot = _mm_add_ss(vDot, vTemp); + // x=Dot.z + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + // Result.x = (x+y)+z + vDot = _mm_add_ss(vDot, vTemp); + // Splat x + vDot = XM_PERMUTE_PS(vDot, _MM_SHUFFLE(0, 0, 0, 0)); + // Get the reciprocal + vDot = _mm_rsqrt_ps(vDot); + // Perform the normalization + vDot = _mm_mul_ps(vDot, V); + return 
vDot; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Normalize(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float fLength; + XMVECTOR vResult; + + vResult = XMVector3Length(V); + fLength = vResult.vector4_f32[0]; + + // Prevent divide by zero + if (fLength > 0) + { + fLength = 1.0f / fLength; + } + + vResult.vector4_f32[0] = V.vector4_f32[0] * fLength; + vResult.vector4_f32[1] = V.vector4_f32[1] * fLength; + vResult.vector4_f32[2] = V.vector4_f32[2] * fLength; + vResult.vector4_f32[3] = V.vector4_f32[3] * fLength; + return vResult; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Dot3 + float32x4_t vTemp = vmulq_f32(V, V); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vpadd_f32(v1, v1); + v2 = vdup_lane_f32(v2, 0); + v1 = vadd_f32(v1, v2); + uint32x2_t VEqualsZero = vceq_f32(v1, vdup_n_f32(0)); + uint32x2_t VEqualsInf = vceq_f32(v1, vget_low_f32(g_XMInfinity)); + // Reciprocal sqrt (2 iterations of Newton-Raphson) + float32x2_t S0 = vrsqrte_f32(v1); + float32x2_t P0 = vmul_f32(v1, S0); + float32x2_t R0 = vrsqrts_f32(P0, S0); + float32x2_t S1 = vmul_f32(S0, R0); + float32x2_t P1 = vmul_f32(v1, S1); + float32x2_t R1 = vrsqrts_f32(P1, S1); + v2 = vmul_f32(S1, R1); + // Normalize + XMVECTOR vResult = vmulq_f32(V, vcombine_f32(v2, v2)); + vResult = vbslq_f32(vcombine_f32(VEqualsZero, VEqualsZero), vdupq_n_f32(0), vResult); + return vbslq_f32(vcombine_f32(VEqualsInf, VEqualsInf), g_XMQNaN, vResult); +#elif defined(_XM_SSE4_INTRINSICS_) + XMVECTOR vLengthSq = _mm_dp_ps(V, V, 0x7f); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the 
elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Divide to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#elif defined(_XM_SSE3_INTRINSICS_) + // Perform the dot product on x,y and z only + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq); + // Prepare for the division + XMVECTOR vResult = _mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Divide to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Perform the dot product on x,y and z only + XMVECTOR vLengthSq = _mm_mul_ps(V, V); + XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 1, 2, 1)); + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vTemp = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(1, 1, 1, 1)); + vLengthSq = _mm_add_ss(vLengthSq, vTemp); + vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0)); + // Prepare for the division + XMVECTOR vResult = 
_mm_sqrt_ps(vLengthSq); + // Create zero with a single instruction + XMVECTOR vZeroMask = _mm_setzero_ps(); + // Test for a divide by zero (Must be FP to detect -0.0) + vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult); + // Failsafe on zero (Or epsilon) length planes + // If the length is infinity, set the elements to zero + vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity); + // Divide to perform the normalization + vResult = _mm_div_ps(V, vResult); + // Any that are infinity, set to zero + vResult = _mm_and_ps(vResult, vZeroMask); + // Select qnan or result based on infinite length + XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN); + XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq); + vResult = _mm_or_ps(vTemp1, vTemp2); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3ClampLength +( + FXMVECTOR V, + float LengthMin, + float LengthMax +) noexcept +{ + XMVECTOR ClampMax = XMVectorReplicate(LengthMax); + XMVECTOR ClampMin = XMVectorReplicate(LengthMin); + + return XMVector3ClampLengthV(V, ClampMin, ClampMax); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3ClampLengthV +( + FXMVECTOR V, + FXMVECTOR LengthMin, + FXMVECTOR LengthMax +) noexcept +{ + assert((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetZ(LengthMin) == XMVectorGetX(LengthMin))); + assert((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetZ(LengthMax) == XMVectorGetX(LengthMax))); + assert(XMVector3GreaterOrEqual(LengthMin, XMVectorZero())); + assert(XMVector3GreaterOrEqual(LengthMax, XMVectorZero())); + assert(XMVector3GreaterOrEqual(LengthMax, LengthMin)); + + XMVECTOR LengthSq = XMVector3LengthSq(V); + + const XMVECTOR Zero = XMVectorZero(); + + XMVECTOR RcpLength = XMVectorReciprocalSqrt(LengthSq); + + XMVECTOR InfiniteLength = XMVectorEqualInt(LengthSq, g_XMInfinity.v); + 
XMVECTOR ZeroLength = XMVectorEqual(LengthSq, Zero); + + XMVECTOR Normal = XMVectorMultiply(V, RcpLength); + + XMVECTOR Length = XMVectorMultiply(LengthSq, RcpLength); + + XMVECTOR Select = XMVectorEqualInt(InfiniteLength, ZeroLength); + Length = XMVectorSelect(LengthSq, Length, Select); + Normal = XMVectorSelect(LengthSq, Normal, Select); + + XMVECTOR ControlMax = XMVectorGreater(Length, LengthMax); + XMVECTOR ControlMin = XMVectorLess(Length, LengthMin); + + XMVECTOR ClampLength = XMVectorSelect(Length, LengthMax, ControlMax); + ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin); + + XMVECTOR Result = XMVectorMultiply(Normal, ClampLength); + + // Preserve the original vector (with no precision loss) if the length falls within the given range + XMVECTOR Control = XMVectorEqualInt(ControlMax, ControlMin); + Result = XMVectorSelect(Result, V, Control); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Reflect +( + FXMVECTOR Incident, + FXMVECTOR Normal +) noexcept +{ + // Result = Incident - (2 * dot(Incident, Normal)) * Normal + + XMVECTOR Result = XMVector3Dot(Incident, Normal); + Result = XMVectorAdd(Result, Result); + Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident); + + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Refract +( + FXMVECTOR Incident, + FXMVECTOR Normal, + float RefractionIndex +) noexcept +{ + XMVECTOR Index = XMVectorReplicate(RefractionIndex); + return XMVector3RefractV(Incident, Normal, Index); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3RefractV +( + FXMVECTOR Incident, + FXMVECTOR Normal, + FXMVECTOR RefractionIndex +) noexcept +{ + // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) + + // sqrt(1 - 
RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal)))) + +#if defined(_XM_NO_INTRINSICS_) + + const XMVECTOR Zero = XMVectorZero(); + + XMVECTOR IDotN = XMVector3Dot(Incident, Normal); + + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + XMVECTOR R = XMVectorNegativeMultiplySubtract(IDotN, IDotN, g_XMOne.v); + R = XMVectorMultiply(R, RefractionIndex); + R = XMVectorNegativeMultiplySubtract(R, RefractionIndex, g_XMOne.v); + + if (XMVector4LessOrEqual(R, Zero)) + { + // Total internal reflection + return Zero; + } + else + { + // R = RefractionIndex * IDotN + sqrt(R) + R = XMVectorSqrt(R); + R = XMVectorMultiplyAdd(RefractionIndex, IDotN, R); + + // Result = RefractionIndex * Incident - Normal * R + XMVECTOR Result = XMVectorMultiply(RefractionIndex, Incident); + Result = XMVectorNegativeMultiplySubtract(Normal, R, Result); + + return Result; + } + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + XMVECTOR IDotN = XMVector3Dot(Incident, Normal); + + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + float32x4_t R = vmlsq_f32(g_XMOne, IDotN, IDotN); + R = vmulq_f32(R, RefractionIndex); + R = vmlsq_f32(g_XMOne, R, RefractionIndex); + + uint32x4_t vResult = vcleq_f32(R, g_XMZero); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + if (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU) + { + // Total internal reflection + vResult = g_XMZero; + } + else + { + // Sqrt(R) + float32x4_t S0 = vrsqrteq_f32(R); + float32x4_t P0 = vmulq_f32(R, S0); + float32x4_t R0 = vrsqrtsq_f32(P0, S0); + float32x4_t S1 = vmulq_f32(S0, R0); + float32x4_t P1 = vmulq_f32(R, S1); + float32x4_t R1 = vrsqrtsq_f32(P1, S1); + float32x4_t S2 = vmulq_f32(S1, R1); + R = vmulq_f32(R, S2); + // R = RefractionIndex * IDotN + sqrt(R) + R = vmlaq_f32(R, RefractionIndex, IDotN); + // Result = RefractionIndex * Incident - Normal * R + vResult = 
vmulq_f32(RefractionIndex, Incident); + vResult = vmlsq_f32(vResult, R, Normal); + } + return vResult; +#elif defined(_XM_SSE_INTRINSICS_) + // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) + + // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal)))) + XMVECTOR IDotN = XMVector3Dot(Incident, Normal); + // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN) + XMVECTOR R = XM_FNMADD_PS(IDotN, IDotN, g_XMOne); + XMVECTOR R2 = _mm_mul_ps(RefractionIndex, RefractionIndex); + R = XM_FNMADD_PS(R, R2, g_XMOne); + + XMVECTOR vResult = _mm_cmple_ps(R, g_XMZero); + if (_mm_movemask_ps(vResult) == 0x0f) + { + // Total internal reflection + vResult = g_XMZero; + } + else + { + // R = RefractionIndex * IDotN + sqrt(R) + R = _mm_sqrt_ps(R); + R = XM_FMADD_PS(RefractionIndex, IDotN, R); + // Result = RefractionIndex * Incident - Normal * R + vResult = _mm_mul_ps(RefractionIndex, Incident); + vResult = XM_FNMADD_PS(R, Normal, vResult); + } + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Orthogonal(FXMVECTOR V) noexcept +{ + XMVECTOR Zero = XMVectorZero(); + XMVECTOR Z = XMVectorSplatZ(V); + XMVECTOR YZYY = XMVectorSwizzle(V); + + XMVECTOR NegativeV = XMVectorSubtract(Zero, V); + + XMVECTOR ZIsNegative = XMVectorLess(Z, Zero); + XMVECTOR YZYYIsNegative = XMVectorLess(YZYY, Zero); + + XMVECTOR S = XMVectorAdd(YZYY, Z); + XMVECTOR D = XMVectorSubtract(YZYY, Z); + + XMVECTOR Select = XMVectorEqualInt(ZIsNegative, YZYYIsNegative); + + XMVECTOR R0 = XMVectorPermute(NegativeV, S); + XMVECTOR R1 = XMVectorPermute(V, D); + + return XMVectorSelect(R1, R0, Select); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormalsEst +( + FXMVECTOR N1, + FXMVECTOR N2 +) noexcept +{ + XMVECTOR Result = 
XMVector3Dot(N1, N2); + Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v); + Result = XMVectorACosEst(Result); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormals +( + FXMVECTOR N1, + FXMVECTOR N2 +) noexcept +{ + XMVECTOR Result = XMVector3Dot(N1, N2); + Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v); + Result = XMVectorACos(Result); + return Result; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3AngleBetweenVectors +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + XMVECTOR L1 = XMVector3ReciprocalLength(V1); + XMVECTOR L2 = XMVector3ReciprocalLength(V2); + + XMVECTOR Dot = XMVector3Dot(V1, V2); + + L1 = XMVectorMultiply(L1, L2); + + XMVECTOR CosAngle = XMVectorMultiply(Dot, L1); + CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v); + + return XMVectorACos(CosAngle); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3LinePointDistance +( + FXMVECTOR LinePoint1, + FXMVECTOR LinePoint2, + FXMVECTOR Point +) noexcept +{ + // Given a vector PointVector from LinePoint1 to Point and a vector + // LineVector from LinePoint1 to LinePoint2, the scaled distance + // PointProjectionScale from LinePoint1 to the perpendicular projection + // of PointVector onto the line is defined as: + // + // PointProjectionScale = dot(PointVector, LineVector) / LengthSq(LineVector) + + XMVECTOR PointVector = XMVectorSubtract(Point, LinePoint1); + XMVECTOR LineVector = XMVectorSubtract(LinePoint2, LinePoint1); + + XMVECTOR LengthSq = XMVector3LengthSq(LineVector); + + XMVECTOR PointProjectionScale = XMVector3Dot(PointVector, LineVector); + PointProjectionScale = XMVectorDivide(PointProjectionScale, LengthSq); + + XMVECTOR DistanceVector = XMVectorMultiply(LineVector, PointProjectionScale); 
+ DistanceVector = XMVectorSubtract(PointVector, DistanceVector); + + return XMVector3Length(DistanceVector); +} + +//------------------------------------------------------------------------------ + +_Use_decl_annotations_ +inline void XM_CALLCONV XMVector3ComponentsFromNormal +( + XMVECTOR* pParallel, + XMVECTOR* pPerpendicular, + FXMVECTOR V, + FXMVECTOR Normal +) noexcept +{ + assert(pParallel != nullptr); + assert(pPerpendicular != nullptr); + + XMVECTOR Scale = XMVector3Dot(V, Normal); + + XMVECTOR Parallel = XMVectorMultiply(Normal, Scale); + + *pParallel = Parallel; + *pPerpendicular = XMVectorSubtract(V, Parallel); +} + +//------------------------------------------------------------------------------ +// Transform a vector using a rotation expressed as a unit quaternion + +inline XMVECTOR XM_CALLCONV XMVector3Rotate +( + FXMVECTOR V, + FXMVECTOR RotationQuaternion +) noexcept +{ + XMVECTOR A = XMVectorSelect(g_XMSelect1110.v, V, g_XMSelect1110.v); + XMVECTOR Q = XMQuaternionConjugate(RotationQuaternion); + XMVECTOR Result = XMQuaternionMultiply(Q, A); + return XMQuaternionMultiply(Result, RotationQuaternion); +} + +//------------------------------------------------------------------------------ +// Transform a vector using the inverse of a rotation expressed as a unit quaternion + +inline XMVECTOR XM_CALLCONV XMVector3InverseRotate +( + FXMVECTOR V, + FXMVECTOR RotationQuaternion +) noexcept +{ + XMVECTOR A = XMVectorSelect(g_XMSelect1110.v, V, g_XMSelect1110.v); + XMVECTOR Result = XMQuaternionMultiply(RotationQuaternion, A); + XMVECTOR Q = XMQuaternionConjugate(RotationQuaternion); + return XMQuaternionMultiply(Result, Q); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Transform +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Z = XMVectorSplatZ(V); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR 
Result = XMVectorMultiplyAdd(Z, M.r[2], M.r[3]); + Result = XMVectorMultiplyAdd(Y, M.r[1], Result); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + XMVECTOR vResult = vmlaq_lane_f32(M.r[3], M.r[0], VL, 0); // X + vResult = vmlaq_lane_f32(vResult, M.r[1], VL, 1); // Y + return vmlaq_lane_f32(vResult, M.r[2], vget_high_f32(V), 0); // Z +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); // Z + vResult = XM_FMADD_PS(vResult, M.r[2], M.r[3]); + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y + vResult = XM_FMADD_PS(vTemp, M.r[1], vResult); + vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X + vResult = XM_FMADD_PS(vTemp, M.r[0], vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" ) +#endif + +_Use_decl_annotations_ +inline XMFLOAT4* XM_CALLCONV XMVector3TransformStream +( + XMFLOAT4* pOutputStream, + size_t OutputStride, + const XMFLOAT3* pInputStream, + size_t InputStride, + size_t VectorCount, + FXMMATRIX M +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT3)); + + assert(OutputStride >= sizeof(XMFLOAT4)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT4)); + +#if defined(_XM_NO_INTRINSICS_) + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + XMVECTOR Z = XMVectorSplatZ(V); + XMVECTOR Y = 
XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiplyAdd(Z, row2, row3); + Result = XMVectorMultiplyAdd(Y, row1, Result); + Result = XMVectorMultiplyAdd(X, row0, Result); + + XMStoreFloat4(reinterpret_cast(pOutputVector), Result); + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT4))) + { + for (size_t j = 0; j < four; ++j) + { + float32x4x3_t V = vld3q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT3) * 4; + + float32x2_t r3 = vget_low_f32(row3); + float32x2_t r = vget_low_f32(row0); + XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M + XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N + + XM_PREFETCH(pInputVector); + + r3 = vget_high_f32(row3); + r = vget_high_f32(row0); + XMVECTOR vResult2 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Cx+O + XMVECTOR vResult3 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(row1); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(row1); + vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy+O + vResult3 = vmlaq_lane_f32(vResult3, V.val[1], r, 1); // Dx+Hy+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + r = vget_low_f32(row2); + vResult0 = 
vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4)); + + r = vget_high_f32(row2); + vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz+O + vResult3 = vmlaq_lane_f32(vResult3, V.val[2], r, 1); // Dx+Hy+Lz+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5)); + + float32x4x4_t R; + R.val[0] = vResult0; + R.val[1] = vResult1; + R.val[2] = vResult2; + R.val[3] = vResult3; + + vst4q_f32(reinterpret_cast(pOutputVector), R); + pOutputVector += sizeof(XMFLOAT4) * 4; + + i += 4; + } + } + } + + for (; i < VectorCount; i++) + { + float32x2_t VL = vld1_f32(reinterpret_cast(pInputVector)); + float32x2_t zero = vdup_n_f32(0); + float32x2_t VH = vld1_lane_f32(reinterpret_cast(pInputVector) + 2, zero, 0); + pInputVector += InputStride; + + XMVECTOR vResult = vmlaq_lane_f32(row3, row0, VL, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, VL, 1); // Y + vResult = vmlaq_lane_f32(vResult, row2, VH, 0); // Z + + vst1q_f32(reinterpret_cast(pOutputVector), vResult); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if (InputStride == sizeof(XMFLOAT3)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF) && !(OutputStride & 0xF)) + { + // Packed input, aligned output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) 
+ XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 4; + } + } + else + { + // Packed input, unaligned output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = 
_mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + 
pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + if (!(reinterpret_cast(pOutputStream) & 0xF) && !(OutputStride & 0xF)) + { + // Aligned output + for (; i < VectorCount; ++i) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + else + { + // Unaligned output + for (; i < VectorCount; ++i) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3TransformCoord +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ + XMVECTOR Z = XMVectorSplatZ(V); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiplyAdd(Z, M.r[2], M.r[3]); + Result = XMVectorMultiplyAdd(Y, M.r[1], Result); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + + XMVECTOR W = 
XMVectorSplatW(Result); + return XMVectorDivide(Result, W); +} + +//------------------------------------------------------------------------------ + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" ) +#endif + +_Use_decl_annotations_ +inline XMFLOAT3* XM_CALLCONV XMVector3TransformCoordStream +( + XMFLOAT3* pOutputStream, + size_t OutputStride, + const XMFLOAT3* pInputStream, + size_t InputStride, + size_t VectorCount, + FXMMATRIX M +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT3)); + + assert(OutputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3)); + +#if defined(_XM_NO_INTRINSICS_) + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + XMVECTOR Z = XMVectorSplatZ(V); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiplyAdd(Z, row2, row3); + Result = XMVectorMultiplyAdd(Y, row1, Result); + Result = XMVectorMultiplyAdd(X, row0, Result); + + XMVECTOR W = XMVectorSplatW(Result); + + Result = XMVectorDivide(Result, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), Result); + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t four = VectorCount >> 2; + 
if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3))) + { + for (size_t j = 0; j < four; ++j) + { + float32x4x3_t V = vld3q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT3) * 4; + + float32x2_t r3 = vget_low_f32(row3); + float32x2_t r = vget_low_f32(row0); + XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M + XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N + + XM_PREFETCH(pInputVector); + + r3 = vget_high_f32(row3); + r = vget_high_f32(row0); + XMVECTOR vResult2 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Cx+O + XMVECTOR W = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(row1); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(row1); + vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy+O + W = vmlaq_lane_f32(W, V.val[1], r, 1); // Dx+Hy+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + r = vget_low_f32(row2); + vResult0 = vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4)); + + r = vget_high_f32(row2); + vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz+O + W = vmlaq_lane_f32(W, V.val[2], r, 1); // Dx+Hy+Lz+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5)); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + V.val[0] = vdivq_f32(vResult0, W); + V.val[1] = vdivq_f32(vResult1, W); + V.val[2] = vdivq_f32(vResult2, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + 
Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + V.val[0] = vmulq_f32(vResult0, Reciprocal); + V.val[1] = vmulq_f32(vResult1, Reciprocal); + V.val[2] = vmulq_f32(vResult2, Reciprocal); +#endif + + vst3q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT3) * 4; + + i += 4; + } + } + } + + for (; i < VectorCount; i++) + { + float32x2_t VL = vld1_f32(reinterpret_cast(pInputVector)); + float32x2_t zero = vdup_n_f32(0); + float32x2_t VH = vld1_lane_f32(reinterpret_cast(pInputVector) + 2, zero, 0); + pInputVector += InputStride; + + XMVECTOR vResult = vmlaq_lane_f32(row3, row0, VL, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, VL, 1); // Y + vResult = vmlaq_lane_f32(vResult, row2, VH, 0); // Z + + VH = vget_high_f32(vResult); + XMVECTOR W = vdupq_lane_f32(VH, 1); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + vResult = vdivq_f32(vResult, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal for W + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + vResult = vmulq_f32(vResult, Reciprocal); +#endif + + VL = vget_low_f32(vResult); + vst1_f32(reinterpret_cast(pOutputVector), VL); + vst1q_lane_f32(reinterpret_cast(pOutputVector) + 2, vResult, 2); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if (InputStride == sizeof(XMFLOAT3)) + { + if (OutputStride == sizeof(XMFLOAT3)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) 
+ { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V1 = _mm_div_ps(vTemp, W); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V2 = _mm_div_ps(vTemp, W); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V3 = _mm_div_ps(vTemp, W); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + 
vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V4 = _mm_div_ps(vTemp, W); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector), V1); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 16), vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V1 = _mm_div_ps(vTemp, W); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V2 = _mm_div_ps(vTemp, W); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = 
XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V3 = _mm_div_ps(vTemp, W); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + V4 = _mm_div_ps(vTemp, W); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector), V1); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 16), vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + Z = 
XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, row2, row3); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + for (; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, row2, row3); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = 
_mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3TransformNormal +( + FXMVECTOR V, + FXMMATRIX M +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTOR Z = XMVectorSplatZ(V); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiply(Z, M.r[2]); + Result = XMVectorMultiplyAdd(Y, M.r[1], Result); + Result = XMVectorMultiplyAdd(X, M.r[0], Result); + + return Result; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + XMVECTOR vResult = vmulq_lane_f32(M.r[0], VL, 0); // X + vResult = vmlaq_lane_f32(vResult, M.r[1], VL, 1); // Y + return vmlaq_lane_f32(vResult, M.r[2], vget_high_f32(V), 0); // Z +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); // Z + vResult = _mm_mul_ps(vResult, M.r[2]); + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y + vResult = XM_FMADD_PS(vTemp, M.r[1], vResult); + vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X + vResult = XM_FMADD_PS(vTemp, M.r[0], vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" ) +#endif + +_Use_decl_annotations_ +inline XMFLOAT3* XM_CALLCONV XMVector3TransformNormalStream +( + XMFLOAT3* pOutputStream, + size_t OutputStride, + const XMFLOAT3* pInputStream, + size_t InputStride, + size_t VectorCount, + FXMMATRIX M +) noexcept +{ + 
assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT3)); + + assert(OutputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3)); + +#if defined(_XM_NO_INTRINSICS_) + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + XMVECTOR Z = XMVectorSplatZ(V); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiply(Z, row2); + Result = XMVectorMultiplyAdd(Y, row1, Result); + Result = XMVectorMultiplyAdd(X, row0, Result); + + XMStoreFloat3(reinterpret_cast(pOutputVector), Result); + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3))) + { + for (size_t j = 0; j < four; ++j) + { + float32x4x3_t V = vld3q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT3) * 4; + + float32x2_t r = vget_low_f32(row0); + XMVECTOR vResult0 = vmulq_lane_f32(V.val[0], r, 0); // Ax + XMVECTOR vResult1 = vmulq_lane_f32(V.val[0], r, 1); // Bx + + XM_PREFETCH(pInputVector); + + r = vget_high_f32(row0); + XMVECTOR vResult2 = vmulq_lane_f32(V.val[0], r, 0); // Cx + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(row1); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // 
Ax+Ey + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(row1); + vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + r = vget_low_f32(row2); + vResult0 = vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz + vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4)); + + r = vget_high_f32(row2); + vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5)); + + V.val[0] = vResult0; + V.val[1] = vResult1; + V.val[2] = vResult2; + + vst3q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT3) * 4; + + i += 4; + } + } + } + + for (; i < VectorCount; i++) + { + float32x2_t VL = vld1_f32(reinterpret_cast(pInputVector)); + float32x2_t zero = vdup_n_f32(0); + float32x2_t VH = vld1_lane_f32(reinterpret_cast(pInputVector) + 2, zero, 0); + pInputVector += InputStride; + + XMVECTOR vResult = vmulq_lane_f32(row0, VL, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, VL, 1); // Y + vResult = vmlaq_lane_f32(vResult, row2, VH, 0); // Z + + VL = vget_low_f32(vResult); + vst1_f32(reinterpret_cast(pOutputVector), VL); + vst1q_lane_f32(reinterpret_cast(pOutputVector) + 2, vResult, 2); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if (InputStride == sizeof(XMFLOAT3)) + { + if (OutputStride == sizeof(XMFLOAT3)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { 
+ __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Z, row2); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V1 = _mm_add_ps(vTemp, vTemp3); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V2 = _mm_add_ps(vTemp, vTemp3); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V3 = _mm_add_ps(vTemp, vTemp3); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V4 = _mm_add_ps(vTemp, vTemp3); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector), V1); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 16), vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + else 
+ { + // Packed input, unaligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Z, row2); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V1 = _mm_add_ps(vTemp, vTemp3); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V2 = _mm_add_ps(vTemp, vTemp3); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V3 = _mm_add_ps(vTemp, vTemp3); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + V4 = _mm_add_ps(vTemp, vTemp3); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector), V1); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 16), vTemp); + 
_mm_storeu_ps(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Z, row2); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = 
XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = _mm_mul_ps(Z, row2); + vTemp2 = _mm_mul_ps(Y, row1); + vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + for (; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = _mm_mul_ps(Z, row2); + XMVECTOR vTemp2 = _mm_mul_ps(Y, row1); + XMVECTOR vTemp3 = _mm_mul_ps(X, row0); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Project +( + FXMVECTOR V, + float ViewportX, + float ViewportY, + float ViewportWidth, + float ViewportHeight, + float ViewportMinZ, + float ViewportMaxZ, + FXMMATRIX Projection, + CXMMATRIX View, + CXMMATRIX World +) noexcept +{ + const float HalfViewportWidth = ViewportWidth * 0.5f; + const float HalfViewportHeight = ViewportHeight * 0.5f; + + XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 0.0f); + XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + + XMVECTOR Result = XMVector3TransformCoord(V, Transform); + + Result = XMVectorMultiplyAdd(Result, Scale, Offset); + + return Result; +} + 
+//------------------------------------------------------------------------------ + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" ) +#endif + +_Use_decl_annotations_ +inline XMFLOAT3* XM_CALLCONV XMVector3ProjectStream +( + XMFLOAT3* pOutputStream, + size_t OutputStride, + const XMFLOAT3* pInputStream, + size_t InputStride, + size_t VectorCount, + float ViewportX, + float ViewportY, + float ViewportWidth, + float ViewportHeight, + float ViewportMinZ, + float ViewportMaxZ, + FXMMATRIX Projection, + CXMMATRIX View, + CXMMATRIX World +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT3)); + + assert(OutputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3)); + +#if defined(_XM_NO_INTRINSICS_) + + const float HalfViewportWidth = ViewportWidth * 0.5f; + const float HalfViewportHeight = ViewportHeight * 0.5f; + + XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 1.0f); + XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + + XMVECTOR Result = XMVector3TransformCoord(V, Transform); + Result = XMVectorMultiplyAdd(Result, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), Result); + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + const float HalfViewportWidth = ViewportWidth * 0.5f; + const float HalfViewportHeight = 
ViewportHeight * 0.5f; + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3))) + { + XMVECTOR ScaleX = vdupq_n_f32(HalfViewportWidth); + XMVECTOR ScaleY = vdupq_n_f32(-HalfViewportHeight); + XMVECTOR ScaleZ = vdupq_n_f32(ViewportMaxZ - ViewportMinZ); + + XMVECTOR OffsetX = vdupq_n_f32(ViewportX + HalfViewportWidth); + XMVECTOR OffsetY = vdupq_n_f32(ViewportY + HalfViewportHeight); + XMVECTOR OffsetZ = vdupq_n_f32(ViewportMinZ); + + for (size_t j = 0; j < four; ++j) + { + float32x4x3_t V = vld3q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT3) * 4; + + float32x2_t r3 = vget_low_f32(Transform.r[3]); + float32x2_t r = vget_low_f32(Transform.r[0]); + XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Ax+M + XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Bx+N + + XM_PREFETCH(pInputVector); + + r3 = vget_high_f32(Transform.r[3]); + r = vget_high_f32(Transform.r[0]); + XMVECTOR vResult2 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), V.val[0], r, 0); // Cx+O + XMVECTOR W = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), V.val[0], r, 1); // Dx+P + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(Transform.r[1]); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(Transform.r[1]); + vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy+O + W = vmlaq_lane_f32(W, V.val[1], r, 1); // Dx+Hy+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + r = vget_low_f32(Transform.r[2]); + vResult0 = 
vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz+M + vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4)); + + r = vget_high_f32(Transform.r[2]); + vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz+O + W = vmlaq_lane_f32(W, V.val[2], r, 1); // Dx+Hy+Lz+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5)); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + vResult0 = vdivq_f32(vResult0, W); + vResult1 = vdivq_f32(vResult1, W); + vResult2 = vdivq_f32(vResult2, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + vResult0 = vmulq_f32(vResult0, Reciprocal); + vResult1 = vmulq_f32(vResult1, Reciprocal); + vResult2 = vmulq_f32(vResult2, Reciprocal); +#endif + + V.val[0] = vmlaq_f32(OffsetX, vResult0, ScaleX); + V.val[1] = vmlaq_f32(OffsetY, vResult1, ScaleY); + V.val[2] = vmlaq_f32(OffsetZ, vResult2, ScaleZ); + + vst3q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT3) * 4; + + i += 4; + } + } + } + + if (i < VectorCount) + { + XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 1.0f); + XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f); + + for (; i < VectorCount; i++) + { + float32x2_t VL = vld1_f32(reinterpret_cast(pInputVector)); + float32x2_t zero = vdup_n_f32(0); + float32x2_t VH = vld1_lane_f32(reinterpret_cast(pInputVector) + 2, zero, 0); + pInputVector += InputStride; + + XMVECTOR vResult = vmlaq_lane_f32(Transform.r[3], Transform.r[0], VL, 0); // X + vResult = vmlaq_lane_f32(vResult, Transform.r[1], VL, 1); // Y + vResult = vmlaq_lane_f32(vResult, Transform.r[2], VH, 0); // Z + + VH = 
vget_high_f32(vResult); + XMVECTOR W = vdupq_lane_f32(VH, 1); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + vResult = vdivq_f32(vResult, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal for W + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + vResult = vmulq_f32(vResult, Reciprocal); +#endif + + vResult = vmlaq_f32(Offset, vResult, Scale); + + VL = vget_low_f32(vResult); + vst1_f32(reinterpret_cast(pOutputVector), VL); + vst1q_lane_f32(reinterpret_cast(pOutputVector) + 2, vResult, 2); + pOutputVector += OutputStride; + } + } + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + const float HalfViewportWidth = ViewportWidth * 0.5f; + const float HalfViewportHeight = ViewportHeight * 0.5f; + + XMVECTOR Scale = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 1.0f); + XMVECTOR Offset = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0f); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if (InputStride == sizeof(XMFLOAT3)) + { + if (OutputStride == sizeof(XMFLOAT3)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR 
Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V1 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V2 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V3 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, 
_MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V4 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector), V1); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 16), vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V1 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V2 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 
1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V3 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + V4 = XM_FMADD_PS(vTemp, Scale, Offset); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector), V1); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 16), vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = 
_mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = 
XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + for (; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + vTemp = XM_FMADD_PS(vTemp, Scale, Offset); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector3Unproject +( + FXMVECTOR V, + float ViewportX, + float ViewportY, + float ViewportWidth, + float ViewportHeight, + float ViewportMinZ, + float ViewportMaxZ, + FXMMATRIX Projection, + CXMMATRIX View, + CXMMATRIX World +) noexcept +{ + static const XMVECTORF32 D = { { { -1.0f, 1.0f, 0.0f, 0.0f } } }; + + XMVECTOR Scale = XMVectorSet(ViewportWidth * 0.5f, -ViewportHeight * 0.5f, ViewportMaxZ - ViewportMinZ, 1.0f); + Scale = XMVectorReciprocal(Scale); + + XMVECTOR Offset = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0f); + Offset = XMVectorMultiplyAdd(Scale, Offset, D.v); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + Transform = XMMatrixInverse(nullptr, Transform); + + XMVECTOR Result = XMVectorMultiplyAdd(V, Scale, Offset); + + return 
XMVector3TransformCoord(Result, Transform); +} + +//------------------------------------------------------------------------------ + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015 26019, "PREfast noise: Esp:1307" ) +#endif + +_Use_decl_annotations_ +inline XMFLOAT3* XM_CALLCONV XMVector3UnprojectStream +( + XMFLOAT3* pOutputStream, + size_t OutputStride, + const XMFLOAT3* pInputStream, + size_t InputStride, + size_t VectorCount, + float ViewportX, + float ViewportY, + float ViewportWidth, + float ViewportHeight, + float ViewportMinZ, + float ViewportMaxZ, + FXMMATRIX Projection, + CXMMATRIX View, + CXMMATRIX World +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT3)); + + assert(OutputStride >= sizeof(XMFLOAT3)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT3)); + +#if defined(_XM_NO_INTRINSICS_) + + static const XMVECTORF32 D = { { { -1.0f, 1.0f, 0.0f, 0.0f } } }; + + XMVECTOR Scale = XMVectorSet(ViewportWidth * 0.5f, -ViewportHeight * 0.5f, ViewportMaxZ - ViewportMinZ, 1.0f); + Scale = XMVectorReciprocal(Scale); + + XMVECTOR Offset = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0f); + Offset = XMVectorMultiplyAdd(Scale, Offset, D.v); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + Transform = XMMatrixInverse(nullptr, Transform); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + + XMVECTOR Result = XMVectorMultiplyAdd(V, Scale, Offset); + + Result = XMVector3TransformCoord(Result, Transform); + + XMStoreFloat3(reinterpret_cast(pOutputVector), Result); + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif 
defined(_XM_ARM_NEON_INTRINSICS_) + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + Transform = XMMatrixInverse(nullptr, Transform); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + float sx = 1.f / (ViewportWidth * 0.5f); + float sy = 1.f / (-ViewportHeight * 0.5f); + float sz = 1.f / (ViewportMaxZ - ViewportMinZ); + + float ox = (-ViewportX * sx) - 1.f; + float oy = (-ViewportY * sy) + 1.f; + float oz = (-ViewportMinZ * sz); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT3)) && (OutputStride == sizeof(XMFLOAT3))) + { + for (size_t j = 0; j < four; ++j) + { + float32x4x3_t V = vld3q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT3) * 4; + + XMVECTOR ScaleX = vdupq_n_f32(sx); + XMVECTOR OffsetX = vdupq_n_f32(ox); + XMVECTOR VX = vmlaq_f32(OffsetX, ScaleX, V.val[0]); + + float32x2_t r3 = vget_low_f32(Transform.r[3]); + float32x2_t r = vget_low_f32(Transform.r[0]); + XMVECTOR vResult0 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), VX, r, 0); // Ax+M + XMVECTOR vResult1 = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), VX, r, 1); // Bx+N + + XM_PREFETCH(pInputVector); + + r3 = vget_high_f32(Transform.r[3]); + r = vget_high_f32(Transform.r[0]); + XMVECTOR vResult2 = vmlaq_lane_f32(vdupq_lane_f32(r3, 0), VX, r, 0); // Cx+O + XMVECTOR W = vmlaq_lane_f32(vdupq_lane_f32(r3, 1), VX, r, 1); // Dx+P + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + XMVECTOR ScaleY = vdupq_n_f32(sy); + XMVECTOR OffsetY = vdupq_n_f32(oy); + XMVECTOR VY = vmlaq_f32(OffsetY, ScaleY, V.val[1]); + + r = vget_low_f32(Transform.r[1]); + vResult0 = vmlaq_lane_f32(vResult0, VY, r, 0); // Ax+Ey+M + vResult1 = vmlaq_lane_f32(vResult1, VY, r, 1); // Bx+Fy+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 2)); + + r = vget_high_f32(Transform.r[1]); + vResult2 = vmlaq_lane_f32(vResult2, VY, r, 0); // 
Cx+Gy+O + W = vmlaq_lane_f32(W, VY, r, 1); // Dx+Hy+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + XMVECTOR ScaleZ = vdupq_n_f32(sz); + XMVECTOR OffsetZ = vdupq_n_f32(oz); + XMVECTOR VZ = vmlaq_f32(OffsetZ, ScaleZ, V.val[2]); + + r = vget_low_f32(Transform.r[2]); + vResult0 = vmlaq_lane_f32(vResult0, VZ, r, 0); // Ax+Ey+Iz+M + vResult1 = vmlaq_lane_f32(vResult1, VZ, r, 1); // Bx+Fy+Jz+N + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4)); + + r = vget_high_f32(Transform.r[2]); + vResult2 = vmlaq_lane_f32(vResult2, VZ, r, 0); // Cx+Gy+Kz+O + W = vmlaq_lane_f32(W, VZ, r, 1); // Dx+Hy+Lz+P + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5)); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + V.val[0] = vdivq_f32(vResult0, W); + V.val[1] = vdivq_f32(vResult1, W); + V.val[2] = vdivq_f32(vResult2, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + V.val[0] = vmulq_f32(vResult0, Reciprocal); + V.val[1] = vmulq_f32(vResult1, Reciprocal); + V.val[2] = vmulq_f32(vResult2, Reciprocal); +#endif + + vst3q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT3) * 4; + + i += 4; + } + } + } + + if (i < VectorCount) + { + float32x2_t ScaleL = vcreate_f32( + static_cast(*reinterpret_cast(&sx)) + | (static_cast(*reinterpret_cast(&sy)) << 32)); + float32x2_t ScaleH = vcreate_f32(static_cast(*reinterpret_cast(&sz))); + + float32x2_t OffsetL = vcreate_f32( + static_cast(*reinterpret_cast(&ox)) + | (static_cast(*reinterpret_cast(&oy)) << 32)); + float32x2_t OffsetH = vcreate_f32(static_cast(*reinterpret_cast(&oz))); + + for (; i < VectorCount; i++) + { + float32x2_t VL = vld1_f32(reinterpret_cast(pInputVector)); + float32x2_t zero = vdup_n_f32(0); + float32x2_t VH = 
vld1_lane_f32(reinterpret_cast(pInputVector) + 2, zero, 0); + pInputVector += InputStride; + + VL = vmla_f32(OffsetL, VL, ScaleL); + VH = vmla_f32(OffsetH, VH, ScaleH); + + XMVECTOR vResult = vmlaq_lane_f32(Transform.r[3], Transform.r[0], VL, 0); // X + vResult = vmlaq_lane_f32(vResult, Transform.r[1], VL, 1); // Y + vResult = vmlaq_lane_f32(vResult, Transform.r[2], VH, 0); // Z + + VH = vget_high_f32(vResult); + XMVECTOR W = vdupq_lane_f32(VH, 1); + +#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || __aarch64__ + vResult = vdivq_f32(vResult, W); +#else + // 2 iterations of Newton-Raphson refinement of reciprocal for W + float32x4_t Reciprocal = vrecpeq_f32(W); + float32x4_t S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + S = vrecpsq_f32(Reciprocal, W); + Reciprocal = vmulq_f32(S, Reciprocal); + + vResult = vmulq_f32(vResult, Reciprocal); +#endif + + VL = vget_low_f32(vResult); + vst1_f32(reinterpret_cast(pOutputVector), VL); + vst1q_lane_f32(reinterpret_cast(pOutputVector) + 2, vResult, 2); + pOutputVector += OutputStride; + } + } + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + static const XMVECTORF32 D = { { { -1.0f, 1.0f, 0.0f, 0.0f } } }; + + XMVECTOR Scale = XMVectorSet(ViewportWidth * 0.5f, -ViewportHeight * 0.5f, ViewportMaxZ - ViewportMinZ, 1.0f); + Scale = XMVectorReciprocal(Scale); + + XMVECTOR Offset = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0f); + Offset = _mm_mul_ps(Scale, Offset); + Offset = _mm_add_ps(Offset, D); + + XMMATRIX Transform = XMMatrixMultiply(World, View); + Transform = XMMatrixMultiply(Transform, Projection); + Transform = XMMatrixInverse(nullptr, Transform); + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if (InputStride == sizeof(XMFLOAT3)) + { + if (OutputStride == sizeof(XMFLOAT3)) + { + if (!(reinterpret_cast(pOutputStream) & 0xF)) + 
{ + // Packed input, aligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + V1 = XM_FMADD_PS(V1, Scale, Offset); + + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V1 = _mm_div_ps(vTemp, W); + + // Result 2 + V2 = XM_FMADD_PS(V2, Scale, Offset); + + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V2 = _mm_div_ps(vTemp, W); + + // Result 3 + V3 = XM_FMADD_PS(V3, Scale, Offset); + + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V3 = _mm_div_ps(vTemp, W); + + // 
Result 4 + V4 = XM_FMADD_PS(V4, Scale, Offset); + + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V4 = _mm_div_ps(vTemp, W); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector), V1); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 16), vTemp); + XM_STREAM_PS(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + else + { + // Packed input, unaligned & packed output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, L3); + + // Result 1 + V1 = XM_FMADD_PS(V1, Scale, Offset); + + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V1 = _mm_div_ps(vTemp, W); + + // Result 2 + V2 = XM_FMADD_PS(V2, Scale, Offset); + + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = 
XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V2 = _mm_div_ps(vTemp, W); + + // Result 3 + V3 = XM_FMADD_PS(V3, Scale, Offset); + + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V3 = _mm_div_ps(vTemp, W); + + // Result 4 + V4 = XM_FMADD_PS(V4, Scale, Offset); + + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + V4 = _mm_div_ps(vTemp, W); + + // Pack and store the vectors + XM3PACK4INTO3(vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector), V1); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 16), vTemp); + _mm_storeu_ps(reinterpret_cast(pOutputVector + 32), V3); + pOutputVector += sizeof(XMFLOAT3) * 4; + i += 4; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < four; ++j) + { + __m128 V1 = _mm_loadu_ps(reinterpret_cast(pInputVector)); + __m128 L2 = _mm_loadu_ps(reinterpret_cast(pInputVector + 16)); + __m128 L3 = _mm_loadu_ps(reinterpret_cast(pInputVector + 32)); + pInputVector += sizeof(XMFLOAT3) * 4; + + // Unpack the 4 vectors (.w components are junk) + XM3UNPACK3INTO4(V1, L2, 
L3); + + // Result 1 + V1 = XM_FMADD_PS(V1, Scale, Offset); + + XMVECTOR Z = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 2 + V2 = XM_FMADD_PS(V2, Scale, Offset); + + Z = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V2, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 3 + V3 = XM_FMADD_PS(V3, Scale, Offset); + + Z = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 2, 2, 2)); + Y = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + // Result 4 + V4 = XM_FMADD_PS(V4, Scale, Offset); + + Z = XM_PERMUTE_PS(V4, _MM_SHUFFLE(2, 2, 2, 2)); + Y = 
XM_PERMUTE_PS(V4, _MM_SHUFFLE(1, 1, 1, 1)); + X = XM_PERMUTE_PS(V4, _MM_SHUFFLE(0, 0, 0, 0)); + + vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + + i += 4; + } + } + } + } + + for (; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat3(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + V = _mm_mul_ps(V, Scale); + V = _mm_add_ps(V, Offset); + + XMVECTOR Z = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR Y = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR X = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + + XMVECTOR vTemp = XM_FMADD_PS(Z, Transform.r[2], Transform.r[3]); + XMVECTOR vTemp2 = _mm_mul_ps(Y, Transform.r[1]); + XMVECTOR vTemp3 = _mm_mul_ps(X, Transform.r[0]); + vTemp = _mm_add_ps(vTemp, vTemp2); + vTemp = _mm_add_ps(vTemp, vTemp3); + + XMVECTOR W = XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(3, 3, 3, 3)); + vTemp = _mm_div_ps(vTemp, W); + + XMStoreFloat3(reinterpret_cast(pOutputVector), vTemp); + pOutputVector += OutputStride; + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + +/**************************************************************************** + * + * 4D Vector + * + ****************************************************************************/ + + //------------------------------------------------------------------------------ + // Comparison operations + //------------------------------------------------------------------------------ + + //------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4Equal +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if 
defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1]) && (V1.vector4_f32[2] == V2.vector4_f32[2]) && (V1.vector4_f32[3] == V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return XMComparisonAllTrue(XMVector4EqualR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector4EqualR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + uint32_t CR = 0; + + if ((V1.vector4_f32[0] == V2.vector4_f32[0]) && + (V1.vector4_f32[1] == V2.vector4_f32[1]) && + (V1.vector4_f32[2] == V2.vector4_f32[2]) && + (V1.vector4_f32[3] == V2.vector4_f32[3])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) && + (V1.vector4_f32[1] != V2.vector4_f32[1]) && + (V1.vector4_f32[2] != V2.vector4_f32[2]) && + (V1.vector4_f32[3] != V2.vector4_f32[3])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpeq_ps(V1, V2); + int iTest = _mm_movemask_ps(vTemp); + uint32_t CR = 0; + if (iTest == 0xf) // All equal? 
+ { + CR = XM_CRMASK_CR6TRUE; + } + else if (iTest == 0) // All not equal? + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4EqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1]) && (V1.vector4_u32[2] == V2.vector4_u32[2]) && (V1.vector4_u32[3] == V2.vector4_u32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return ((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) == 0xf) != 0); +#else + return XMComparisonAllTrue(XMVector4EqualIntR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector4EqualIntR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if (V1.vector4_u32[0] == V2.vector4_u32[0] && + V1.vector4_u32[1] == V2.vector4_u32[1] && + V1.vector4_u32[2] == V2.vector4_u32[2] && + V1.vector4_u32[3] == V2.vector4_u32[3]) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (V1.vector4_u32[0] != V2.vector4_u32[0] && + V1.vector4_u32[1] != V2.vector4_u32[1] && + V1.vector4_u32[2] != V2.vector4_u32[2] && + V1.vector4_u32[3] != V2.vector4_u32[3]) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = 
vget_lane_u32(vTemp2.val[1], 1); + + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + int iTest = _mm_movemask_ps(_mm_castsi128_ps(vTemp)); + uint32_t CR = 0; + if (iTest == 0xf) // All equal? + { + CR = XM_CRMASK_CR6TRUE; + } + else if (iTest == 0) // All not equal? + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +inline bool XM_CALLCONV XMVector4NearEqual +( + FXMVECTOR V1, + FXMVECTOR V2, + FXMVECTOR Epsilon +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + float dx, dy, dz, dw; + + dx = fabsf(V1.vector4_f32[0] - V2.vector4_f32[0]); + dy = fabsf(V1.vector4_f32[1] - V2.vector4_f32[1]); + dz = fabsf(V1.vector4_f32[2] - V2.vector4_f32[2]); + dw = fabsf(V1.vector4_f32[3] - V2.vector4_f32[3]); + return (((dx <= Epsilon.vector4_f32[0]) && + (dy <= Epsilon.vector4_f32[1]) && + (dz <= Epsilon.vector4_f32[2]) && + (dw <= Epsilon.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vDelta = vsubq_f32(V1, V2); +#ifdef _MSC_VER + uint32x4_t vResult = vacleq_f32(vDelta, Epsilon); +#else + uint32x4_t vResult = vcleq_f32(vabsq_f32(vDelta), Epsilon); +#endif + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Get the difference + XMVECTOR vDelta = _mm_sub_ps(V1, V2); + // Get the absolute value of the difference + XMVECTOR vTemp = _mm_setzero_ps(); + vTemp = _mm_sub_ps(vTemp, vDelta); + vTemp = _mm_max_ps(vTemp, vDelta); + vTemp = _mm_cmple_ps(vTemp, Epsilon); + return ((_mm_movemask_ps(vTemp) == 0xf) != 0); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4NotEqual +( + 
FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1]) || (V1.vector4_f32[2] != V2.vector4_f32[2]) || (V1.vector4_f32[3] != V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) != 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpneq_ps(V1, V2); + return ((_mm_movemask_ps(vTemp)) != 0); +#else + return XMComparisonAnyFalse(XMVector4EqualR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4NotEqualInt +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1]) || (V1.vector4_u32[2] != V2.vector4_u32[2]) || (V1.vector4_u32[3] != V2.vector4_u32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vceqq_u32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) != 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + __m128i vTemp = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2)); + return ((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) != 0xF) != 0); +#else + return XMComparisonAnyFalse(XMVector4EqualIntR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4Greater +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1]) && 
(V1.vector4_f32[2] > V2.vector4_f32[2]) && (V1.vector4_f32[3] > V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgtq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return XMComparisonAllTrue(XMVector4GreaterR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector4GreaterR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if (V1.vector4_f32[0] > V2.vector4_f32[0] && + V1.vector4_f32[1] > V2.vector4_f32[1] && + V1.vector4_f32[2] > V2.vector4_f32[2] && + V1.vector4_f32[3] > V2.vector4_f32[3]) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (V1.vector4_f32[0] <= V2.vector4_f32[0] && + V1.vector4_f32[1] <= V2.vector4_f32[1] && + V1.vector4_f32[2] <= V2.vector4_f32[2] && + V1.vector4_f32[3] <= V2.vector4_f32[3]) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgtq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#elif defined(_XM_SSE_INTRINSICS_) + uint32_t CR = 0; + XMVECTOR vTemp = _mm_cmpgt_ps(V1, V2); + int iTest = _mm_movemask_ps(vTemp); + if (iTest == 0xf) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + 
+//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4GreaterOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1]) && (V1.vector4_f32[2] >= V2.vector4_f32[2]) && (V1.vector4_f32[3] >= V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgeq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return XMComparisonAllTrue(XMVector4GreaterOrEqualR(V1, V2)); +#endif +} + +//------------------------------------------------------------------------------ + +inline uint32_t XM_CALLCONV XMVector4GreaterOrEqualR +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + uint32_t CR = 0; + if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) && + (V1.vector4_f32[1] >= V2.vector4_f32[1]) && + (V1.vector4_f32[2] >= V2.vector4_f32[2]) && + (V1.vector4_f32[3] >= V2.vector4_f32[3])) + { + CR = XM_CRMASK_CR6TRUE; + } + else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) && + (V1.vector4_f32[1] < V2.vector4_f32[1]) && + (V1.vector4_f32[2] < V2.vector4_f32[2]) && + (V1.vector4_f32[3] < V2.vector4_f32[3])) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcgeq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + uint32_t r = vget_lane_u32(vTemp2.val[1], 1); + + uint32_t CR = 0; + if (r == 0xFFFFFFFFU) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!r) + { + CR = XM_CRMASK_CR6FALSE; + } + 
return CR; +#elif defined(_XM_SSE_INTRINSICS_) + uint32_t CR = 0; + XMVECTOR vTemp = _mm_cmpge_ps(V1, V2); + int iTest = _mm_movemask_ps(vTemp); + if (iTest == 0x0f) + { + CR = XM_CRMASK_CR6TRUE; + } + else if (!iTest) + { + CR = XM_CRMASK_CR6FALSE; + } + return CR; +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4Less +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1]) && (V1.vector4_f32[2] < V2.vector4_f32[2]) && (V1.vector4_f32[3] < V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcltq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmplt_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return XMComparisonAllTrue(XMVector4GreaterR(V2, V1)); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4LessOrEqual +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1]) && (V1.vector4_f32[2] <= V2.vector4_f32[2]) && (V1.vector4_f32[3] <= V2.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + uint32x4_t vResult = vcleq_f32(V1, V2); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp = _mm_cmple_ps(V1, V2); + return ((_mm_movemask_ps(vTemp) == 0x0f) != 0); +#else + return 
XMComparisonAllTrue(XMVector4GreaterOrEqualR(V2, V1)); +#endif +} + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4InBounds +( + FXMVECTOR V, + FXMVECTOR Bounds +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) && + (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) && + (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) && + (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3])) != 0); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test if less than or equal + uint32x4_t ivTemp1 = vcleq_f32(V, Bounds); + // Negate the bounds + float32x4_t vTemp2 = vnegq_f32(Bounds); + // Test if greater or equal (Reversed) + uint32x4_t ivTemp2 = vcleq_f32(vTemp2, V); + // Blend answers + ivTemp1 = vandq_u32(ivTemp1, ivTemp2); + // in bounds? + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(ivTemp1), vget_high_u8(ivTemp1)); + uint16x4x2_t vTemp3 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp3.val[1], 1) == 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Test if less than or equal + XMVECTOR vTemp1 = _mm_cmple_ps(V, Bounds); + // Negate the bounds + XMVECTOR vTemp2 = _mm_mul_ps(Bounds, g_XMNegativeOne); + // Test if greater or equal (Reversed) + vTemp2 = _mm_cmple_ps(vTemp2, V); + // Blend answers + vTemp1 = _mm_and_ps(vTemp1, vTemp2); + // All in bounds? 
+ return ((_mm_movemask_ps(vTemp1) == 0x0f) != 0); +#else + return XMComparisonAllInBounds(XMVector4InBoundsR(V, Bounds)); +#endif +} + +//------------------------------------------------------------------------------ + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(push) +#pragma float_control(precise, on) +#endif + +inline bool XM_CALLCONV XMVector4IsNaN(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + return (XMISNAN(V.vector4_f32[0]) || + XMISNAN(V.vector4_f32[1]) || + XMISNAN(V.vector4_f32[2]) || + XMISNAN(V.vector4_f32[3])); +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Test against itself. NaN is always not equal + uint32x4_t vTempNan = vceqq_f32(V, V); + uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempNan), vget_high_u8(vTempNan)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + // If any are NaN, the mask is zero + return (vget_lane_u32(vTemp2.val[1], 1) != 0xFFFFFFFFU); +#elif defined(_XM_SSE_INTRINSICS_) + // Test against itself. NaN is always not equal + XMVECTOR vTempNan = _mm_cmpneq_ps(V, V); + // If any are NaN, the mask is non-zero + return (_mm_movemask_ps(vTempNan) != 0); +#endif +} + +#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma float_control(pop) +#endif + +//------------------------------------------------------------------------------ + +inline bool XM_CALLCONV XMVector4IsInfinite(FXMVECTOR V) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + return (XMISINF(V.vector4_f32[0]) || + XMISINF(V.vector4_f32[1]) || + XMISINF(V.vector4_f32[2]) || + XMISINF(V.vector4_f32[3])); + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + // Mask off the sign bit + uint32x4_t vTempInf = vandq_u32(V, g_XMAbsMask); + // Compare to infinity + vTempInf = vceqq_f32(vTempInf, g_XMInfinity); + // If any are infinity, the signs are true. 
+ uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vTempInf), vget_high_u8(vTempInf)); + uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]); + return (vget_lane_u32(vTemp2.val[1], 1) != 0); +#elif defined(_XM_SSE_INTRINSICS_) + // Mask off the sign bit + XMVECTOR vTemp = _mm_and_ps(V, g_XMAbsMask); + // Compare to infinity + vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity); + // If any are infinity, the signs are true. + return (_mm_movemask_ps(vTemp) != 0); +#endif +} + +//------------------------------------------------------------------------------ +// Computation operations +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV XMVector4Dot +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ +#if defined(_XM_NO_INTRINSICS_) + + XMVECTORF32 Result; + Result.f[0] = + Result.f[1] = + Result.f[2] = + Result.f[3] = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1] + V1.vector4_f32[2] * V2.vector4_f32[2] + V1.vector4_f32[3] * V2.vector4_f32[3]; + return Result.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x4_t vTemp = vmulq_f32(V1, V2); + float32x2_t v1 = vget_low_f32(vTemp); + float32x2_t v2 = vget_high_f32(vTemp); + v1 = vadd_f32(v1, v2); + v1 = vpadd_f32(v1, v1); + return vcombine_f32(v1, v1); +#elif defined(_XM_SSE4_INTRINSICS_) + return _mm_dp_ps(V1, V2, 0xff); +#elif defined(_XM_SSE3_INTRINSICS_) + XMVECTOR vTemp = _mm_mul_ps(V1, V2); + vTemp = _mm_hadd_ps(vTemp, vTemp); + return _mm_hadd_ps(vTemp, vTemp); +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vTemp2 = V2; + XMVECTOR vTemp = _mm_mul_ps(V1, vTemp2); + vTemp2 = _mm_shuffle_ps(vTemp2, vTemp, _MM_SHUFFLE(1, 0, 0, 0)); // Copy X to the Z position and Y to the W position + vTemp2 = _mm_add_ps(vTemp2, vTemp); // Add Z = X+Z; W = Y+W; + vTemp = _mm_shuffle_ps(vTemp, vTemp2, _MM_SHUFFLE(0, 3, 0, 0)); // Copy W to the Z position + vTemp = 
_mm_add_ps(vTemp, vTemp2); // Add Z and W together
    return XM_PERMUTE_PS(vTemp, _MM_SHUFFLE(2, 2, 2, 2)); // Splat Z and return
#endif
}

//------------------------------------------------------------------------------

// Computes the 4D cross product of three 4D vectors (component formula below).
// Each platform path evaluates the same expression; the SIMD paths build the
// three product terms via shuffles and fused multiply-add/subtract.
inline XMVECTOR XM_CALLCONV XMVector4Cross
(
    FXMVECTOR V1,
    FXMVECTOR V2,
    FXMVECTOR V3
) noexcept
{
    // [ ((v2.z*v3.w-v2.w*v3.z)*v1.y)-((v2.y*v3.w-v2.w*v3.y)*v1.z)+((v2.y*v3.z-v2.z*v3.y)*v1.w),
    //   ((v2.w*v3.z-v2.z*v3.w)*v1.x)-((v2.w*v3.x-v2.x*v3.w)*v1.z)+((v2.z*v3.x-v2.x*v3.z)*v1.w),
    //   ((v2.y*v3.w-v2.w*v3.y)*v1.x)-((v2.x*v3.w-v2.w*v3.x)*v1.y)+((v2.x*v3.y-v2.y*v3.x)*v1.w),
    //   ((v2.z*v3.y-v2.y*v3.z)*v1.x)-((v2.z*v3.x-v2.x*v3.z)*v1.y)+((v2.y*v3.x-v2.x*v3.y)*v1.z) ]

#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result = { { {
            (((V2.vector4_f32[2] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[2])) * V1.vector4_f32[1]) - (((V2.vector4_f32[1] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[1])) * V1.vector4_f32[2]) + (((V2.vector4_f32[1] * V3.vector4_f32[2]) - (V2.vector4_f32[2] * V3.vector4_f32[1])) * V1.vector4_f32[3]),
            (((V2.vector4_f32[3] * V3.vector4_f32[2]) - (V2.vector4_f32[2] * V3.vector4_f32[3])) * V1.vector4_f32[0]) - (((V2.vector4_f32[3] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[3])) * V1.vector4_f32[2]) + (((V2.vector4_f32[2] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[2])) * V1.vector4_f32[3]),
            (((V2.vector4_f32[1] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[1])) * V1.vector4_f32[0]) - (((V2.vector4_f32[0] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[0])) * V1.vector4_f32[1]) + (((V2.vector4_f32[0] * V3.vector4_f32[1]) - (V2.vector4_f32[1] * V3.vector4_f32[0])) * V1.vector4_f32[3]),
            (((V2.vector4_f32[2] * V3.vector4_f32[1]) - (V2.vector4_f32[1] * V3.vector4_f32[2])) * V1.vector4_f32[0]) - (((V2.vector4_f32[2] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[2])) * V1.vector4_f32[1]) + (((V2.vector4_f32[1] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[1])) * V1.vector4_f32[2]),
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // select mask = {all-ones, all-zeros}: picks lane 0 from the first
    // operand and lane 1 from the second in the vbsl_f32 calls below.
    const float32x2_t select = vget_low_f32(g_XMMaskX);

    // Term1: V2zwyz * V3wzwy
    const float32x2_t v2xy = vget_low_f32(V2);
    const float32x2_t v2zw = vget_high_f32(V2);
    const float32x2_t v2yx = vrev64_f32(v2xy);
    const float32x2_t v2wz = vrev64_f32(v2zw);
    const float32x2_t v2yz = vbsl_f32(select, v2yx, v2wz);

    const float32x2_t v3zw = vget_high_f32(V3);
    const float32x2_t v3wz = vrev64_f32(v3zw);
    const float32x2_t v3xy = vget_low_f32(V3);
    const float32x2_t v3wy = vbsl_f32(select, v3wz, v3xy);

    float32x4_t vTemp1 = vcombine_f32(v2zw, v2yz);
    float32x4_t vTemp2 = vcombine_f32(v3wz, v3wy);
    XMVECTOR vResult = vmulq_f32(vTemp1, vTemp2);

    // - V2wzwy * V3zwyz
    const float32x2_t v2wy = vbsl_f32(select, v2wz, v2xy);

    const float32x2_t v3yx = vrev64_f32(v3xy);
    const float32x2_t v3yz = vbsl_f32(select, v3yx, v3wz);

    vTemp1 = vcombine_f32(v2wz, v2wy);
    vTemp2 = vcombine_f32(v3zw, v3yz);
    vResult = vmlsq_f32(vResult, vTemp1, vTemp2);

    // term1 * V1yxxx
    const float32x2_t v1xy = vget_low_f32(V1);
    const float32x2_t v1yx = vrev64_f32(v1xy);

    vTemp1 = vcombine_f32(v1yx, vdup_lane_f32(v1yx, 1));
    vResult = vmulq_f32(vResult, vTemp1);

    // Term2: V2ywxz * V3wxwx
    const float32x2_t v2yw = vrev64_f32(v2wy);
    const float32x2_t v2xz = vbsl_f32(select, v2xy, v2wz);

    const float32x2_t v3wx = vbsl_f32(select, v3wz, v3yx);

    vTemp1 = vcombine_f32(v2yw, v2xz);
    vTemp2 = vcombine_f32(v3wx, v3wx);
    float32x4_t vTerm = vmulq_f32(vTemp1, vTemp2);

    // - V2wxwx * V3ywxz
    const float32x2_t v2wx = vbsl_f32(select, v2wz, v2yx);

    const float32x2_t v3yw = vrev64_f32(v3wy);
    const float32x2_t v3xz = vbsl_f32(select, v3xy, v3wz);

    vTemp1 = vcombine_f32(v2wx, v2wx);
    vTemp2 = vcombine_f32(v3yw, v3xz);
    vTerm = vmlsq_f32(vTerm, vTemp1, vTemp2);

    // vResult - term2 * V1zzyy
    const float32x2_t v1zw = vget_high_f32(V1);

    vTemp1 = vcombine_f32(vdup_lane_f32(v1zw, 0), vdup_lane_f32(v1yx, 0));
    vResult = vmlsq_f32(vResult, vTerm, vTemp1);

    // Term3: V2yzxy * V3zxyx
    const float32x2_t v3zx = vrev64_f32(v3xz);

    vTemp1 = vcombine_f32(v2yz, v2xy);
    vTemp2 = vcombine_f32(v3zx, v3yx);
    vTerm = vmulq_f32(vTemp1, vTemp2);

    // - V2zxyx * V3yzxy
    const float32x2_t v2zx = vrev64_f32(v2xz);

    vTemp1 = vcombine_f32(v2zx, v2yx);
    vTemp2 = vcombine_f32(v3yz, v3xy);
    vTerm = vmlsq_f32(vTerm, vTemp1, vTemp2);

    // vResult + term3 * V1wwwz
    const float32x2_t v1wz = vrev64_f32(v1zw);

    vTemp1 = vcombine_f32(vdup_lane_f32(v1wz, 0), v1wz);
    return vmlaq_f32(vResult, vTerm, vTemp1);
#elif defined(_XM_SSE_INTRINSICS_)
    // V2zwyz * V3wzwy
    XMVECTOR vResult = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 1, 3, 2));
    XMVECTOR vTemp3 = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 3, 2, 3));
    vResult = _mm_mul_ps(vResult, vTemp3);
    // - V2wzwy * V3zwyz
    XMVECTOR vTemp2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 3, 2, 3));
    vTemp3 = XM_PERMUTE_PS(vTemp3, _MM_SHUFFLE(1, 3, 0, 1));
    vResult = XM_FNMADD_PS(vTemp2, vTemp3, vResult);
    // term1 * V1yxxx
    XMVECTOR vTemp1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(0, 0, 0, 1));
    vResult = _mm_mul_ps(vResult, vTemp1);

    // V2ywxz * V3wxwx
    vTemp2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(2, 0, 3, 1));
    vTemp3 = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 3, 0, 3));
    vTemp3 = _mm_mul_ps(vTemp3, vTemp2);
    // - V2wxwx * V3ywxz
    vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(2, 1, 2, 1));
    vTemp1 = XM_PERMUTE_PS(V3, _MM_SHUFFLE(2, 0, 3, 1));
    vTemp3 = XM_FNMADD_PS(vTemp2, vTemp1, vTemp3);
    // vResult - temp * V1zzyy
    vTemp1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(1, 1, 2, 2));
    vResult = XM_FNMADD_PS(vTemp1, vTemp3, vResult);

    // V2yzxy * V3zxyx
    vTemp2 = XM_PERMUTE_PS(V2, _MM_SHUFFLE(1, 0, 2, 1));
    vTemp3 = XM_PERMUTE_PS(V3, _MM_SHUFFLE(0, 1, 0, 2));
    vTemp3 = _mm_mul_ps(vTemp3, vTemp2);
    // - V2zxyx * V3yzxy
    vTemp2 = XM_PERMUTE_PS(vTemp2, _MM_SHUFFLE(2, 0, 2, 1));
    vTemp1 = XM_PERMUTE_PS(V3, _MM_SHUFFLE(1, 0, 2, 1));
    vTemp3 = XM_FNMADD_PS(vTemp1, vTemp2, vTemp3);
    // vResult + term * V1wwwz
    vTemp1 = XM_PERMUTE_PS(V1, _MM_SHUFFLE(2, 3, 3, 3));
    vResult = XM_FMADD_PS(vTemp3, vTemp1, vResult);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Squared length of a 4D vector, replicated into all components
// (the dot product of V with itself).
inline XMVECTOR XM_CALLCONV XMVector4LengthSq(FXMVECTOR V) noexcept
{
    return XMVector4Dot(V, V);
}

//------------------------------------------------------------------------------

// Estimated reciprocal length of a 4D vector, replicated into all components.
// Uses the hardware reciprocal-square-root estimate (vrsqrte/_mm_rsqrt_ps)
// without refinement, trading precision for speed.
inline XMVECTOR XM_CALLCONV XMVector4ReciprocalLengthEst(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR Result;

    Result = XMVector4LengthSq(V);
    Result = XMVectorReciprocalSqrtEst(Result);

    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Dot4
    float32x4_t vTemp = vmulq_f32(V, V);
    float32x2_t v1 = vget_low_f32(vTemp);
    float32x2_t v2 = vget_high_f32(vTemp);
    v1 = vadd_f32(v1, v2);
    v1 = vpadd_f32(v1, v1);
    // Reciprocal sqrt (estimate)
    v2 = vrsqrte_f32(v1);
    return vcombine_f32(v2, v2);
#elif defined(_XM_SSE4_INTRINSICS_)
    XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff);
    return _mm_rsqrt_ps(vTemp);
#elif defined(_XM_SSE3_INTRINSICS_)
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_rsqrt_ps(vLengthSq);
    return vLengthSq;
#elif defined(_XM_SSE_INTRINSICS_)
    // Perform the dot product on x,y,z and w
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    // vTemp has z and w
    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
    // x+z, y+w
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // x+z,x+z,x+z,y+w
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
    // ??,??,y+w,y+w
    vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
    // ??,??,x+z+y+w,??
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // Splat the length
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
    // Get the reciprocal
    vLengthSq = _mm_rsqrt_ps(vLengthSq);
    return vLengthSq;
#endif
}

//------------------------------------------------------------------------------

// Full-precision reciprocal length of a 4D vector, replicated into all
// components. NEON refines the rsqrt estimate with two Newton-Raphson steps;
// the SSE paths compute sqrt then divide 1.0 by it.
inline XMVECTOR XM_CALLCONV XMVector4ReciprocalLength(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR Result;

    Result = XMVector4LengthSq(V);
    Result = XMVectorReciprocalSqrt(Result);

    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Dot4
    float32x4_t vTemp = vmulq_f32(V, V);
    float32x2_t v1 = vget_low_f32(vTemp);
    float32x2_t v2 = vget_high_f32(vTemp);
    v1 = vadd_f32(v1, v2);
    v1 = vpadd_f32(v1, v1);
    // Reciprocal sqrt
    float32x2_t S0 = vrsqrte_f32(v1);
    float32x2_t P0 = vmul_f32(v1, S0);
    float32x2_t R0 = vrsqrts_f32(P0, S0);
    float32x2_t S1 = vmul_f32(S0, R0);
    float32x2_t P1 = vmul_f32(v1, S1);
    float32x2_t R1 = vrsqrts_f32(P1, S1);
    float32x2_t Result = vmul_f32(S1, R1);
    return vcombine_f32(Result, Result);
#elif defined(_XM_SSE4_INTRINSICS_)
    XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff);
    XMVECTOR vLengthSq = _mm_sqrt_ps(vTemp);
    return _mm_div_ps(g_XMOne, vLengthSq);
#elif defined(_XM_SSE3_INTRINSICS_)
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_sqrt_ps(vLengthSq);
    vLengthSq = _mm_div_ps(g_XMOne, vLengthSq);
    return vLengthSq;
#elif defined(_XM_SSE_INTRINSICS_)
    // Perform the dot product on x,y,z and w
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    // vTemp has z and w
    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
    // x+z, y+w
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // x+z,x+z,x+z,y+w
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
    // ??,??,y+w,y+w
    vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
    // ??,??,x+z+y+w,??
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // Splat the length
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
    // Get the reciprocal
    vLengthSq = _mm_sqrt_ps(vLengthSq);
    // Accurate!
    vLengthSq = _mm_div_ps(g_XMOne, vLengthSq);
    return vLengthSq;
#endif
}

//------------------------------------------------------------------------------

// Estimated length of a 4D vector, replicated into all components.
// The NEON path uses the rsqrt estimate (length = lengthSq * rsqrt(lengthSq))
// and selects zero when the squared length is zero.
inline XMVECTOR XM_CALLCONV XMVector4LengthEst(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR Result;

    Result = XMVector4LengthSq(V);
    Result = XMVectorSqrtEst(Result);

    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Dot4
    float32x4_t vTemp = vmulq_f32(V, V);
    float32x2_t v1 = vget_low_f32(vTemp);
    float32x2_t v2 = vget_high_f32(vTemp);
    v1 = vadd_f32(v1, v2);
    v1 = vpadd_f32(v1, v1);
    const float32x2_t zero = vdup_n_f32(0);
    uint32x2_t VEqualsZero = vceq_f32(v1, zero);
    // Sqrt (estimate)
    float32x2_t Result = vrsqrte_f32(v1);
    Result = vmul_f32(v1, Result);
    Result = vbsl_f32(VEqualsZero, zero, Result);
    return vcombine_f32(Result, Result);
#elif defined(_XM_SSE4_INTRINSICS_)
    XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff);
    return _mm_sqrt_ps(vTemp);
#elif defined(_XM_SSE3_INTRINSICS_)
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_sqrt_ps(vLengthSq);
    return vLengthSq;
#elif defined(_XM_SSE_INTRINSICS_)
    // Perform the dot product on x,y,z and w
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    // vTemp has z and w
    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
    // x+z, y+w
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // x+z,x+z,x+z,y+w
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
    // ??,??,y+w,y+w
    vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
    // ??,??,x+z+y+w,??
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // Splat the length
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
    // Get the length
    vLengthSq = _mm_sqrt_ps(vLengthSq);
    return vLengthSq;
#endif
}

//------------------------------------------------------------------------------

// Full-precision length of a 4D vector, replicated into all components.
// NEON refines the rsqrt estimate with two Newton-Raphson steps and
// selects zero for zero-length input.
inline XMVECTOR XM_CALLCONV XMVector4Length(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR Result;

    Result = XMVector4LengthSq(V);
    Result = XMVectorSqrt(Result);

    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Dot4
    float32x4_t vTemp = vmulq_f32(V, V);
    float32x2_t v1 = vget_low_f32(vTemp);
    float32x2_t v2 = vget_high_f32(vTemp);
    v1 = vadd_f32(v1, v2);
    v1 = vpadd_f32(v1, v1);
    const float32x2_t zero = vdup_n_f32(0);
    uint32x2_t VEqualsZero = vceq_f32(v1, zero);
    // Sqrt
    float32x2_t S0 = vrsqrte_f32(v1);
    float32x2_t P0 = vmul_f32(v1, S0);
    float32x2_t R0 = vrsqrts_f32(P0, S0);
    float32x2_t S1 = vmul_f32(S0, R0);
    float32x2_t P1 = vmul_f32(v1, S1);
    float32x2_t R1 = vrsqrts_f32(P1, S1);
    float32x2_t Result = vmul_f32(S1, R1);
    Result = vmul_f32(v1, Result);
    Result = vbsl_f32(VEqualsZero, zero, Result);
    return vcombine_f32(Result, Result);
#elif defined(_XM_SSE4_INTRINSICS_)
    XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff);
    return _mm_sqrt_ps(vTemp);
#elif defined(_XM_SSE3_INTRINSICS_)
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_sqrt_ps(vLengthSq);
    return vLengthSq;
#elif defined(_XM_SSE_INTRINSICS_)
    // Perform the dot product on x,y,z and w
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    // vTemp has z and w
    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
    // x+z, y+w
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // x+z,x+z,x+z,y+w
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
    // ??,??,y+w,y+w
    vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
    // ??,??,x+z+y+w,??
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // Splat the length
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
    // Get the length
    vLengthSq = _mm_sqrt_ps(vLengthSq);
    return vLengthSq;
#endif
}

//------------------------------------------------------------------------------
// XMVector4NormalizeEst uses a reciprocal estimate and
// returns QNaN on zero and infinite vectors.

inline XMVECTOR XM_CALLCONV XMVector4NormalizeEst(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR Result;
    Result = XMVector4ReciprocalLength(V);
    Result = XMVectorMultiply(V, Result);
    return Result;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Dot4
    float32x4_t vTemp = vmulq_f32(V, V);
    float32x2_t v1 = vget_low_f32(vTemp);
    float32x2_t v2 = vget_high_f32(vTemp);
    v1 = vadd_f32(v1, v2);
    v1 = vpadd_f32(v1, v1);
    // Reciprocal sqrt (estimate)
    v2 = vrsqrte_f32(v1);
    // Normalize
    return vmulq_f32(V, vcombine_f32(v2, v2));
#elif defined(_XM_SSE4_INTRINSICS_)
    XMVECTOR vTemp = _mm_dp_ps(V, V, 0xff);
    XMVECTOR vResult = _mm_rsqrt_ps(vTemp);
    return _mm_mul_ps(vResult, V);
#elif defined(_XM_SSE3_INTRINSICS_)
    XMVECTOR vDot = _mm_mul_ps(V, V);
    vDot = _mm_hadd_ps(vDot, vDot);
    vDot = _mm_hadd_ps(vDot, vDot);
    vDot = _mm_rsqrt_ps(vDot);
    vDot = _mm_mul_ps(vDot, V);
    return vDot;
#elif defined(_XM_SSE_INTRINSICS_)
    // Perform the dot product on x,y,z and w
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    // vTemp has z and w
    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
    // x+z, y+w
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // x+z,x+z,x+z,y+w
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
    // ??,??,y+w,y+w
    vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
    // ??,??,x+z+y+w,??
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // Splat the length
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
    // Get the reciprocal
    XMVECTOR vResult = _mm_rsqrt_ps(vLengthSq);
    // Reciprocal mul to perform the normalization
    vResult = _mm_mul_ps(vResult, V);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Full-precision normalization of a 4D vector.
// Returns zero for a zero-length input and QNaN for an infinite-length input
// (handled explicitly by the mask/select logic in each SIMD path).
inline XMVECTOR XM_CALLCONV XMVector4Normalize(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)
    float fLength;
    XMVECTOR vResult;

    vResult = XMVector4Length(V);
    fLength = vResult.vector4_f32[0];

    // Prevent divide by zero
    if (fLength > 0)
    {
        fLength = 1.0f / fLength;
    }

    vResult.vector4_f32[0] = V.vector4_f32[0] * fLength;
    vResult.vector4_f32[1] = V.vector4_f32[1] * fLength;
    vResult.vector4_f32[2] = V.vector4_f32[2] * fLength;
    vResult.vector4_f32[3] = V.vector4_f32[3] * fLength;
    return vResult;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    // Dot4
    float32x4_t vTemp = vmulq_f32(V, V);
    float32x2_t v1 = vget_low_f32(vTemp);
    float32x2_t v2 = vget_high_f32(vTemp);
    v1 = vadd_f32(v1, v2);
    v1 = vpadd_f32(v1, v1);
    uint32x2_t VEqualsZero = vceq_f32(v1, vdup_n_f32(0));
    uint32x2_t VEqualsInf = vceq_f32(v1, vget_low_f32(g_XMInfinity));
    // Reciprocal sqrt (2 iterations of Newton-Raphson)
    float32x2_t S0 = vrsqrte_f32(v1);
    float32x2_t P0 = vmul_f32(v1, S0);
    float32x2_t R0 = vrsqrts_f32(P0, S0);
    float32x2_t S1 = vmul_f32(S0, R0);
    float32x2_t P1 = vmul_f32(v1, S1);
    float32x2_t R1 = vrsqrts_f32(P1, S1);
    v2 = vmul_f32(S1, R1);
    // Normalize
    XMVECTOR vResult = vmulq_f32(V, vcombine_f32(v2, v2));
    vResult = vbslq_f32(vcombine_f32(VEqualsZero, VEqualsZero), vdupq_n_f32(0), vResult);
    return vbslq_f32(vcombine_f32(VEqualsInf, VEqualsInf), g_XMQNaN, vResult);
#elif defined(_XM_SSE4_INTRINSICS_)
    XMVECTOR vLengthSq = _mm_dp_ps(V, V, 0xff);
    // Prepare for the division
    XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
    // Create zero with a single instruction
    XMVECTOR vZeroMask = _mm_setzero_ps();
    // Test for a divide by zero (Must be FP to detect -0.0)
    vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
    // Failsafe on zero (Or epsilon) length planes
    // If the length is infinity, set the elements to zero
    vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity);
    // Divide to perform the normalization
    vResult = _mm_div_ps(V, vResult);
    // Any that are infinity, set to zero
    vResult = _mm_and_ps(vResult, vZeroMask);
    // Select qnan or result based on infinite length
    XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN);
    XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq);
    vResult = _mm_or_ps(vTemp1, vTemp2);
    return vResult;
#elif defined(_XM_SSE3_INTRINSICS_)
    // Perform the dot product on x,y,z and w
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
    // Prepare for the division
    XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
    // Create zero with a single instruction
    XMVECTOR vZeroMask = _mm_setzero_ps();
    // Test for a divide by zero (Must be FP to detect -0.0)
    vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
    // Failsafe on zero (Or epsilon) length planes
    // If the length is infinity, set the elements to zero
    vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity);
    // Divide to perform the normalization
    vResult = _mm_div_ps(V, vResult);
    // Any that are infinity, set to zero
    vResult = _mm_and_ps(vResult, vZeroMask);
    // Select qnan or result based on infinite length
    XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN);
    XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq);
    vResult = _mm_or_ps(vTemp1, vTemp2);
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    // Perform the dot product on x,y,z and w
    XMVECTOR vLengthSq = _mm_mul_ps(V, V);
    // vTemp has z and w
    XMVECTOR vTemp = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
    // x+z, y+w
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // x+z,x+z,x+z,y+w
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
    // ??,??,y+w,y+w
    vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
    // ??,??,x+z+y+w,??
    vLengthSq = _mm_add_ps(vLengthSq, vTemp);
    // Splat the length
    vLengthSq = XM_PERMUTE_PS(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
    // Prepare for the division
    XMVECTOR vResult = _mm_sqrt_ps(vLengthSq);
    // Create zero with a single instruction
    XMVECTOR vZeroMask = _mm_setzero_ps();
    // Test for a divide by zero (Must be FP to detect -0.0)
    vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
    // Failsafe on zero (Or epsilon) length planes
    // If the length is infinity, set the elements to zero
    vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity);
    // Divide to perform the normalization
    vResult = _mm_div_ps(V, vResult);
    // Any that are infinity, set to zero
    vResult = _mm_and_ps(vResult, vZeroMask);
    // Select qnan or result based on infinite length
    XMVECTOR vTemp1 = _mm_andnot_ps(vLengthSq, g_XMQNaN);
    XMVECTOR vTemp2 = _mm_and_ps(vResult, vLengthSq);
    vResult = _mm_or_ps(vTemp1, vTemp2);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Clamps the length of V to [LengthMin, LengthMax] (scalar convenience
// wrapper; replicates the bounds and forwards to XMVector4ClampLengthV).
inline XMVECTOR XM_CALLCONV XMVector4ClampLength
(
    FXMVECTOR V,
    float LengthMin,
    float LengthMax
) noexcept
{
    XMVECTOR ClampMax = XMVectorReplicate(LengthMax);
    XMVECTOR ClampMin = XMVectorReplicate(LengthMin);

    return XMVector4ClampLengthV(V, ClampMin, ClampMax);
}

//------------------------------------------------------------------------------

// Clamps the length of V to the replicated bounds LengthMin/LengthMax.
// Both bounds must be non-negative splat vectors with LengthMax >= LengthMin
// (enforced by the asserts below).
inline XMVECTOR XM_CALLCONV XMVector4ClampLengthV
(
    FXMVECTOR V,
    FXMVECTOR LengthMin,
    FXMVECTOR LengthMax
) noexcept
{
    assert((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetZ(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetW(LengthMin) == XMVectorGetX(LengthMin)));
    assert((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetZ(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetW(LengthMax) == XMVectorGetX(LengthMax)));
    assert(XMVector4GreaterOrEqual(LengthMin, XMVectorZero()));
    assert(XMVector4GreaterOrEqual(LengthMax, XMVectorZero()));
    assert(XMVector4GreaterOrEqual(LengthMax, LengthMin));

    XMVECTOR LengthSq = XMVector4LengthSq(V);

    const XMVECTOR Zero = XMVectorZero();

    XMVECTOR RcpLength = XMVectorReciprocalSqrt(LengthSq);

    XMVECTOR InfiniteLength = XMVectorEqualInt(LengthSq, g_XMInfinity.v);
    XMVECTOR ZeroLength = XMVectorEqual(LengthSq, Zero);

    XMVECTOR Normal = XMVectorMultiply(V, RcpLength);

    XMVECTOR Length = XMVectorMultiply(LengthSq, RcpLength);

    // If the length is neither finite-nonzero nor well-defined, fall back to
    // the raw squared length for both Length and Normal.
    XMVECTOR Select = XMVectorEqualInt(InfiniteLength, ZeroLength);
    Length = XMVectorSelect(LengthSq, Length, Select);
    Normal = XMVectorSelect(LengthSq, Normal, Select);

    XMVECTOR ControlMax = XMVectorGreater(Length, LengthMax);
    XMVECTOR ControlMin = XMVectorLess(Length, LengthMin);

    XMVECTOR ClampLength = XMVectorSelect(Length, LengthMax, ControlMax);
    ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin);

    XMVECTOR Result = XMVectorMultiply(Normal, ClampLength);

    // Preserve the original vector (with no precision loss) if the length falls within the given range
    XMVECTOR Control = XMVectorEqualInt(ControlMax, ControlMin);
    Result = XMVectorSelect(Result, V, Control);

    return Result;
}

//------------------------------------------------------------------------------

// Reflects Incident about Normal (formula below).
inline XMVECTOR XM_CALLCONV XMVector4Reflect
(
    FXMVECTOR Incident,
    FXMVECTOR Normal
) noexcept
{
    // Result = Incident - (2 * dot(Incident, Normal)) * Normal

    XMVECTOR Result = XMVector4Dot(Incident, Normal);
    Result = XMVectorAdd(Result, Result);
    Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident);

    return Result;
}

//------------------------------------------------------------------------------

// Refracts Incident through Normal with a scalar refraction index
// (convenience wrapper; replicates the index and forwards to XMVector4RefractV).
inline XMVECTOR XM_CALLCONV XMVector4Refract
(
    FXMVECTOR Incident,
    FXMVECTOR Normal,
    float RefractionIndex
) noexcept
{
    XMVECTOR Index = XMVectorReplicate(RefractionIndex);
    return XMVector4RefractV(Incident, Normal, Index);
}

//------------------------------------------------------------------------------

// Refracts Incident through Normal using a replicated refraction index.
// Returns zero on total internal reflection (R <= 0 in all components).
inline XMVECTOR XM_CALLCONV XMVector4RefractV
(
    FXMVECTOR Incident,
    FXMVECTOR Normal,
    FXMVECTOR RefractionIndex
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTOR IDotN;
    XMVECTOR R;
    const XMVECTOR Zero = XMVectorZero();

    // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) +
    // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal))))

    IDotN = XMVector4Dot(Incident, Normal);

    // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
    R = XMVectorNegativeMultiplySubtract(IDotN, IDotN, g_XMOne.v);
    R = XMVectorMultiply(R, RefractionIndex);
    R = XMVectorNegativeMultiplySubtract(R, RefractionIndex, g_XMOne.v);

    if (XMVector4LessOrEqual(R, Zero))
    {
        // Total internal reflection
        return Zero;
    }
    else
    {
        XMVECTOR Result;

        // R = RefractionIndex * IDotN + sqrt(R)
        R = XMVectorSqrt(R);
        R = XMVectorMultiplyAdd(RefractionIndex, IDotN, R);

        // Result = RefractionIndex * Incident - Normal * R
        Result = XMVectorMultiply(RefractionIndex, Incident);
        Result = XMVectorNegativeMultiplySubtract(Normal, R, Result);

        return Result;
    }

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    XMVECTOR IDotN = XMVector4Dot(Incident, Normal);

    // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
    float32x4_t R = vmlsq_f32(g_XMOne, IDotN, IDotN);
    R = vmulq_f32(R, RefractionIndex);
    R = vmlsq_f32(g_XMOne, R, RefractionIndex);

    // Interleave the comparison mask so one lane holds the all-components test.
    uint32x4_t vResult = vcleq_f32(R, g_XMZero);
    uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vResult), vget_high_u8(vResult));
    uint16x4x2_t vTemp2 = vzip_u16(vTemp.val[0], vTemp.val[1]);
    if (vget_lane_u32(vTemp2.val[1], 1) == 0xFFFFFFFFU)
    {
        // Total internal reflection
        vResult = g_XMZero;
    }
    else
    {
        // Sqrt(R)
        float32x4_t S0 = vrsqrteq_f32(R);
        float32x4_t P0 = vmulq_f32(R, S0);
        float32x4_t R0 = vrsqrtsq_f32(P0, S0);
        float32x4_t S1 = vmulq_f32(S0, R0);
        float32x4_t P1 = vmulq_f32(R, S1);
        float32x4_t R1 = vrsqrtsq_f32(P1, S1);
        float32x4_t S2 = vmulq_f32(S1, R1);
        R = vmulq_f32(R, S2);
        // R = RefractionIndex * IDotN + sqrt(R)
        R = vmlaq_f32(R, RefractionIndex, IDotN);
        // Result = RefractionIndex * Incident - Normal * R
        vResult = vmulq_f32(RefractionIndex, Incident);
        vResult = vmlsq_f32(vResult, R, Normal);
    }
    return vResult;
#elif defined(_XM_SSE_INTRINSICS_)
    XMVECTOR IDotN = XMVector4Dot(Incident, Normal);

    // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
    XMVECTOR R = XM_FNMADD_PS(IDotN, IDotN, g_XMOne);
    XMVECTOR R2 = _mm_mul_ps(RefractionIndex, RefractionIndex);
    R = XM_FNMADD_PS(R, R2, g_XMOne);

    XMVECTOR vResult = _mm_cmple_ps(R, g_XMZero);
    if (_mm_movemask_ps(vResult) == 0x0f)
    {
        // Total internal reflection
        vResult = g_XMZero;
    }
    else
    {
        // R = RefractionIndex * IDotN + sqrt(R)
        R = _mm_sqrt_ps(R);
        R = XM_FMADD_PS(RefractionIndex, IDotN, R);
        // Result = RefractionIndex * Incident - Normal * R
        vResult = _mm_mul_ps(RefractionIndex, Incident);
        vResult = XM_FNMADD_PS(R, Normal, vResult);
    }
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Returns a vector orthogonal to V: (z, w, -x, -y).
inline XMVECTOR XM_CALLCONV XMVector4Orthogonal(FXMVECTOR V) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    XMVECTORF32 Result = { { {
            V.vector4_f32[2],
            V.vector4_f32[3],
            -V.vector4_f32[0],
            -V.vector4_f32[1]
        } } };
    return Result.v;

#elif defined(_XM_ARM_NEON_INTRINSICS_)
    static const XMVECTORF32 Negate = { { { 1.f, 1.f, -1.f, -1.f } } };

    float32x4_t Result = vcombine_f32(vget_high_f32(V), vget_low_f32(V));
    return vmulq_f32(Result, Negate);
#elif defined(_XM_SSE_INTRINSICS_)
    static const XMVECTORF32 FlipZW = { { { 1.0f, 1.0f, -1.0f, -1.0f } } };
    XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 0, 3, 2));
    vResult = _mm_mul_ps(vResult, FlipZW);
    return vResult;
#endif
}

//------------------------------------------------------------------------------

// Estimated angle (radians, replicated) between two normalized 4D vectors;
// clamps the dot product to [-1, 1] and uses the fast arccos estimate.
inline XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormalsEst
(
    FXMVECTOR N1,
    FXMVECTOR N2
) noexcept
{
    XMVECTOR Result = XMVector4Dot(N1, N2);
    Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
    Result = XMVectorACosEst(Result);
    return Result;
}

//------------------------------------------------------------------------------

// Angle (radians, replicated) between two normalized 4D vectors.
inline XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormals
(
    FXMVECTOR N1,
    FXMVECTOR N2
) noexcept
{
    XMVECTOR Result = XMVector4Dot(N1, N2);
    Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
    Result = XMVectorACos(Result);
    return Result;
}

//------------------------------------------------------------------------------

// Angle (radians, replicated) between two arbitrary 4D vectors; normalizes
// the dot product by both reciprocal lengths before taking arccos.
inline XMVECTOR XM_CALLCONV XMVector4AngleBetweenVectors
(
    FXMVECTOR V1,
    FXMVECTOR V2
) noexcept
{
    XMVECTOR L1 = XMVector4ReciprocalLength(V1);
    XMVECTOR L2 = XMVector4ReciprocalLength(V2);

    XMVECTOR Dot = XMVector4Dot(V1, V2);

    L1 = XMVectorMultiply(L1, L2);

    XMVECTOR CosAngle = XMVectorMultiply(Dot, L1);
    CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v);

    return XMVectorACos(CosAngle);
}

//------------------------------------------------------------------------------

// Transforms the 4D vector V by the matrix M (row-vector convention: V * M).
inline XMVECTOR XM_CALLCONV XMVector4Transform
(
    FXMVECTOR V,
    FXMMATRIX M
) noexcept
{
#if defined(_XM_NO_INTRINSICS_)

    float fX = (M.m[0][0] * V.vector4_f32[0]) + (M.m[1][0] * V.vector4_f32[1]) + (M.m[2][0] * V.vector4_f32[2]) + (M.m[3][0] * V.vector4_f32[3]);
    float fY = (M.m[0][1] * V.vector4_f32[0]) + (M.m[1][1] * V.vector4_f32[1]) + (M.m[2][1] * V.vector4_f32[2]) + (M.m[3][1] * V.vector4_f32[3]);
    float fZ = (M.m[0][2] *
V.vector4_f32[0]) + (M.m[1][2] * V.vector4_f32[1]) + (M.m[2][2] * V.vector4_f32[2]) + (M.m[3][2] * V.vector4_f32[3]); + float fW = (M.m[0][3] * V.vector4_f32[0]) + (M.m[1][3] * V.vector4_f32[1]) + (M.m[2][3] * V.vector4_f32[2]) + (M.m[3][3] * V.vector4_f32[3]); + XMVECTORF32 vResult = { { { fX, fY, fZ, fW } } }; + return vResult.v; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + float32x2_t VL = vget_low_f32(V); + XMVECTOR vResult = vmulq_lane_f32(M.r[0], VL, 0); // X + vResult = vmlaq_lane_f32(vResult, M.r[1], VL, 1); // Y + float32x2_t VH = vget_high_f32(V); + vResult = vmlaq_lane_f32(vResult, M.r[2], VH, 0); // Z + return vmlaq_lane_f32(vResult, M.r[3], VH, 1); // W +#elif defined(_XM_SSE_INTRINSICS_) + XMVECTOR vResult = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); // W + vResult = _mm_mul_ps(vResult, M.r[3]); + XMVECTOR vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); // Z + vResult = XM_FMADD_PS(vTemp, M.r[2], vResult); + vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y + vResult = XM_FMADD_PS(vTemp, M.r[1], vResult); + vTemp = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); // X + vResult = XM_FMADD_PS(vTemp, M.r[0], vResult); + return vResult; +#endif +} + +//------------------------------------------------------------------------------ +_Use_decl_annotations_ +inline XMFLOAT4* XM_CALLCONV XMVector4TransformStream +( + XMFLOAT4* pOutputStream, + size_t OutputStride, + const XMFLOAT4* pInputStream, + size_t InputStride, + size_t VectorCount, + FXMMATRIX M +) noexcept +{ + assert(pOutputStream != nullptr); + assert(pInputStream != nullptr); + + assert(InputStride >= sizeof(XMFLOAT4)); + _Analysis_assume_(InputStride >= sizeof(XMFLOAT4)); + + assert(OutputStride >= sizeof(XMFLOAT4)); + _Analysis_assume_(OutputStride >= sizeof(XMFLOAT4)); + +#if defined(_XM_NO_INTRINSICS_) + + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const 
XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + for (size_t i = 0; i < VectorCount; i++) + { + XMVECTOR V = XMLoadFloat4(reinterpret_cast(pInputVector)); + XMVECTOR W = XMVectorSplatW(V); + XMVECTOR Z = XMVectorSplatZ(V); + XMVECTOR Y = XMVectorSplatY(V); + XMVECTOR X = XMVectorSplatX(V); + + XMVECTOR Result = XMVectorMultiply(W, row3); + Result = XMVectorMultiplyAdd(Z, row2, Result); + Result = XMVectorMultiplyAdd(Y, row1, Result); + Result = XMVectorMultiplyAdd(X, row0, Result); + +#ifdef _PREFAST_ +#pragma prefast(push) +#pragma prefast(disable : 26015, "PREfast noise: Esp:1307" ) +#endif + + XMStoreFloat4(reinterpret_cast(pOutputVector), Result); + +#ifdef _PREFAST_ +#pragma prefast(pop) +#endif + + pInputVector += InputStride; + pOutputVector += OutputStride; + } + + return pOutputStream; + +#elif defined(_XM_ARM_NEON_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + size_t i = 0; + size_t four = VectorCount >> 2; + if (four > 0) + { + if ((InputStride == sizeof(XMFLOAT4)) && (OutputStride == sizeof(XMFLOAT4))) + { + for (size_t j = 0; j < four; ++j) + { + float32x4x4_t V = vld4q_f32(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT4) * 4; + + float32x2_t r = vget_low_f32(row0); + XMVECTOR vResult0 = vmulq_lane_f32(V.val[0], r, 0); // Ax + XMVECTOR vResult1 = vmulq_lane_f32(V.val[0], r, 1); // Bx + + XM_PREFETCH(pInputVector); + + r = vget_high_f32(row0); + XMVECTOR vResult2 = vmulq_lane_f32(V.val[0], r, 0); // Cx + XMVECTOR vResult3 = vmulq_lane_f32(V.val[0], r, 1); // Dx + + XM_PREFETCH(pInputVector + XM_CACHE_LINE_SIZE); + + r = vget_low_f32(row1); + vResult0 = vmlaq_lane_f32(vResult0, V.val[1], r, 0); // Ax+Ey + vResult1 = vmlaq_lane_f32(vResult1, V.val[1], r, 1); // Bx+Fy + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 
2)); + + r = vget_high_f32(row1); + vResult2 = vmlaq_lane_f32(vResult2, V.val[1], r, 0); // Cx+Gy + vResult3 = vmlaq_lane_f32(vResult3, V.val[1], r, 1); // Dx+Hy + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 3)); + + r = vget_low_f32(row2); + vResult0 = vmlaq_lane_f32(vResult0, V.val[2], r, 0); // Ax+Ey+Iz + vResult1 = vmlaq_lane_f32(vResult1, V.val[2], r, 1); // Bx+Fy+Jz + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 4)); + + r = vget_high_f32(row2); + vResult2 = vmlaq_lane_f32(vResult2, V.val[2], r, 0); // Cx+Gy+Kz + vResult3 = vmlaq_lane_f32(vResult3, V.val[2], r, 1); // Dx+Hy+Lz + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 5)); + + r = vget_low_f32(row3); + vResult0 = vmlaq_lane_f32(vResult0, V.val[3], r, 0); // Ax+Ey+Iz+Mw + vResult1 = vmlaq_lane_f32(vResult1, V.val[3], r, 1); // Bx+Fy+Jz+Nw + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 6)); + + r = vget_high_f32(row3); + vResult2 = vmlaq_lane_f32(vResult2, V.val[3], r, 0); // Cx+Gy+Kz+Ow + vResult3 = vmlaq_lane_f32(vResult3, V.val[3], r, 1); // Dx+Hy+Lz+Pw + + XM_PREFETCH(pInputVector + (XM_CACHE_LINE_SIZE * 7)); + + V.val[0] = vResult0; + V.val[1] = vResult1; + V.val[2] = vResult2; + V.val[3] = vResult3; + + vst4q_f32(reinterpret_cast(pOutputVector), V); + pOutputVector += sizeof(XMFLOAT4) * 4; + + i += 4; + } + } + } + + for (; i < VectorCount; i++) + { + XMVECTOR V = vld1q_f32(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + float32x2_t VL = vget_low_f32(V); + XMVECTOR vResult = vmulq_lane_f32(row0, VL, 0); // X + vResult = vmlaq_lane_f32(vResult, row1, VL, 1); // Y + float32x2_t VH = vget_high_f32(V); + vResult = vmlaq_lane_f32(vResult, row2, VH, 0); // Z + vResult = vmlaq_lane_f32(vResult, row3, VH, 1); // W + + vst1q_f32(reinterpret_cast(pOutputVector), vResult); + pOutputVector += OutputStride; + } + + return pOutputStream; +#elif defined(_XM_AVX2_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + auto pOutputVector = 
reinterpret_cast(pOutputStream); + + size_t i = 0; + size_t two = VectorCount >> 1; + if (two > 0) + { + __m256 row0 = _mm256_broadcast_ps(&M.r[0]); + __m256 row1 = _mm256_broadcast_ps(&M.r[1]); + __m256 row2 = _mm256_broadcast_ps(&M.r[2]); + __m256 row3 = _mm256_broadcast_ps(&M.r[3]); + + if (InputStride == sizeof(XMFLOAT4)) + { + if (OutputStride == sizeof(XMFLOAT4)) + { + if (!(reinterpret_cast(pOutputStream) & 0x1F)) + { + // Packed input, aligned & packed output + for (size_t j = 0; j < two; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT4) * 2; + + __m256 vTempX = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + __m256 vTempY = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 vTempZ = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 vTempW = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + + vTempX = _mm256_mul_ps(vTempX, row0); + vTempY = _mm256_mul_ps(vTempY, row1); + vTempZ = _mm256_fmadd_ps(vTempZ, row2, vTempX); + vTempW = _mm256_fmadd_ps(vTempW, row3, vTempY); + vTempX = _mm256_add_ps(vTempZ, vTempW); + + XM256_STREAM_PS(reinterpret_cast(pOutputVector), vTempX); + pOutputVector += sizeof(XMFLOAT4) * 2; + + i += 2; + } + } + else + { + // Packed input, packed output + for (size_t j = 0; j < two; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT4) * 2; + + __m256 vTempX = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + __m256 vTempY = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 vTempZ = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 vTempW = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + + vTempX = _mm256_mul_ps(vTempX, row0); + vTempY = _mm256_mul_ps(vTempY, row1); + vTempZ = _mm256_fmadd_ps(vTempZ, row2, vTempX); + vTempW = _mm256_fmadd_ps(vTempW, row3, vTempY); + vTempX = _mm256_add_ps(vTempZ, vTempW); + + _mm256_storeu_ps(reinterpret_cast(pOutputVector), vTempX); + 
pOutputVector += sizeof(XMFLOAT4) * 2; + + i += 2; + } + } + } + else + { + // Packed input, unpacked output + for (size_t j = 0; j < two; ++j) + { + __m256 VV = _mm256_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += sizeof(XMFLOAT4) * 2; + + __m256 vTempX = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(0, 0, 0, 0)); + __m256 vTempY = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(1, 1, 1, 1)); + __m256 vTempZ = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(2, 2, 2, 2)); + __m256 vTempW = _mm256_shuffle_ps(VV, VV, _MM_SHUFFLE(3, 3, 3, 3)); + + vTempX = _mm256_mul_ps(vTempX, row0); + vTempY = _mm256_mul_ps(vTempY, row1); + vTempZ = _mm256_fmadd_ps(vTempZ, row2, vTempX); + vTempW = _mm256_fmadd_ps(vTempW, row3, vTempY); + vTempX = _mm256_add_ps(vTempZ, vTempW); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_castps256_ps128(vTempX)); + pOutputVector += OutputStride; + + _mm_storeu_ps(reinterpret_cast(pOutputVector), _mm256_extractf128_ps(vTempX, 1)); + pOutputVector += OutputStride; + i += 2; + } + } + } + } + + if (i < VectorCount) + { + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + for (; i < VectorCount; i++) + { + __m128 V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + + vTempX = _mm_mul_ps(vTempX, row0); + vTempY = _mm_mul_ps(vTempY, row1); + vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX); + vTempW = XM_FMADD_PS(vTempW, row3, vTempY); + vTempX = _mm_add_ps(vTempZ, vTempW); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTempX); + pOutputVector += OutputStride; + } + } + + XM_SFENCE(); + + return pOutputStream; +#elif defined(_XM_SSE_INTRINSICS_) + auto pInputVector = reinterpret_cast(pInputStream); + 
auto pOutputVector = reinterpret_cast(pOutputStream); + + const XMVECTOR row0 = M.r[0]; + const XMVECTOR row1 = M.r[1]; + const XMVECTOR row2 = M.r[2]; + const XMVECTOR row3 = M.r[3]; + + if (!(reinterpret_cast(pOutputStream) & 0xF) && !(OutputStride & 0xF)) + { + if (!(reinterpret_cast(pInputStream) & 0xF) && !(InputStride & 0xF)) + { + // Aligned input, aligned output + for (size_t i = 0; i < VectorCount; i++) + { + __m128 V = _mm_load_ps(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + + vTempX = _mm_mul_ps(vTempX, row0); + vTempY = _mm_mul_ps(vTempY, row1); + vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX); + vTempW = XM_FMADD_PS(vTempW, row3, vTempY); + vTempX = _mm_add_ps(vTempZ, vTempW); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTempX); + pOutputVector += OutputStride; + } + } + else + { + // Unaligned input, aligned output + for (size_t i = 0; i < VectorCount; i++) + { + __m128 V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + + vTempX = _mm_mul_ps(vTempX, row0); + vTempY = _mm_mul_ps(vTempY, row1); + vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX); + vTempW = XM_FMADD_PS(vTempW, row3, vTempY); + vTempX = _mm_add_ps(vTempZ, vTempW); + + XM_STREAM_PS(reinterpret_cast(pOutputVector), vTempX); + pOutputVector += OutputStride; + } + } + } + else + { + if (!(reinterpret_cast(pInputStream) & 0xF) && !(InputStride & 0xF)) + { + // Aligned input, unaligned output + for (size_t i = 0; i < VectorCount; i++) + { + __m128 
V = _mm_load_ps(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + + vTempX = _mm_mul_ps(vTempX, row0); + vTempY = _mm_mul_ps(vTempY, row1); + vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX); + vTempW = XM_FMADD_PS(vTempW, row3, vTempY); + vTempX = _mm_add_ps(vTempZ, vTempW); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTempX); + pOutputVector += OutputStride; + } + } + else + { + // Unaligned input, unaligned output + for (size_t i = 0; i < VectorCount; i++) + { + __m128 V = _mm_loadu_ps(reinterpret_cast(pInputVector)); + pInputVector += InputStride; + + XMVECTOR vTempX = XM_PERMUTE_PS(V, _MM_SHUFFLE(0, 0, 0, 0)); + XMVECTOR vTempY = XM_PERMUTE_PS(V, _MM_SHUFFLE(1, 1, 1, 1)); + XMVECTOR vTempZ = XM_PERMUTE_PS(V, _MM_SHUFFLE(2, 2, 2, 2)); + XMVECTOR vTempW = XM_PERMUTE_PS(V, _MM_SHUFFLE(3, 3, 3, 3)); + + vTempX = _mm_mul_ps(vTempX, row0); + vTempY = _mm_mul_ps(vTempY, row1); + vTempZ = XM_FMADD_PS(vTempZ, row2, vTempX); + vTempW = XM_FMADD_PS(vTempW, row3, vTempY); + vTempX = _mm_add_ps(vTempZ, vTempW); + + _mm_storeu_ps(reinterpret_cast(pOutputVector), vTempX); + pOutputVector += OutputStride; + } + } + } + + XM_SFENCE(); + + return pOutputStream; +#endif +} + +/**************************************************************************** + * + * XMVECTOR operators + * + ****************************************************************************/ + +#ifndef _XM_NO_XMVECTOR_OVERLOADS_ + + //------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V) noexcept +{ + return V; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV operator- (FXMVECTOR V) noexcept +{ + 
return XMVectorNegate(V); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR& XM_CALLCONV operator+= +( + XMVECTOR& V1, + FXMVECTOR V2 +) noexcept +{ + V1 = XMVectorAdd(V1, V2); + return V1; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR& XM_CALLCONV operator-= +( + XMVECTOR& V1, + FXMVECTOR V2 +) noexcept +{ + V1 = XMVectorSubtract(V1, V2); + return V1; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR& XM_CALLCONV operator*= +( + XMVECTOR& V1, + FXMVECTOR V2 +) noexcept +{ + V1 = XMVectorMultiply(V1, V2); + return V1; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR& XM_CALLCONV operator/= +( + XMVECTOR& V1, + FXMVECTOR V2 +) noexcept +{ + V1 = XMVectorDivide(V1, V2); + return V1; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR& operator*= +( + XMVECTOR& V, + const float S +) noexcept +{ + V = XMVectorScale(V, S); + return V; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR& operator/= +( + XMVECTOR& V, + const float S +) noexcept +{ + XMVECTOR vS = XMVectorReplicate(S); + V = XMVectorDivide(V, vS); + return V; +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV operator+ +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + return XMVectorAdd(V1, V2); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV operator- +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + return XMVectorSubtract(V1, V2); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV operator* +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + return XMVectorMultiply(V1, 
V2); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV operator/ +( + FXMVECTOR V1, + FXMVECTOR V2 +) noexcept +{ + return XMVectorDivide(V1, V2); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV operator* +( + FXMVECTOR V, + const float S +) noexcept +{ + return XMVectorScale(V, S); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV operator/ +( + FXMVECTOR V, + const float S +) noexcept +{ + XMVECTOR vS = XMVectorReplicate(S); + return XMVectorDivide(V, vS); +} + +//------------------------------------------------------------------------------ + +inline XMVECTOR XM_CALLCONV operator* +( + float S, + FXMVECTOR V +) noexcept +{ + return XMVectorScale(V, S); +} + +#endif /* !_XM_NO_XMVECTOR_OVERLOADS_ */ + +#if defined(_XM_NO_INTRINSICS_) +#undef XMISNAN +#undef XMISINF +#endif + +#if defined(_XM_SSE_INTRINSICS_) +#undef XM3UNPACK3INTO4 +#undef XM3PACK4INTO3 +#endif + diff --git a/include/glad/gl.h b/include/glad/gl.h new file mode 100644 index 0000000..a9fb7c7 --- /dev/null +++ b/include/glad/gl.h @@ -0,0 +1,3199 @@ +/** + * Loader generated by glad 2.0.8 on Thu Mar 5 16:16:42 2026 + * + * SPDX-License-Identifier: (WTFPL OR CC0-1.0) AND Apache-2.0 + * + * Generator: C/C++ + * Specification: gl + * Extensions: 0 + * + * APIs: + * - gl:core=4.3 + * + * Options: + * - ALIAS = False + * - DEBUG = False + * - HEADER_ONLY = False + * - LOADER = False + * - MX = False + * - ON_DEMAND = False + * + * Commandline: + * --api='gl:core=4.3' --extensions='' c + * + * Online: + * http://glad.sh/#api=gl%3Acore%3D4.3&extensions=&generator=c&options= + * + */ + +#ifndef GLAD_GL_H_ +#define GLAD_GL_H_ + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wreserved-id-macro" +#endif +#ifdef __gl_h_ + #error OpenGL (gl.h) header already included (API: 
gl), remove previous include! +#endif +#define __gl_h_ 1 +#ifdef __gl3_h_ + #error OpenGL (gl3.h) header already included (API: gl), remove previous include! +#endif +#define __gl3_h_ 1 +#ifdef __glext_h_ + #error OpenGL (glext.h) header already included (API: gl), remove previous include! +#endif +#define __glext_h_ 1 +#ifdef __gl3ext_h_ + #error OpenGL (gl3ext.h) header already included (API: gl), remove previous include! +#endif +#define __gl3ext_h_ 1 +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +#define GLAD_GL + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef GLAD_PLATFORM_H_ +#define GLAD_PLATFORM_H_ + +#ifndef GLAD_PLATFORM_WIN32 + #if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) || defined(__MINGW32__) + #define GLAD_PLATFORM_WIN32 1 + #else + #define GLAD_PLATFORM_WIN32 0 + #endif +#endif + +#ifndef GLAD_PLATFORM_APPLE + #ifdef __APPLE__ + #define GLAD_PLATFORM_APPLE 1 + #else + #define GLAD_PLATFORM_APPLE 0 + #endif +#endif + +#ifndef GLAD_PLATFORM_EMSCRIPTEN + #ifdef __EMSCRIPTEN__ + #define GLAD_PLATFORM_EMSCRIPTEN 1 + #else + #define GLAD_PLATFORM_EMSCRIPTEN 0 + #endif +#endif + +#ifndef GLAD_PLATFORM_UWP + #if defined(_MSC_VER) && !defined(GLAD_INTERNAL_HAVE_WINAPIFAMILY) + #ifdef __has_include + #if __has_include() + #define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1 + #endif + #elif _MSC_VER >= 1700 && !_USING_V110_SDK71_ + #define GLAD_INTERNAL_HAVE_WINAPIFAMILY 1 + #endif + #endif + + #ifdef GLAD_INTERNAL_HAVE_WINAPIFAMILY + #include + #if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) + #define GLAD_PLATFORM_UWP 1 + #endif + #endif + + #ifndef GLAD_PLATFORM_UWP + #define GLAD_PLATFORM_UWP 0 + #endif +#endif + +#ifdef __GNUC__ + #define GLAD_GNUC_EXTENSION __extension__ +#else + #define GLAD_GNUC_EXTENSION +#endif + +#define GLAD_UNUSED(x) (void)(x) + +#ifndef GLAD_API_CALL + #if defined(GLAD_API_CALL_EXPORT) + #if GLAD_PLATFORM_WIN32 || defined(__CYGWIN__) + #if 
defined(GLAD_API_CALL_EXPORT_BUILD) + #if defined(__GNUC__) + #define GLAD_API_CALL __attribute__ ((dllexport)) extern + #else + #define GLAD_API_CALL __declspec(dllexport) extern + #endif + #else + #if defined(__GNUC__) + #define GLAD_API_CALL __attribute__ ((dllimport)) extern + #else + #define GLAD_API_CALL __declspec(dllimport) extern + #endif + #endif + #elif defined(__GNUC__) && defined(GLAD_API_CALL_EXPORT_BUILD) + #define GLAD_API_CALL __attribute__ ((visibility ("default"))) extern + #else + #define GLAD_API_CALL extern + #endif + #else + #define GLAD_API_CALL extern + #endif +#endif + +#ifdef APIENTRY + #define GLAD_API_PTR APIENTRY +#elif GLAD_PLATFORM_WIN32 + #define GLAD_API_PTR __stdcall +#else + #define GLAD_API_PTR +#endif + +#ifndef GLAPI +#define GLAPI GLAD_API_CALL +#endif + +#ifndef GLAPIENTRY +#define GLAPIENTRY GLAD_API_PTR +#endif + +#define GLAD_MAKE_VERSION(major, minor) (major * 10000 + minor) +#define GLAD_VERSION_MAJOR(version) (version / 10000) +#define GLAD_VERSION_MINOR(version) (version % 10000) + +#define GLAD_GENERATOR_VERSION "2.0.8" + +typedef void (*GLADapiproc)(void); + +typedef GLADapiproc (*GLADloadfunc)(const char *name); +typedef GLADapiproc (*GLADuserptrloadfunc)(void *userptr, const char *name); + +typedef void (*GLADprecallback)(const char *name, GLADapiproc apiproc, int len_args, ...); +typedef void (*GLADpostcallback)(void *ret, const char *name, GLADapiproc apiproc, int len_args, ...); + +#endif /* GLAD_PLATFORM_H_ */ + +#define GL_ACTIVE_ATOMIC_COUNTER_BUFFERS 0x92D9 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_ACTIVE_PROGRAM 0x8259 +#define GL_ACTIVE_RESOURCES 0x92F5 +#define GL_ACTIVE_SUBROUTINES 0x8DE5 +#define GL_ACTIVE_SUBROUTINE_MAX_LENGTH 0x8E48 +#define GL_ACTIVE_SUBROUTINE_UNIFORMS 0x8DE6 +#define GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS 0x8E47 +#define GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH 0x8E49 +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_ACTIVE_UNIFORMS 
0x8B86 +#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36 +#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_ACTIVE_VARIABLES 0x9305 +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_ALL_BARRIER_BITS 0xFFFFFFFF +#define GL_ALL_SHADER_BITS 0xFFFFFFFF +#define GL_ALPHA 0x1906 +#define GL_ALREADY_SIGNALED 0x911A +#define GL_ALWAYS 0x0207 +#define GL_AND 0x1501 +#define GL_AND_INVERTED 0x1504 +#define GL_AND_REVERSE 0x1502 +#define GL_ANY_SAMPLES_PASSED 0x8C2F +#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ARRAY_SIZE 0x92FB +#define GL_ARRAY_STRIDE 0x92FE +#define GL_ATOMIC_COUNTER_BARRIER_BIT 0x00001000 +#define GL_ATOMIC_COUNTER_BUFFER 0x92C0 +#define GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS 0x92C5 +#define GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES 0x92C6 +#define GL_ATOMIC_COUNTER_BUFFER_BINDING 0x92C1 +#define GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE 0x92C4 +#define GL_ATOMIC_COUNTER_BUFFER_INDEX 0x9301 +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER 0x90ED +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER 0x92CB +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER 0x92CA +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER 0x92C8 +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER 0x92C9 +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER 0x92C7 +#define GL_ATOMIC_COUNTER_BUFFER_SIZE 0x92C3 +#define GL_ATOMIC_COUNTER_BUFFER_START 0x92C2 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_AUTO_GENERATE_MIPMAP 0x8295 +#define GL_BACK 0x0405 +#define GL_BACK_LEFT 0x0402 +#define GL_BACK_RIGHT 0x0403 +#define GL_BGR 0x80E0 +#define GL_BGRA 0x80E1 +#define GL_BGRA_INTEGER 0x8D9B +#define GL_BGR_INTEGER 0x8D9A +#define GL_BLEND 0x0BE2 +#define GL_BLEND_COLOR 0x8005 +#define GL_BLEND_DST 0x0BE0 +#define GL_BLEND_DST_ALPHA 0x80CA 
+#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_EQUATION 0x8009 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_BLEND_SRC 0x0BE1 +#define GL_BLEND_SRC_ALPHA 0x80CB +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLOCK_INDEX 0x92FD +#define GL_BLUE 0x1905 +#define GL_BLUE_INTEGER 0x8D96 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_BUFFER 0x82E0 +#define GL_BUFFER_ACCESS 0x88BB +#define GL_BUFFER_ACCESS_FLAGS 0x911F +#define GL_BUFFER_BINDING 0x9302 +#define GL_BUFFER_DATA_SIZE 0x9303 +#define GL_BUFFER_MAPPED 0x88BC +#define GL_BUFFER_MAP_LENGTH 0x9120 +#define GL_BUFFER_MAP_OFFSET 0x9121 +#define GL_BUFFER_MAP_POINTER 0x88BD +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_UPDATE_BARRIER_BIT 0x00000200 +#define GL_BUFFER_USAGE 0x8765 +#define GL_BUFFER_VARIABLE 0x92E5 +#define GL_BYTE 0x1400 +#define GL_CAVEAT_SUPPORT 0x82B8 +#define GL_CCW 0x0901 +#define GL_CLAMP_READ_COLOR 0x891C +#define GL_CLAMP_TO_BORDER 0x812D +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_CLEAR 0x1500 +#define GL_CLEAR_BUFFER 0x82B4 +#define GL_CLIP_DISTANCE0 0x3000 +#define GL_CLIP_DISTANCE1 0x3001 +#define GL_CLIP_DISTANCE2 0x3002 +#define GL_CLIP_DISTANCE3 0x3003 +#define GL_CLIP_DISTANCE4 0x3004 +#define GL_CLIP_DISTANCE5 0x3005 +#define GL_CLIP_DISTANCE6 0x3006 +#define GL_CLIP_DISTANCE7 0x3007 +#define GL_COLOR 0x1800 +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_COLOR_ATTACHMENT1 0x8CE1 +#define GL_COLOR_ATTACHMENT10 0x8CEA +#define GL_COLOR_ATTACHMENT11 0x8CEB +#define GL_COLOR_ATTACHMENT12 0x8CEC +#define GL_COLOR_ATTACHMENT13 0x8CED +#define GL_COLOR_ATTACHMENT14 0x8CEE +#define GL_COLOR_ATTACHMENT15 0x8CEF +#define GL_COLOR_ATTACHMENT16 0x8CF0 +#define GL_COLOR_ATTACHMENT17 0x8CF1 +#define GL_COLOR_ATTACHMENT18 0x8CF2 +#define GL_COLOR_ATTACHMENT19 0x8CF3 +#define GL_COLOR_ATTACHMENT2 0x8CE2 +#define GL_COLOR_ATTACHMENT20 0x8CF4 +#define GL_COLOR_ATTACHMENT21 
0x8CF5 +#define GL_COLOR_ATTACHMENT22 0x8CF6 +#define GL_COLOR_ATTACHMENT23 0x8CF7 +#define GL_COLOR_ATTACHMENT24 0x8CF8 +#define GL_COLOR_ATTACHMENT25 0x8CF9 +#define GL_COLOR_ATTACHMENT26 0x8CFA +#define GL_COLOR_ATTACHMENT27 0x8CFB +#define GL_COLOR_ATTACHMENT28 0x8CFC +#define GL_COLOR_ATTACHMENT29 0x8CFD +#define GL_COLOR_ATTACHMENT3 0x8CE3 +#define GL_COLOR_ATTACHMENT30 0x8CFE +#define GL_COLOR_ATTACHMENT31 0x8CFF +#define GL_COLOR_ATTACHMENT4 0x8CE4 +#define GL_COLOR_ATTACHMENT5 0x8CE5 +#define GL_COLOR_ATTACHMENT6 0x8CE6 +#define GL_COLOR_ATTACHMENT7 0x8CE7 +#define GL_COLOR_ATTACHMENT8 0x8CE8 +#define GL_COLOR_ATTACHMENT9 0x8CE9 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_COMPONENTS 0x8283 +#define GL_COLOR_ENCODING 0x8296 +#define GL_COLOR_LOGIC_OP 0x0BF2 +#define GL_COLOR_RENDERABLE 0x8286 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_COMMAND_BARRIER_BIT 0x00000040 +#define GL_COMPARE_REF_TO_TEXTURE 0x884E +#define GL_COMPATIBLE_SUBROUTINES 0x8E4B +#define GL_COMPILE_STATUS 0x8B81 +#define GL_COMPRESSED_R11_EAC 0x9270 +#define GL_COMPRESSED_RED 0x8225 +#define GL_COMPRESSED_RED_RGTC1 0x8DBB +#define GL_COMPRESSED_RG 0x8226 +#define GL_COMPRESSED_RG11_EAC 0x9272 +#define GL_COMPRESSED_RGB 0x84ED +#define GL_COMPRESSED_RGB8_ETC2 0x9274 +#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276 +#define GL_COMPRESSED_RGBA 0x84EE +#define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278 +#define GL_COMPRESSED_RGBA_BPTC_UNORM 0x8E8C +#define GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT 0x8E8E +#define GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT 0x8E8F +#define GL_COMPRESSED_RG_RGTC2 0x8DBD +#define GL_COMPRESSED_SIGNED_R11_EAC 0x9271 +#define GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC +#define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273 +#define GL_COMPRESSED_SIGNED_RG_RGTC2 0x8DBE +#define GL_COMPRESSED_SRGB 0x8C48 +#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279 +#define GL_COMPRESSED_SRGB8_ETC2 0x9275 +#define 
GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277 +#define GL_COMPRESSED_SRGB_ALPHA 0x8C49 +#define GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM 0x8E8D +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_COMPUTE_SHADER 0x91B9 +#define GL_COMPUTE_SHADER_BIT 0x00000020 +#define GL_COMPUTE_SUBROUTINE 0x92ED +#define GL_COMPUTE_SUBROUTINE_UNIFORM 0x92F3 +#define GL_COMPUTE_TEXTURE 0x82A0 +#define GL_COMPUTE_WORK_GROUP_SIZE 0x8267 +#define GL_CONDITION_SATISFIED 0x911C +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_CONSTANT_COLOR 0x8001 +#define GL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002 +#define GL_CONTEXT_CORE_PROFILE_BIT 0x00000001 +#define GL_CONTEXT_FLAGS 0x821E +#define GL_CONTEXT_FLAG_DEBUG_BIT 0x00000002 +#define GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT 0x00000001 +#define GL_CONTEXT_PROFILE_MASK 0x9126 +#define GL_COPY 0x1503 +#define GL_COPY_INVERTED 0x150C +#define GL_COPY_READ_BUFFER 0x8F36 +#define GL_COPY_READ_BUFFER_BINDING 0x8F36 +#define GL_COPY_WRITE_BUFFER 0x8F37 +#define GL_COPY_WRITE_BUFFER_BINDING 0x8F37 +#define GL_CULL_FACE 0x0B44 +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_CURRENT_QUERY 0x8865 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_CW 0x0900 +#define GL_DEBUG_CALLBACK_FUNCTION 0x8244 +#define GL_DEBUG_CALLBACK_USER_PARAM 0x8245 +#define GL_DEBUG_GROUP_STACK_DEPTH 0x826D +#define GL_DEBUG_LOGGED_MESSAGES 0x9145 +#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH 0x8243 +#define GL_DEBUG_OUTPUT 0x92E0 +#define GL_DEBUG_OUTPUT_SYNCHRONOUS 0x8242 +#define GL_DEBUG_SEVERITY_HIGH 0x9146 +#define GL_DEBUG_SEVERITY_LOW 0x9148 +#define GL_DEBUG_SEVERITY_MEDIUM 0x9147 +#define GL_DEBUG_SEVERITY_NOTIFICATION 0x826B +#define GL_DEBUG_SOURCE_API 0x8246 +#define GL_DEBUG_SOURCE_APPLICATION 0x824A +#define GL_DEBUG_SOURCE_OTHER 0x824B +#define GL_DEBUG_SOURCE_SHADER_COMPILER 0x8248 +#define GL_DEBUG_SOURCE_THIRD_PARTY 0x8249 +#define GL_DEBUG_SOURCE_WINDOW_SYSTEM 0x8247 +#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR 
0x824D +#define GL_DEBUG_TYPE_ERROR 0x824C +#define GL_DEBUG_TYPE_MARKER 0x8268 +#define GL_DEBUG_TYPE_OTHER 0x8251 +#define GL_DEBUG_TYPE_PERFORMANCE 0x8250 +#define GL_DEBUG_TYPE_POP_GROUP 0x826A +#define GL_DEBUG_TYPE_PORTABILITY 0x824F +#define GL_DEBUG_TYPE_PUSH_GROUP 0x8269 +#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR 0x824E +#define GL_DECR 0x1E03 +#define GL_DECR_WRAP 0x8508 +#define GL_DELETE_STATUS 0x8B80 +#define GL_DEPTH 0x1801 +#define GL_DEPTH24_STENCIL8 0x88F0 +#define GL_DEPTH32F_STENCIL8 0x8CAD +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_DEPTH_CLAMP 0x864F +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_COMPONENT 0x1902 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_DEPTH_COMPONENT24 0x81A6 +#define GL_DEPTH_COMPONENT32 0x81A7 +#define GL_DEPTH_COMPONENT32F 0x8CAC +#define GL_DEPTH_COMPONENTS 0x8284 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_RENDERABLE 0x8287 +#define GL_DEPTH_STENCIL 0x84F9 +#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A +#define GL_DEPTH_STENCIL_TEXTURE_MODE 0x90EA +#define GL_DEPTH_TEST 0x0B71 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DISPATCH_INDIRECT_BUFFER 0x90EE +#define GL_DISPATCH_INDIRECT_BUFFER_BINDING 0x90EF +#define GL_DITHER 0x0BD0 +#define GL_DONT_CARE 0x1100 +#define GL_DOUBLE 0x140A +#define GL_DOUBLEBUFFER 0x0C32 +#define GL_DOUBLE_MAT2 0x8F46 +#define GL_DOUBLE_MAT2x3 0x8F49 +#define GL_DOUBLE_MAT2x4 0x8F4A +#define GL_DOUBLE_MAT3 0x8F47 +#define GL_DOUBLE_MAT3x2 0x8F4B +#define GL_DOUBLE_MAT3x4 0x8F4C +#define GL_DOUBLE_MAT4 0x8F48 +#define GL_DOUBLE_MAT4x2 0x8F4D +#define GL_DOUBLE_MAT4x3 0x8F4E +#define GL_DOUBLE_VEC2 0x8FFC +#define GL_DOUBLE_VEC3 0x8FFD +#define GL_DOUBLE_VEC4 0x8FFE +#define GL_DRAW_BUFFER 0x0C01 +#define GL_DRAW_BUFFER0 0x8825 +#define GL_DRAW_BUFFER1 0x8826 +#define GL_DRAW_BUFFER10 0x882F +#define GL_DRAW_BUFFER11 0x8830 +#define GL_DRAW_BUFFER12 0x8831 +#define GL_DRAW_BUFFER13 0x8832 +#define 
GL_DRAW_BUFFER14 0x8833 +#define GL_DRAW_BUFFER15 0x8834 +#define GL_DRAW_BUFFER2 0x8827 +#define GL_DRAW_BUFFER3 0x8828 +#define GL_DRAW_BUFFER4 0x8829 +#define GL_DRAW_BUFFER5 0x882A +#define GL_DRAW_BUFFER6 0x882B +#define GL_DRAW_BUFFER7 0x882C +#define GL_DRAW_BUFFER8 0x882D +#define GL_DRAW_BUFFER9 0x882E +#define GL_DRAW_FRAMEBUFFER 0x8CA9 +#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_DRAW_INDIRECT_BUFFER 0x8F3F +#define GL_DRAW_INDIRECT_BUFFER_BINDING 0x8F43 +#define GL_DST_ALPHA 0x0304 +#define GL_DST_COLOR 0x0306 +#define GL_DYNAMIC_COPY 0x88EA +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_DYNAMIC_READ 0x88E9 +#define GL_ELEMENT_ARRAY_BARRIER_BIT 0x00000002 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_EQUAL 0x0202 +#define GL_EQUIV 0x1509 +#define GL_EXTENSIONS 0x1F03 +#define GL_FALSE 0 +#define GL_FASTEST 0x1101 +#define GL_FILL 0x1B02 +#define GL_FILTER 0x829A +#define GL_FIRST_VERTEX_CONVENTION 0x8E4D +#define GL_FIXED 0x140C +#define GL_FIXED_ONLY 0x891D +#define GL_FLOAT 0x1406 +#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4 0x8B5C +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_FRACTIONAL_EVEN 0x8E7C +#define GL_FRACTIONAL_ODD 0x8E7B +#define GL_FRAGMENT_INTERPOLATION_OFFSET_BITS 0x8E5D +#define GL_FRAGMENT_SHADER 0x8B30 +#define GL_FRAGMENT_SHADER_BIT 0x00000002 +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B +#define GL_FRAGMENT_SUBROUTINE 0x92EC +#define GL_FRAGMENT_SUBROUTINE_UNIFORM 0x92F2 +#define GL_FRAGMENT_TEXTURE 0x829F +#define GL_FRAMEBUFFER 0x8D40 +#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215 +#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 
0x8214 +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210 +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211 +#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216 +#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED 0x8DA7 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212 +#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_BARRIER_BIT 0x00000400 +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_FRAMEBUFFER_BLEND 0x828B +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_DEFAULT 0x8218 +#define GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS 0x9314 +#define GL_FRAMEBUFFER_DEFAULT_HEIGHT 0x9311 +#define GL_FRAMEBUFFER_DEFAULT_LAYERS 0x9312 +#define GL_FRAMEBUFFER_DEFAULT_SAMPLES 0x9313 +#define GL_FRAMEBUFFER_DEFAULT_WIDTH 0x9310 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER 0x8CDB +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS 0x8DA8 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56 +#define GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER 0x8CDC +#define GL_FRAMEBUFFER_RENDERABLE 0x8289 +#define GL_FRAMEBUFFER_RENDERABLE_LAYERED 0x828A +#define GL_FRAMEBUFFER_SRGB 0x8DB9 +#define GL_FRAMEBUFFER_UNDEFINED 0x8219 +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_FRONT 0x0404 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_FRONT_FACE 0x0B46 +#define GL_FRONT_LEFT 0x0400 +#define GL_FRONT_RIGHT 0x0401 +#define GL_FULL_SUPPORT 0x82B7 +#define GL_FUNC_ADD 0x8006 +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_FUNC_SUBTRACT 0x800A +#define GL_GEOMETRY_INPUT_TYPE 0x8917 +#define 
GL_GEOMETRY_OUTPUT_TYPE 0x8918 +#define GL_GEOMETRY_SHADER 0x8DD9 +#define GL_GEOMETRY_SHADER_BIT 0x00000004 +#define GL_GEOMETRY_SHADER_INVOCATIONS 0x887F +#define GL_GEOMETRY_SUBROUTINE 0x92EB +#define GL_GEOMETRY_SUBROUTINE_UNIFORM 0x92F1 +#define GL_GEOMETRY_TEXTURE 0x829E +#define GL_GEOMETRY_VERTICES_OUT 0x8916 +#define GL_GEQUAL 0x0206 +#define GL_GET_TEXTURE_IMAGE_FORMAT 0x8291 +#define GL_GET_TEXTURE_IMAGE_TYPE 0x8292 +#define GL_GREATER 0x0204 +#define GL_GREEN 0x1904 +#define GL_GREEN_INTEGER 0x8D95 +#define GL_HALF_FLOAT 0x140B +#define GL_HIGH_FLOAT 0x8DF2 +#define GL_HIGH_INT 0x8DF5 +#define GL_IMAGE_1D 0x904C +#define GL_IMAGE_1D_ARRAY 0x9052 +#define GL_IMAGE_2D 0x904D +#define GL_IMAGE_2D_ARRAY 0x9053 +#define GL_IMAGE_2D_MULTISAMPLE 0x9055 +#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056 +#define GL_IMAGE_2D_RECT 0x904F +#define GL_IMAGE_3D 0x904E +#define GL_IMAGE_BINDING_ACCESS 0x8F3E +#define GL_IMAGE_BINDING_FORMAT 0x906E +#define GL_IMAGE_BINDING_LAYER 0x8F3D +#define GL_IMAGE_BINDING_LAYERED 0x8F3C +#define GL_IMAGE_BINDING_LEVEL 0x8F3B +#define GL_IMAGE_BINDING_NAME 0x8F3A +#define GL_IMAGE_BUFFER 0x9051 +#define GL_IMAGE_CLASS_10_10_10_2 0x82C3 +#define GL_IMAGE_CLASS_11_11_10 0x82C2 +#define GL_IMAGE_CLASS_1_X_16 0x82BE +#define GL_IMAGE_CLASS_1_X_32 0x82BB +#define GL_IMAGE_CLASS_1_X_8 0x82C1 +#define GL_IMAGE_CLASS_2_X_16 0x82BD +#define GL_IMAGE_CLASS_2_X_32 0x82BA +#define GL_IMAGE_CLASS_2_X_8 0x82C0 +#define GL_IMAGE_CLASS_4_X_16 0x82BC +#define GL_IMAGE_CLASS_4_X_32 0x82B9 +#define GL_IMAGE_CLASS_4_X_8 0x82BF +#define GL_IMAGE_COMPATIBILITY_CLASS 0x82A8 +#define GL_IMAGE_CUBE 0x9050 +#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS 0x90C9 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE 0x90C8 +#define GL_IMAGE_FORMAT_COMPATIBILITY_TYPE 0x90C7 +#define GL_IMAGE_PIXEL_FORMAT 0x82A9 +#define GL_IMAGE_PIXEL_TYPE 0x82AA +#define GL_IMAGE_TEXEL_SIZE 0x82A7 +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 
0x8B9B +#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A +#define GL_INCR 0x1E02 +#define GL_INCR_WRAP 0x8507 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_INT 0x1404 +#define GL_INTERLEAVED_ATTRIBS 0x8C8C +#define GL_INTERNALFORMAT_ALPHA_SIZE 0x8274 +#define GL_INTERNALFORMAT_ALPHA_TYPE 0x827B +#define GL_INTERNALFORMAT_BLUE_SIZE 0x8273 +#define GL_INTERNALFORMAT_BLUE_TYPE 0x827A +#define GL_INTERNALFORMAT_DEPTH_SIZE 0x8275 +#define GL_INTERNALFORMAT_DEPTH_TYPE 0x827C +#define GL_INTERNALFORMAT_GREEN_SIZE 0x8272 +#define GL_INTERNALFORMAT_GREEN_TYPE 0x8279 +#define GL_INTERNALFORMAT_PREFERRED 0x8270 +#define GL_INTERNALFORMAT_RED_SIZE 0x8271 +#define GL_INTERNALFORMAT_RED_TYPE 0x8278 +#define GL_INTERNALFORMAT_SHARED_SIZE 0x8277 +#define GL_INTERNALFORMAT_STENCIL_SIZE 0x8276 +#define GL_INTERNALFORMAT_STENCIL_TYPE 0x827D +#define GL_INTERNALFORMAT_SUPPORTED 0x826F +#define GL_INT_2_10_10_10_REV 0x8D9F +#define GL_INT_IMAGE_1D 0x9057 +#define GL_INT_IMAGE_1D_ARRAY 0x905D +#define GL_INT_IMAGE_2D 0x9058 +#define GL_INT_IMAGE_2D_ARRAY 0x905E +#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060 +#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061 +#define GL_INT_IMAGE_2D_RECT 0x905A +#define GL_INT_IMAGE_3D 0x9059 +#define GL_INT_IMAGE_BUFFER 0x905C +#define GL_INT_IMAGE_CUBE 0x905B +#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F +#define GL_INT_SAMPLER_1D 0x8DC9 +#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C +#define GL_INT_SAMPLER_2D_RECT 0x8DCD +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_BUFFER 0x8DD0 +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +#define GL_INVALID_INDEX 0xFFFFFFFF +#define 
GL_INVALID_OPERATION 0x0502 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVERT 0x150A +#define GL_ISOLINES 0x8E7A +#define GL_IS_PER_PATCH 0x92E7 +#define GL_IS_ROW_MAJOR 0x9300 +#define GL_KEEP 0x1E00 +#define GL_LAST_VERTEX_CONVENTION 0x8E4E +#define GL_LAYER_PROVOKING_VERTEX 0x825E +#define GL_LEFT 0x0406 +#define GL_LEQUAL 0x0203 +#define GL_LESS 0x0201 +#define GL_LINE 0x1B01 +#define GL_LINEAR 0x2601 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_LINES 0x0001 +#define GL_LINES_ADJACENCY 0x000A +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_SMOOTH 0x0B20 +#define GL_LINE_SMOOTH_HINT 0x0C52 +#define GL_LINE_STRIP 0x0003 +#define GL_LINE_STRIP_ADJACENCY 0x000B +#define GL_LINE_WIDTH 0x0B21 +#define GL_LINE_WIDTH_GRANULARITY 0x0B23 +#define GL_LINE_WIDTH_RANGE 0x0B22 +#define GL_LINK_STATUS 0x8B82 +#define GL_LOCATION 0x930E +#define GL_LOCATION_INDEX 0x930F +#define GL_LOGIC_OP_MODE 0x0BF0 +#define GL_LOWER_LEFT 0x8CA1 +#define GL_LOW_FLOAT 0x8DF0 +#define GL_LOW_INT 0x8DF3 +#define GL_MAJOR_VERSION 0x821B +#define GL_MANUAL_GENERATE_MIPMAP 0x8294 +#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010 +#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008 +#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004 +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MATRIX_STRIDE 0x92FF +#define GL_MAX 0x8008 +#define GL_MAX_3D_TEXTURE_SIZE 0x8073 +#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF +#define GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS 0x92DC +#define GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE 0x92D8 +#define GL_MAX_CLIP_DISTANCES 0x0D32 +#define GL_MAX_COLOR_ATTACHMENTS 0x8CDF +#define GL_MAX_COLOR_TEXTURE_SAMPLES 0x910E +#define GL_MAX_COMBINED_ATOMIC_COUNTERS 0x92D7 +#define GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS 0x92D1 +#define GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS 0x8266 +#define GL_MAX_COMBINED_DIMENSIONS 0x8282 +#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33 +#define 
GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS 0x8A32 +#define GL_MAX_COMBINED_IMAGE_UNIFORMS 0x90CF +#define GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS 0x8F39 +#define GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES 0x8F39 +#define GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS 0x90DC +#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E1E +#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E1F +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E +#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31 +#define GL_MAX_COMPUTE_ATOMIC_COUNTERS 0x8265 +#define GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS 0x8264 +#define GL_MAX_COMPUTE_IMAGE_UNIFORMS 0x91BD +#define GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS 0x90DB +#define GL_MAX_COMPUTE_SHARED_MEMORY_SIZE 0x8262 +#define GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS 0x91BC +#define GL_MAX_COMPUTE_UNIFORM_BLOCKS 0x91BB +#define GL_MAX_COMPUTE_UNIFORM_COMPONENTS 0x8263 +#define GL_MAX_COMPUTE_WORK_GROUP_COUNT 0x91BE +#define GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS 0x90EB +#define GL_MAX_COMPUTE_WORK_GROUP_SIZE 0x91BF +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_MAX_DEBUG_GROUP_STACK_DEPTH 0x826C +#define GL_MAX_DEBUG_LOGGED_MESSAGES 0x9144 +#define GL_MAX_DEBUG_MESSAGE_LENGTH 0x9143 +#define GL_MAX_DEPTH 0x8280 +#define GL_MAX_DEPTH_TEXTURE_SAMPLES 0x910F +#define GL_MAX_DRAW_BUFFERS 0x8824 +#define GL_MAX_DUAL_SOURCE_DRAW_BUFFERS 0x88FC +#define GL_MAX_ELEMENTS_INDICES 0x80E9 +#define GL_MAX_ELEMENTS_VERTICES 0x80E8 +#define GL_MAX_ELEMENT_INDEX 0x8D6B +#define GL_MAX_FRAGMENT_ATOMIC_COUNTERS 0x92D6 +#define GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS 0x92D0 +#define GL_MAX_FRAGMENT_IMAGE_UNIFORMS 0x90CE +#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125 +#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET 0x8E5C +#define GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS 0x90DA +#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D +#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49 +#define 
GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD +#define GL_MAX_FRAMEBUFFER_HEIGHT 0x9316 +#define GL_MAX_FRAMEBUFFER_LAYERS 0x9317 +#define GL_MAX_FRAMEBUFFER_SAMPLES 0x9318 +#define GL_MAX_FRAMEBUFFER_WIDTH 0x9315 +#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS 0x92D5 +#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS 0x92CF +#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS 0x90CD +#define GL_MAX_GEOMETRY_INPUT_COMPONENTS 0x9123 +#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS 0x9124 +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES 0x8DE0 +#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS 0x8E5A +#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS 0x90D7 +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS 0x8C29 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS 0x8DE1 +#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS 0x8A2C +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS 0x8DDF +#define GL_MAX_HEIGHT 0x827F +#define GL_MAX_IMAGE_SAMPLES 0x906D +#define GL_MAX_IMAGE_UNITS 0x8F38 +#define GL_MAX_INTEGER_SAMPLES 0x9110 +#define GL_MAX_LABEL_LENGTH 0x82E8 +#define GL_MAX_LAYERS 0x8281 +#define GL_MAX_NAME_LENGTH 0x92F6 +#define GL_MAX_NUM_ACTIVE_VARIABLES 0x92F7 +#define GL_MAX_NUM_COMPATIBLE_SUBROUTINES 0x92F8 +#define GL_MAX_PATCH_VERTICES 0x8E7D +#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905 +#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5F +#define GL_MAX_RECTANGLE_TEXTURE_SIZE 0x84F8 +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_MAX_SAMPLES 0x8D57 +#define GL_MAX_SAMPLE_MASK_WORDS 0x8E59 +#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111 +#define GL_MAX_SHADER_STORAGE_BLOCK_SIZE 0x90DE +#define GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS 0x90DD +#define GL_MAX_SUBROUTINES 0x8DE7 +#define GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS 0x8DE8 +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS 0x92D3 +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS 0x92CD +#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS 0x90CB +#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS 0x886C +#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS 0x8E83 +#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS 
0x90D8 +#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS 0x8E81 +#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS 0x8E85 +#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS 0x8E89 +#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E7F +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS 0x92D4 +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS 0x92CE +#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS 0x90CC +#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS 0x886D +#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS 0x8E86 +#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS 0x90D9 +#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS 0x8E82 +#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS 0x8E8A +#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E80 +#define GL_MAX_TESS_GEN_LEVEL 0x8E7E +#define GL_MAX_TESS_PATCH_COMPONENTS 0x8E84 +#define GL_MAX_TEXTURE_BUFFER_SIZE 0x8C2B +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_TRANSFORM_FEEDBACK_BUFFERS 0x8E70 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80 +#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30 +#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F +#define GL_MAX_UNIFORM_LOCATIONS 0x826E +#define GL_MAX_VARYING_COMPONENTS 0x8B4B +#define GL_MAX_VARYING_FLOATS 0x8B4B +#define GL_MAX_VARYING_VECTORS 0x8DFC +#define GL_MAX_VERTEX_ATOMIC_COUNTERS 0x92D2 +#define GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS 0x92CC +#define GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_MAX_VERTEX_ATTRIB_BINDINGS 0x82DA +#define GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D9 +#define GL_MAX_VERTEX_IMAGE_UNIFORMS 0x90CA +#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122 +#define GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS 0x90D6 +#define GL_MAX_VERTEX_STREAMS 0x8E71 +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B +#define 
GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A +#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB +#define GL_MAX_VIEWPORTS 0x825B +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_MAX_WIDTH 0x827E +#define GL_MEDIUM_FLOAT 0x8DF1 +#define GL_MEDIUM_INT 0x8DF4 +#define GL_MIN 0x8007 +#define GL_MINOR_VERSION 0x821C +#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET 0x8E5B +#define GL_MIN_MAP_BUFFER_ALIGNMENT 0x90BC +#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904 +#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5E +#define GL_MIN_SAMPLE_SHADING_VALUE 0x8C37 +#define GL_MIPMAP 0x8293 +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_MULTISAMPLE 0x809D +#define GL_NAME_LENGTH 0x92F9 +#define GL_NAND 0x150E +#define GL_NEAREST 0x2600 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_NEVER 0x0200 +#define GL_NICEST 0x1102 +#define GL_NONE 0 +#define GL_NOOP 0x1505 +#define GL_NOR 0x1508 +#define GL_NOTEQUAL 0x0205 +#define GL_NO_ERROR 0 +#define GL_NUM_ACTIVE_VARIABLES 0x9304 +#define GL_NUM_COMPATIBLE_SUBROUTINES 0x8E4A +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_NUM_EXTENSIONS 0x821D +#define GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE +#define GL_NUM_SAMPLE_COUNTS 0x9380 +#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9 +#define GL_NUM_SHADING_LANGUAGE_VERSIONS 0x82E9 +#define GL_OBJECT_TYPE 0x9112 +#define GL_OFFSET 0x92FC +#define GL_ONE 1 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_ONE_MINUS_SRC1_ALPHA 0x88FB +#define GL_ONE_MINUS_SRC1_COLOR 0x88FA +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_OR 0x1507 +#define GL_OR_INVERTED 0x150D +#define GL_OR_REVERSE 0x150B +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_PACK_COMPRESSED_BLOCK_DEPTH 0x912D +#define GL_PACK_COMPRESSED_BLOCK_HEIGHT 0x912C +#define GL_PACK_COMPRESSED_BLOCK_SIZE 
0x912E +#define GL_PACK_COMPRESSED_BLOCK_WIDTH 0x912B +#define GL_PACK_IMAGE_HEIGHT 0x806C +#define GL_PACK_LSB_FIRST 0x0D01 +#define GL_PACK_ROW_LENGTH 0x0D02 +#define GL_PACK_SKIP_IMAGES 0x806B +#define GL_PACK_SKIP_PIXELS 0x0D04 +#define GL_PACK_SKIP_ROWS 0x0D03 +#define GL_PACK_SWAP_BYTES 0x0D00 +#define GL_PATCHES 0x000E +#define GL_PATCH_DEFAULT_INNER_LEVEL 0x8E73 +#define GL_PATCH_DEFAULT_OUTER_LEVEL 0x8E74 +#define GL_PATCH_VERTICES 0x8E72 +#define GL_PIXEL_BUFFER_BARRIER_BIT 0x00000080 +#define GL_PIXEL_PACK_BUFFER 0x88EB +#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED +#define GL_PIXEL_UNPACK_BUFFER 0x88EC +#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF +#define GL_POINT 0x1B00 +#define GL_POINTS 0x0000 +#define GL_POINT_FADE_THRESHOLD_SIZE 0x8128 +#define GL_POINT_SIZE 0x0B11 +#define GL_POINT_SIZE_GRANULARITY 0x0B13 +#define GL_POINT_SIZE_RANGE 0x0B12 +#define GL_POINT_SPRITE_COORD_ORIGIN 0x8CA0 +#define GL_POLYGON_MODE 0x0B40 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_POLYGON_OFFSET_LINE 0x2A02 +#define GL_POLYGON_OFFSET_POINT 0x2A01 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_SMOOTH 0x0B41 +#define GL_POLYGON_SMOOTH_HINT 0x0C53 +#define GL_PRIMITIVES_GENERATED 0x8C87 +#define GL_PRIMITIVE_RESTART 0x8F9D +#define GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69 +#define GL_PRIMITIVE_RESTART_INDEX 0x8F9E +#define GL_PROGRAM 0x82E2 +#define GL_PROGRAM_BINARY_FORMATS 0x87FF +#define GL_PROGRAM_BINARY_LENGTH 0x8741 +#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257 +#define GL_PROGRAM_INPUT 0x92E3 +#define GL_PROGRAM_OUTPUT 0x92E4 +#define GL_PROGRAM_PIPELINE 0x82E4 +#define GL_PROGRAM_PIPELINE_BINDING 0x825A +#define GL_PROGRAM_POINT_SIZE 0x8642 +#define GL_PROGRAM_SEPARABLE 0x8258 +#define GL_PROVOKING_VERTEX 0x8E4F +#define GL_PROXY_TEXTURE_1D 0x8063 +#define GL_PROXY_TEXTURE_1D_ARRAY 0x8C19 +#define GL_PROXY_TEXTURE_2D 0x8064 +#define GL_PROXY_TEXTURE_2D_ARRAY 0x8C1B +#define 
GL_PROXY_TEXTURE_2D_MULTISAMPLE 0x9101 +#define GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9103 +#define GL_PROXY_TEXTURE_3D 0x8070 +#define GL_PROXY_TEXTURE_CUBE_MAP 0x851B +#define GL_PROXY_TEXTURE_CUBE_MAP_ARRAY 0x900B +#define GL_PROXY_TEXTURE_RECTANGLE 0x84F7 +#define GL_QUADS 0x0007 +#define GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION 0x8E4C +#define GL_QUERY 0x82E3 +#define GL_QUERY_BY_REGION_NO_WAIT 0x8E16 +#define GL_QUERY_BY_REGION_WAIT 0x8E15 +#define GL_QUERY_COUNTER_BITS 0x8864 +#define GL_QUERY_NO_WAIT 0x8E14 +#define GL_QUERY_RESULT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE 0x8867 +#define GL_QUERY_WAIT 0x8E13 +#define GL_R11F_G11F_B10F 0x8C3A +#define GL_R16 0x822A +#define GL_R16F 0x822D +#define GL_R16I 0x8233 +#define GL_R16UI 0x8234 +#define GL_R16_SNORM 0x8F98 +#define GL_R32F 0x822E +#define GL_R32I 0x8235 +#define GL_R32UI 0x8236 +#define GL_R3_G3_B2 0x2A10 +#define GL_R8 0x8229 +#define GL_R8I 0x8231 +#define GL_R8UI 0x8232 +#define GL_R8_SNORM 0x8F94 +#define GL_RASTERIZER_DISCARD 0x8C89 +#define GL_READ_BUFFER 0x0C02 +#define GL_READ_FRAMEBUFFER 0x8CA8 +#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA +#define GL_READ_ONLY 0x88B8 +#define GL_READ_PIXELS 0x828C +#define GL_READ_PIXELS_FORMAT 0x828D +#define GL_READ_PIXELS_TYPE 0x828E +#define GL_READ_WRITE 0x88BA +#define GL_RED 0x1903 +#define GL_RED_INTEGER 0x8D94 +#define GL_REFERENCED_BY_COMPUTE_SHADER 0x930B +#define GL_REFERENCED_BY_FRAGMENT_SHADER 0x930A +#define GL_REFERENCED_BY_GEOMETRY_SHADER 0x9309 +#define GL_REFERENCED_BY_TESS_CONTROL_SHADER 0x9307 +#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER 0x9308 +#define GL_REFERENCED_BY_VERTEX_SHADER 0x9306 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define 
GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_SAMPLES 0x8CAB +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERER 0x1F01 +#define GL_REPEAT 0x2901 +#define GL_REPLACE 0x1E01 +#define GL_RG 0x8227 +#define GL_RG16 0x822C +#define GL_RG16F 0x822F +#define GL_RG16I 0x8239 +#define GL_RG16UI 0x823A +#define GL_RG16_SNORM 0x8F99 +#define GL_RG32F 0x8230 +#define GL_RG32I 0x823B +#define GL_RG32UI 0x823C +#define GL_RG8 0x822B +#define GL_RG8I 0x8237 +#define GL_RG8UI 0x8238 +#define GL_RG8_SNORM 0x8F95 +#define GL_RGB 0x1907 +#define GL_RGB10 0x8052 +#define GL_RGB10_A2 0x8059 +#define GL_RGB10_A2UI 0x906F +#define GL_RGB12 0x8053 +#define GL_RGB16 0x8054 +#define GL_RGB16F 0x881B +#define GL_RGB16I 0x8D89 +#define GL_RGB16UI 0x8D77 +#define GL_RGB16_SNORM 0x8F9A +#define GL_RGB32F 0x8815 +#define GL_RGB32I 0x8D83 +#define GL_RGB32UI 0x8D71 +#define GL_RGB4 0x804F +#define GL_RGB5 0x8050 +#define GL_RGB565 0x8D62 +#define GL_RGB5_A1 0x8057 +#define GL_RGB8 0x8051 +#define GL_RGB8I 0x8D8F +#define GL_RGB8UI 0x8D7D +#define GL_RGB8_SNORM 0x8F96 +#define GL_RGB9_E5 0x8C3D +#define GL_RGBA 0x1908 +#define GL_RGBA12 0x805A +#define GL_RGBA16 0x805B +#define GL_RGBA16F 0x881A +#define GL_RGBA16I 0x8D88 +#define GL_RGBA16UI 0x8D76 +#define GL_RGBA16_SNORM 0x8F9B +#define GL_RGBA2 0x8055 +#define GL_RGBA32F 0x8814 +#define GL_RGBA32I 0x8D82 +#define GL_RGBA32UI 0x8D70 +#define GL_RGBA4 0x8056 +#define GL_RGBA8 0x8058 +#define GL_RGBA8I 0x8D8E +#define GL_RGBA8UI 0x8D7C +#define GL_RGBA8_SNORM 0x8F97 +#define GL_RGBA_INTEGER 0x8D99 +#define GL_RGB_INTEGER 0x8D98 +#define GL_RG_INTEGER 0x8228 +#define GL_RIGHT 0x0407 +#define GL_SAMPLER 0x82E6 +#define GL_SAMPLER_1D 0x8B5D +#define GL_SAMPLER_1D_ARRAY 0x8DC0 +#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3 +#define GL_SAMPLER_1D_SHADOW 0x8B61 +#define GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define 
GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B +#define GL_SAMPLER_2D_RECT 0x8B63 +#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64 +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_BINDING 0x8919 +#define GL_SAMPLER_BUFFER 0x8DC2 +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLES_PASSED 0x8914 +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_ALPHA_TO_ONE 0x809F +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_MASK 0x8E51 +#define GL_SAMPLE_MASK_VALUE 0x8E52 +#define GL_SAMPLE_POSITION 0x8E50 +#define GL_SAMPLE_SHADING 0x8C36 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_SEPARATE_ATTRIBS 0x8C8D +#define GL_SET 0x150F +#define GL_SHADER 0x82E1 +#define GL_SHADER_BINARY_FORMATS 0x8DF8 +#define GL_SHADER_COMPILER 0x8DFA +#define GL_SHADER_IMAGE_ACCESS_BARRIER_BIT 0x00000020 +#define GL_SHADER_IMAGE_ATOMIC 0x82A6 +#define GL_SHADER_IMAGE_LOAD 0x82A4 +#define GL_SHADER_IMAGE_STORE 0x82A5 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_SHADER_STORAGE_BARRIER_BIT 0x00002000 +#define GL_SHADER_STORAGE_BLOCK 0x92E6 +#define GL_SHADER_STORAGE_BUFFER 0x90D2 +#define GL_SHADER_STORAGE_BUFFER_BINDING 0x90D3 +#define GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT 0x90DF +#define GL_SHADER_STORAGE_BUFFER_SIZE 0x90D5 +#define GL_SHADER_STORAGE_BUFFER_START 0x90D4 +#define GL_SHADER_TYPE 0x8B4F +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_SHORT 0x1402 +#define GL_SIGNALED 0x9119 +#define GL_SIGNED_NORMALIZED 0x8F9C +#define GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST 0x82AC +#define GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE 0x82AE +#define GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST 0x82AD +#define 
GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE 0x82AF +#define GL_SMOOTH_LINE_WIDTH_GRANULARITY 0x0B23 +#define GL_SMOOTH_LINE_WIDTH_RANGE 0x0B22 +#define GL_SMOOTH_POINT_SIZE_GRANULARITY 0x0B13 +#define GL_SMOOTH_POINT_SIZE_RANGE 0x0B12 +#define GL_SRC1_ALPHA 0x8589 +#define GL_SRC1_COLOR 0x88F9 +#define GL_SRC_ALPHA 0x0302 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_SRC_COLOR 0x0300 +#define GL_SRGB 0x8C40 +#define GL_SRGB8 0x8C41 +#define GL_SRGB8_ALPHA8 0x8C43 +#define GL_SRGB_ALPHA 0x8C42 +#define GL_SRGB_READ 0x8297 +#define GL_SRGB_WRITE 0x8298 +#define GL_STACK_OVERFLOW 0x0503 +#define GL_STACK_UNDERFLOW 0x0504 +#define GL_STATIC_COPY 0x88E6 +#define GL_STATIC_DRAW 0x88E4 +#define GL_STATIC_READ 0x88E5 +#define GL_STENCIL 0x1802 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_COMPONENTS 0x8285 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_INDEX 0x1901 +#define GL_STENCIL_INDEX1 0x8D46 +#define GL_STENCIL_INDEX16 0x8D49 +#define GL_STENCIL_INDEX4 0x8D47 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_RENDERABLE 0x8288 +#define GL_STENCIL_TEST 0x0B90 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_STEREO 0x0C33 +#define GL_STREAM_COPY 0x88E2 +#define GL_STREAM_DRAW 0x88E0 +#define GL_STREAM_READ 0x88E1 +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_SYNC_CONDITION 0x9113 +#define GL_SYNC_FENCE 0x9116 +#define GL_SYNC_FLAGS 0x9115 +#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001 +#define 
GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117 +#define GL_SYNC_STATUS 0x9114 +#define GL_TESS_CONTROL_OUTPUT_VERTICES 0x8E75 +#define GL_TESS_CONTROL_SHADER 0x8E88 +#define GL_TESS_CONTROL_SHADER_BIT 0x00000008 +#define GL_TESS_CONTROL_SUBROUTINE 0x92E9 +#define GL_TESS_CONTROL_SUBROUTINE_UNIFORM 0x92EF +#define GL_TESS_CONTROL_TEXTURE 0x829C +#define GL_TESS_EVALUATION_SHADER 0x8E87 +#define GL_TESS_EVALUATION_SHADER_BIT 0x00000010 +#define GL_TESS_EVALUATION_SUBROUTINE 0x92EA +#define GL_TESS_EVALUATION_SUBROUTINE_UNIFORM 0x92F0 +#define GL_TESS_EVALUATION_TEXTURE 0x829D +#define GL_TESS_GEN_MODE 0x8E76 +#define GL_TESS_GEN_POINT_MODE 0x8E79 +#define GL_TESS_GEN_SPACING 0x8E77 +#define GL_TESS_GEN_VERTEX_ORDER 0x8E78 +#define GL_TEXTURE 0x1702 +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE_1D 0x0DE0 +#define GL_TEXTURE_1D_ARRAY 0x8C18 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_TEXTURE_2D_ARRAY 0x8C1A +#define GL_TEXTURE_2D_MULTISAMPLE 0x9100 +#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102 +#define GL_TEXTURE_3D 0x806F +#define GL_TEXTURE_ALPHA_SIZE 0x805F +#define GL_TEXTURE_ALPHA_TYPE 0x8C13 
+#define GL_TEXTURE_BASE_LEVEL 0x813C +#define GL_TEXTURE_BINDING_1D 0x8068 +#define GL_TEXTURE_BINDING_1D_ARRAY 0x8C1C +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE 0x9104 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY 0x9105 +#define GL_TEXTURE_BINDING_3D 0x806A +#define GL_TEXTURE_BINDING_BUFFER 0x8C2C +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY 0x900A +#define GL_TEXTURE_BINDING_RECTANGLE 0x84F6 +#define GL_TEXTURE_BLUE_SIZE 0x805E +#define GL_TEXTURE_BLUE_TYPE 0x8C12 +#define GL_TEXTURE_BORDER_COLOR 0x1004 +#define GL_TEXTURE_BUFFER 0x8C2A +#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING 0x8C2D +#define GL_TEXTURE_BUFFER_OFFSET 0x919D +#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT 0x919F +#define GL_TEXTURE_BUFFER_SIZE 0x919E +#define GL_TEXTURE_COMPARE_FUNC 0x884D +#define GL_TEXTURE_COMPARE_MODE 0x884C +#define GL_TEXTURE_COMPRESSED 0x86A1 +#define GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT 0x82B2 +#define GL_TEXTURE_COMPRESSED_BLOCK_SIZE 0x82B3 +#define GL_TEXTURE_COMPRESSED_BLOCK_WIDTH 0x82B1 +#define GL_TEXTURE_COMPRESSED_IMAGE_SIZE 0x86A0 +#define GL_TEXTURE_COMPRESSION_HINT 0x84EF +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_CUBE_MAP_ARRAY 0x9009 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_SEAMLESS 0x884F +#define GL_TEXTURE_DEPTH 0x8071 +#define GL_TEXTURE_DEPTH_SIZE 0x884A +#define GL_TEXTURE_DEPTH_TYPE 0x8C16 +#define GL_TEXTURE_FETCH_BARRIER_BIT 0x00000008 +#define GL_TEXTURE_FIXED_SAMPLE_LOCATIONS 0x9107 +#define GL_TEXTURE_GATHER 0x82A2 +#define GL_TEXTURE_GATHER_SHADOW 0x82A3 +#define GL_TEXTURE_GREEN_SIZE 0x805D +#define GL_TEXTURE_GREEN_TYPE 0x8C11 +#define 
GL_TEXTURE_HEIGHT 0x1001 +#define GL_TEXTURE_IMAGE_FORMAT 0x828F +#define GL_TEXTURE_IMAGE_TYPE 0x8290 +#define GL_TEXTURE_IMMUTABLE_FORMAT 0x912F +#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF +#define GL_TEXTURE_INTERNAL_FORMAT 0x1003 +#define GL_TEXTURE_LOD_BIAS 0x8501 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MAX_LEVEL 0x813D +#define GL_TEXTURE_MAX_LOD 0x813B +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_MIN_LOD 0x813A +#define GL_TEXTURE_RECTANGLE 0x84F5 +#define GL_TEXTURE_RED_SIZE 0x805C +#define GL_TEXTURE_RED_TYPE 0x8C10 +#define GL_TEXTURE_SAMPLES 0x9106 +#define GL_TEXTURE_SHADOW 0x82A1 +#define GL_TEXTURE_SHARED_SIZE 0x8C3F +#define GL_TEXTURE_STENCIL_SIZE 0x88F1 +#define GL_TEXTURE_SWIZZLE_A 0x8E45 +#define GL_TEXTURE_SWIZZLE_B 0x8E44 +#define GL_TEXTURE_SWIZZLE_G 0x8E43 +#define GL_TEXTURE_SWIZZLE_R 0x8E42 +#define GL_TEXTURE_SWIZZLE_RGBA 0x8E46 +#define GL_TEXTURE_UPDATE_BARRIER_BIT 0x00000100 +#define GL_TEXTURE_VIEW 0x82B5 +#define GL_TEXTURE_VIEW_MIN_LAYER 0x82DD +#define GL_TEXTURE_VIEW_MIN_LEVEL 0x82DB +#define GL_TEXTURE_VIEW_NUM_LAYERS 0x82DE +#define GL_TEXTURE_VIEW_NUM_LEVELS 0x82DC +#define GL_TEXTURE_WIDTH 0x1000 +#define GL_TEXTURE_WRAP_R 0x8072 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_TIMEOUT_EXPIRED 0x911B +#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFF +#define GL_TIMESTAMP 0x8E28 +#define GL_TIME_ELAPSED 0x88BF +#define GL_TOP_LEVEL_ARRAY_SIZE 0x930C +#define GL_TOP_LEVEL_ARRAY_STRIDE 0x930D +#define GL_TRANSFORM_FEEDBACK 0x8E22 +#define GL_TRANSFORM_FEEDBACK_ACTIVE 0x8E24 +#define GL_TRANSFORM_FEEDBACK_BARRIER_BIT 0x00000800 +#define GL_TRANSFORM_FEEDBACK_BINDING 0x8E25 +#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE 0x8E24 +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F +#define GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED 0x8E23 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85 
+#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84 +#define GL_TRANSFORM_FEEDBACK_PAUSED 0x8E23 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88 +#define GL_TRANSFORM_FEEDBACK_VARYING 0x92F4 +#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83 +#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLES_ADJACENCY 0x000C +#define GL_TRIANGLE_FAN 0x0006 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_STRIP_ADJACENCY 0x000D +#define GL_TRUE 1 +#define GL_TYPE 0x92FA +#define GL_UNDEFINED_VERTEX 0x8260 +#define GL_UNIFORM 0x92E1 +#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C +#define GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX 0x92DA +#define GL_UNIFORM_BARRIER_BIT 0x00000004 +#define GL_UNIFORM_BLOCK 0x92E2 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43 +#define GL_UNIFORM_BLOCK_BINDING 0x8A3F +#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40 +#define GL_UNIFORM_BLOCK_INDEX 0x8A3A +#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER 0x90EC +#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER 0x8A45 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER 0x84F0 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER 0x84F1 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44 +#define GL_UNIFORM_BUFFER 0x8A11 +#define GL_UNIFORM_BUFFER_BINDING 0x8A28 +#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34 +#define GL_UNIFORM_BUFFER_SIZE 0x8A2A +#define GL_UNIFORM_BUFFER_START 0x8A29 +#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E +#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D +#define GL_UNIFORM_NAME_LENGTH 0x8A39 +#define GL_UNIFORM_OFFSET 0x8A3B +#define GL_UNIFORM_SIZE 0x8A38 +#define GL_UNIFORM_TYPE 0x8A37 +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_UNPACK_COMPRESSED_BLOCK_DEPTH 0x9129 +#define GL_UNPACK_COMPRESSED_BLOCK_HEIGHT 0x9128 +#define 
GL_UNPACK_COMPRESSED_BLOCK_SIZE 0x912A +#define GL_UNPACK_COMPRESSED_BLOCK_WIDTH 0x9127 +#define GL_UNPACK_IMAGE_HEIGHT 0x806E +#define GL_UNPACK_LSB_FIRST 0x0CF1 +#define GL_UNPACK_ROW_LENGTH 0x0CF2 +#define GL_UNPACK_SKIP_IMAGES 0x806D +#define GL_UNPACK_SKIP_PIXELS 0x0CF4 +#define GL_UNPACK_SKIP_ROWS 0x0CF3 +#define GL_UNPACK_SWAP_BYTES 0x0CF0 +#define GL_UNSIGNALED 0x9118 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_UNSIGNED_BYTE_2_3_3_REV 0x8362 +#define GL_UNSIGNED_BYTE_3_3_2 0x8032 +#define GL_UNSIGNED_INT 0x1405 +#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B +#define GL_UNSIGNED_INT_10_10_10_2 0x8036 +#define GL_UNSIGNED_INT_24_8 0x84FA +#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 +#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E +#define GL_UNSIGNED_INT_8_8_8_8 0x8035 +#define GL_UNSIGNED_INT_8_8_8_8_REV 0x8367 +#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB +#define GL_UNSIGNED_INT_IMAGE_1D 0x9062 +#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068 +#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C +#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065 +#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 +#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067 +#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A +#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1 +#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6 +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D +#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 
0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 +#define GL_UNSIGNED_NORMALIZED 0x8C17 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_UNSIGNED_SHORT_1_5_5_5_REV 0x8366 +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_4_4_4_4_REV 0x8365 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_UNSIGNED_SHORT_5_6_5_REV 0x8364 +#define GL_UPPER_LEFT 0x8CA2 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_VENDOR 0x1F00 +#define GL_VERSION 0x1F02 +#define GL_VERTEX_ARRAY 0x8074 +#define GL_VERTEX_ARRAY_BINDING 0x85B5 +#define GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT 0x00000001 +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD +#define GL_VERTEX_ATTRIB_ARRAY_LONG 0x874E +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_VERTEX_ATTRIB_BINDING 0x82D4 +#define GL_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D5 +#define GL_VERTEX_BINDING_BUFFER 0x8F4F +#define GL_VERTEX_BINDING_DIVISOR 0x82D6 +#define GL_VERTEX_BINDING_OFFSET 0x82D7 +#define GL_VERTEX_BINDING_STRIDE 0x82D8 +#define GL_VERTEX_PROGRAM_POINT_SIZE 0x8642 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_VERTEX_SHADER_BIT 0x00000001 +#define GL_VERTEX_SUBROUTINE 0x92E8 +#define GL_VERTEX_SUBROUTINE_UNIFORM 0x92EE +#define GL_VERTEX_TEXTURE 0x829B +#define GL_VIEWPORT 0x0BA2 +#define GL_VIEWPORT_BOUNDS_RANGE 0x825D +#define GL_VIEWPORT_INDEX_PROVOKING_VERTEX 0x825F +#define GL_VIEWPORT_SUBPIXEL_BITS 0x825C +#define GL_VIEW_CLASS_128_BITS 0x82C4 +#define GL_VIEW_CLASS_16_BITS 0x82CA +#define GL_VIEW_CLASS_24_BITS 0x82C9 +#define GL_VIEW_CLASS_32_BITS 0x82C8 +#define GL_VIEW_CLASS_48_BITS 0x82C7 +#define GL_VIEW_CLASS_64_BITS 0x82C6 +#define 
GL_VIEW_CLASS_8_BITS 0x82CB +#define GL_VIEW_CLASS_96_BITS 0x82C5 +#define GL_VIEW_CLASS_BPTC_FLOAT 0x82D3 +#define GL_VIEW_CLASS_BPTC_UNORM 0x82D2 +#define GL_VIEW_CLASS_RGTC1_RED 0x82D0 +#define GL_VIEW_CLASS_RGTC2_RG 0x82D1 +#define GL_VIEW_CLASS_S3TC_DXT1_RGB 0x82CC +#define GL_VIEW_CLASS_S3TC_DXT1_RGBA 0x82CD +#define GL_VIEW_CLASS_S3TC_DXT3_RGBA 0x82CE +#define GL_VIEW_CLASS_S3TC_DXT5_RGBA 0x82CF +#define GL_VIEW_COMPATIBILITY_CLASS 0x82B6 +#define GL_WAIT_FAILED 0x911D +#define GL_WRITE_ONLY 0x88B9 +#define GL_XOR 0x1506 +#define GL_ZERO 0 + + +#include <KHR/khrplatform.h> +typedef unsigned int GLenum; +typedef unsigned char GLboolean; +typedef unsigned int GLbitfield; +typedef void GLvoid; +typedef khronos_int8_t GLbyte; +typedef khronos_uint8_t GLubyte; +typedef khronos_int16_t GLshort; +typedef khronos_uint16_t GLushort; +typedef int GLint; +typedef unsigned int GLuint; +typedef khronos_int32_t GLclampx; +typedef int GLsizei; +typedef khronos_float_t GLfloat; +typedef khronos_float_t GLclampf; +typedef double GLdouble; +typedef double GLclampd; +typedef void *GLeglClientBufferEXT; +typedef void *GLeglImageOES; +typedef char GLchar; +typedef char GLcharARB; +#ifdef __APPLE__ +typedef void *GLhandleARB; +#else +typedef unsigned int GLhandleARB; +#endif +typedef khronos_uint16_t GLhalf; +typedef khronos_uint16_t GLhalfARB; +typedef khronos_int32_t GLfixed; +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060) +typedef khronos_intptr_t GLintptr; +#else +typedef khronos_intptr_t GLintptr; +#endif +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060) +typedef khronos_intptr_t GLintptrARB; +#else +typedef khronos_intptr_t GLintptrARB; +#endif +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060) +typedef khronos_ssize_t GLsizeiptr; +#else +typedef khronos_ssize_t GLsizeiptr; +#endif +#if 
defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1060) +typedef khronos_ssize_t GLsizeiptrARB; +#else +typedef khronos_ssize_t GLsizeiptrARB; +#endif +typedef khronos_int64_t GLint64; +typedef khronos_int64_t GLint64EXT; +typedef khronos_uint64_t GLuint64; +typedef khronos_uint64_t GLuint64EXT; +typedef struct __GLsync *GLsync; +struct _cl_context; +struct _cl_event; +typedef void (GLAD_API_PTR *GLDEBUGPROC)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +typedef void (GLAD_API_PTR *GLDEBUGPROCARB)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +typedef void (GLAD_API_PTR *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +typedef void (GLAD_API_PTR *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severity,GLsizei length,const GLchar *message,void *userParam); +typedef unsigned short GLhalfNV; +typedef GLintptr GLvdpauSurfaceNV; +typedef void (GLAD_API_PTR *GLVULKANPROCNV)(void); + + +#define GL_VERSION_1_0 1 +GLAD_API_CALL int GLAD_GL_VERSION_1_0; +#define GL_VERSION_1_1 1 +GLAD_API_CALL int GLAD_GL_VERSION_1_1; +#define GL_VERSION_1_2 1 +GLAD_API_CALL int GLAD_GL_VERSION_1_2; +#define GL_VERSION_1_3 1 +GLAD_API_CALL int GLAD_GL_VERSION_1_3; +#define GL_VERSION_1_4 1 +GLAD_API_CALL int GLAD_GL_VERSION_1_4; +#define GL_VERSION_1_5 1 +GLAD_API_CALL int GLAD_GL_VERSION_1_5; +#define GL_VERSION_2_0 1 +GLAD_API_CALL int GLAD_GL_VERSION_2_0; +#define GL_VERSION_2_1 1 +GLAD_API_CALL int GLAD_GL_VERSION_2_1; +#define GL_VERSION_3_0 1 +GLAD_API_CALL int GLAD_GL_VERSION_3_0; +#define GL_VERSION_3_1 1 +GLAD_API_CALL int GLAD_GL_VERSION_3_1; +#define GL_VERSION_3_2 1 +GLAD_API_CALL int GLAD_GL_VERSION_3_2; +#define GL_VERSION_3_3 1 +GLAD_API_CALL int GLAD_GL_VERSION_3_3; +#define GL_VERSION_4_0 1 +GLAD_API_CALL 
int GLAD_GL_VERSION_4_0; +#define GL_VERSION_4_1 1 +GLAD_API_CALL int GLAD_GL_VERSION_4_1; +#define GL_VERSION_4_2 1 +GLAD_API_CALL int GLAD_GL_VERSION_4_2; +#define GL_VERSION_4_3 1 +GLAD_API_CALL int GLAD_GL_VERSION_4_3; + + +typedef void (GLAD_API_PTR *PFNGLACTIVESHADERPROGRAMPROC)(GLuint pipeline, GLuint program); +typedef void (GLAD_API_PTR *PFNGLACTIVETEXTUREPROC)(GLenum texture); +typedef void (GLAD_API_PTR *PFNGLATTACHSHADERPROC)(GLuint program, GLuint shader); +typedef void (GLAD_API_PTR *PFNGLBEGINCONDITIONALRENDERPROC)(GLuint id, GLenum mode); +typedef void (GLAD_API_PTR *PFNGLBEGINQUERYPROC)(GLenum target, GLuint id); +typedef void (GLAD_API_PTR *PFNGLBEGINQUERYINDEXEDPROC)(GLenum target, GLuint index, GLuint id); +typedef void (GLAD_API_PTR *PFNGLBEGINTRANSFORMFEEDBACKPROC)(GLenum primitiveMode); +typedef void (GLAD_API_PTR *PFNGLBINDATTRIBLOCATIONPROC)(GLuint program, GLuint index, const GLchar * name); +typedef void (GLAD_API_PTR *PFNGLBINDBUFFERPROC)(GLenum target, GLuint buffer); +typedef void (GLAD_API_PTR *PFNGLBINDBUFFERBASEPROC)(GLenum target, GLuint index, GLuint buffer); +typedef void (GLAD_API_PTR *PFNGLBINDBUFFERRANGEPROC)(GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (GLAD_API_PTR *PFNGLBINDFRAGDATALOCATIONPROC)(GLuint program, GLuint color, const GLchar * name); +typedef void (GLAD_API_PTR *PFNGLBINDFRAGDATALOCATIONINDEXEDPROC)(GLuint program, GLuint colorNumber, GLuint index, const GLchar * name); +typedef void (GLAD_API_PTR *PFNGLBINDFRAMEBUFFERPROC)(GLenum target, GLuint framebuffer); +typedef void (GLAD_API_PTR *PFNGLBINDIMAGETEXTUREPROC)(GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +typedef void (GLAD_API_PTR *PFNGLBINDPROGRAMPIPELINEPROC)(GLuint pipeline); +typedef void (GLAD_API_PTR *PFNGLBINDRENDERBUFFERPROC)(GLenum target, GLuint renderbuffer); +typedef void (GLAD_API_PTR *PFNGLBINDSAMPLERPROC)(GLuint unit, GLuint sampler); 
+typedef void (GLAD_API_PTR *PFNGLBINDTEXTUREPROC)(GLenum target, GLuint texture); +typedef void (GLAD_API_PTR *PFNGLBINDTRANSFORMFEEDBACKPROC)(GLenum target, GLuint id); +typedef void (GLAD_API_PTR *PFNGLBINDVERTEXARRAYPROC)(GLuint array); +typedef void (GLAD_API_PTR *PFNGLBINDVERTEXBUFFERPROC)(GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +typedef void (GLAD_API_PTR *PFNGLBLENDCOLORPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GLAD_API_PTR *PFNGLBLENDEQUATIONPROC)(GLenum mode); +typedef void (GLAD_API_PTR *PFNGLBLENDEQUATIONSEPARATEPROC)(GLenum modeRGB, GLenum modeAlpha); +typedef void (GLAD_API_PTR *PFNGLBLENDEQUATIONSEPARATEIPROC)(GLuint buf, GLenum modeRGB, GLenum modeAlpha); +typedef void (GLAD_API_PTR *PFNGLBLENDEQUATIONIPROC)(GLuint buf, GLenum mode); +typedef void (GLAD_API_PTR *PFNGLBLENDFUNCPROC)(GLenum sfactor, GLenum dfactor); +typedef void (GLAD_API_PTR *PFNGLBLENDFUNCSEPARATEPROC)(GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +typedef void (GLAD_API_PTR *PFNGLBLENDFUNCSEPARATEIPROC)(GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +typedef void (GLAD_API_PTR *PFNGLBLENDFUNCIPROC)(GLuint buf, GLenum src, GLenum dst); +typedef void (GLAD_API_PTR *PFNGLBLITFRAMEBUFFERPROC)(GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +typedef void (GLAD_API_PTR *PFNGLBUFFERDATAPROC)(GLenum target, GLsizeiptr size, const void * data, GLenum usage); +typedef void (GLAD_API_PTR *PFNGLBUFFERSUBDATAPROC)(GLenum target, GLintptr offset, GLsizeiptr size, const void * data); +typedef GLenum (GLAD_API_PTR *PFNGLCHECKFRAMEBUFFERSTATUSPROC)(GLenum target); +typedef void (GLAD_API_PTR *PFNGLCLAMPCOLORPROC)(GLenum target, GLenum clamp); +typedef void (GLAD_API_PTR *PFNGLCLEARPROC)(GLbitfield mask); +typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERDATAPROC)(GLenum target, GLenum 
internalformat, GLenum format, GLenum type, const void * data); +typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERSUBDATAPROC)(GLenum target, GLenum internalformat, GLintptr offset, GLsizeiptr size, GLenum format, GLenum type, const void * data); +typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERFIPROC)(GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERFVPROC)(GLenum buffer, GLint drawbuffer, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERIVPROC)(GLenum buffer, GLint drawbuffer, const GLint * value); +typedef void (GLAD_API_PTR *PFNGLCLEARBUFFERUIVPROC)(GLenum buffer, GLint drawbuffer, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLCLEARCOLORPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GLAD_API_PTR *PFNGLCLEARDEPTHPROC)(GLdouble depth); +typedef void (GLAD_API_PTR *PFNGLCLEARDEPTHFPROC)(GLfloat d); +typedef void (GLAD_API_PTR *PFNGLCLEARSTENCILPROC)(GLint s); +typedef GLenum (GLAD_API_PTR *PFNGLCLIENTWAITSYNCPROC)(GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GLAD_API_PTR *PFNGLCOLORMASKPROC)(GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +typedef void (GLAD_API_PTR *PFNGLCOLORMASKIPROC)(GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +typedef void (GLAD_API_PTR *PFNGLCOMPILESHADERPROC)(GLuint shader); +typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXIMAGE1DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void * data); +typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXIMAGE2DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void * data); +typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXIMAGE3DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void * data); +typedef void 
(GLAD_API_PTR *PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC)(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void * data); +typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void * data); +typedef void (GLAD_API_PTR *PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void * data); +typedef void (GLAD_API_PTR *PFNGLCOPYBUFFERSUBDATAPROC)(GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +typedef void (GLAD_API_PTR *PFNGLCOPYIMAGESUBDATAPROC)(GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +typedef void (GLAD_API_PTR *PFNGLCOPYTEXIMAGE1DPROC)(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border); +typedef void (GLAD_API_PTR *PFNGLCOPYTEXIMAGE2DPROC)(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (GLAD_API_PTR *PFNGLCOPYTEXSUBIMAGE1DPROC)(GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width); +typedef void (GLAD_API_PTR *PFNGLCOPYTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GLAD_API_PTR *PFNGLCOPYTEXSUBIMAGE3DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef GLuint (GLAD_API_PTR *PFNGLCREATEPROGRAMPROC)(void); +typedef GLuint (GLAD_API_PTR *PFNGLCREATESHADERPROC)(GLenum type); +typedef GLuint 
(GLAD_API_PTR *PFNGLCREATESHADERPROGRAMVPROC)(GLenum type, GLsizei count, const GLchar *const* strings); +typedef void (GLAD_API_PTR *PFNGLCULLFACEPROC)(GLenum mode); +typedef void (GLAD_API_PTR *PFNGLDEBUGMESSAGECALLBACKPROC)(GLDEBUGPROC callback, const void * userParam); +typedef void (GLAD_API_PTR *PFNGLDEBUGMESSAGECONTROLPROC)(GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint * ids, GLboolean enabled); +typedef void (GLAD_API_PTR *PFNGLDEBUGMESSAGEINSERTPROC)(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar * buf); +typedef void (GLAD_API_PTR *PFNGLDELETEBUFFERSPROC)(GLsizei n, const GLuint * buffers); +typedef void (GLAD_API_PTR *PFNGLDELETEFRAMEBUFFERSPROC)(GLsizei n, const GLuint * framebuffers); +typedef void (GLAD_API_PTR *PFNGLDELETEPROGRAMPROC)(GLuint program); +typedef void (GLAD_API_PTR *PFNGLDELETEPROGRAMPIPELINESPROC)(GLsizei n, const GLuint * pipelines); +typedef void (GLAD_API_PTR *PFNGLDELETEQUERIESPROC)(GLsizei n, const GLuint * ids); +typedef void (GLAD_API_PTR *PFNGLDELETERENDERBUFFERSPROC)(GLsizei n, const GLuint * renderbuffers); +typedef void (GLAD_API_PTR *PFNGLDELETESAMPLERSPROC)(GLsizei count, const GLuint * samplers); +typedef void (GLAD_API_PTR *PFNGLDELETESHADERPROC)(GLuint shader); +typedef void (GLAD_API_PTR *PFNGLDELETESYNCPROC)(GLsync sync); +typedef void (GLAD_API_PTR *PFNGLDELETETEXTURESPROC)(GLsizei n, const GLuint * textures); +typedef void (GLAD_API_PTR *PFNGLDELETETRANSFORMFEEDBACKSPROC)(GLsizei n, const GLuint * ids); +typedef void (GLAD_API_PTR *PFNGLDELETEVERTEXARRAYSPROC)(GLsizei n, const GLuint * arrays); +typedef void (GLAD_API_PTR *PFNGLDEPTHFUNCPROC)(GLenum func); +typedef void (GLAD_API_PTR *PFNGLDEPTHMASKPROC)(GLboolean flag); +typedef void (GLAD_API_PTR *PFNGLDEPTHRANGEPROC)(GLdouble n, GLdouble f); +typedef void (GLAD_API_PTR *PFNGLDEPTHRANGEARRAYVPROC)(GLuint first, GLsizei count, const GLdouble * v); +typedef void (GLAD_API_PTR 
*PFNGLDEPTHRANGEINDEXEDPROC)(GLuint index, GLdouble n, GLdouble f); +typedef void (GLAD_API_PTR *PFNGLDEPTHRANGEFPROC)(GLfloat n, GLfloat f); +typedef void (GLAD_API_PTR *PFNGLDETACHSHADERPROC)(GLuint program, GLuint shader); +typedef void (GLAD_API_PTR *PFNGLDISABLEPROC)(GLenum cap); +typedef void (GLAD_API_PTR *PFNGLDISABLEVERTEXATTRIBARRAYPROC)(GLuint index); +typedef void (GLAD_API_PTR *PFNGLDISABLEIPROC)(GLenum target, GLuint index); +typedef void (GLAD_API_PTR *PFNGLDISPATCHCOMPUTEPROC)(GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +typedef void (GLAD_API_PTR *PFNGLDISPATCHCOMPUTEINDIRECTPROC)(GLintptr indirect); +typedef void (GLAD_API_PTR *PFNGLDRAWARRAYSPROC)(GLenum mode, GLint first, GLsizei count); +typedef void (GLAD_API_PTR *PFNGLDRAWARRAYSINDIRECTPROC)(GLenum mode, const void * indirect); +typedef void (GLAD_API_PTR *PFNGLDRAWARRAYSINSTANCEDPROC)(GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +typedef void (GLAD_API_PTR *PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC)(GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance); +typedef void (GLAD_API_PTR *PFNGLDRAWBUFFERPROC)(GLenum buf); +typedef void (GLAD_API_PTR *PFNGLDRAWBUFFERSPROC)(GLsizei n, const GLenum * bufs); +typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices); +typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSBASEVERTEXPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices, GLint basevertex); +typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSINDIRECTPROC)(GLenum mode, GLenum type, const void * indirect); +typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSINSTANCEDPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices, GLsizei instancecount); +typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices, GLsizei instancecount, GLuint baseinstance); +typedef void (GLAD_API_PTR 
*PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices, GLsizei instancecount, GLint basevertex); +typedef void (GLAD_API_PTR *PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC)(GLenum mode, GLsizei count, GLenum type, const void * indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance); +typedef void (GLAD_API_PTR *PFNGLDRAWRANGEELEMENTSPROC)(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void * indices); +typedef void (GLAD_API_PTR *PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC)(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void * indices, GLint basevertex); +typedef void (GLAD_API_PTR *PFNGLDRAWTRANSFORMFEEDBACKPROC)(GLenum mode, GLuint id); +typedef void (GLAD_API_PTR *PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC)(GLenum mode, GLuint id, GLsizei instancecount); +typedef void (GLAD_API_PTR *PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC)(GLenum mode, GLuint id, GLuint stream); +typedef void (GLAD_API_PTR *PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC)(GLenum mode, GLuint id, GLuint stream, GLsizei instancecount); +typedef void (GLAD_API_PTR *PFNGLENABLEPROC)(GLenum cap); +typedef void (GLAD_API_PTR *PFNGLENABLEVERTEXATTRIBARRAYPROC)(GLuint index); +typedef void (GLAD_API_PTR *PFNGLENABLEIPROC)(GLenum target, GLuint index); +typedef void (GLAD_API_PTR *PFNGLENDCONDITIONALRENDERPROC)(void); +typedef void (GLAD_API_PTR *PFNGLENDQUERYPROC)(GLenum target); +typedef void (GLAD_API_PTR *PFNGLENDQUERYINDEXEDPROC)(GLenum target, GLuint index); +typedef void (GLAD_API_PTR *PFNGLENDTRANSFORMFEEDBACKPROC)(void); +typedef GLsync (GLAD_API_PTR *PFNGLFENCESYNCPROC)(GLenum condition, GLbitfield flags); +typedef void (GLAD_API_PTR *PFNGLFINISHPROC)(void); +typedef void (GLAD_API_PTR *PFNGLFLUSHPROC)(void); +typedef void (GLAD_API_PTR *PFNGLFLUSHMAPPEDBUFFERRANGEPROC)(GLenum target, GLintptr offset, GLsizeiptr length); +typedef void (GLAD_API_PTR 
*PFNGLFRAMEBUFFERPARAMETERIPROC)(GLenum target, GLenum pname, GLint param); +typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERRENDERBUFFERPROC)(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTUREPROC)(GLenum target, GLenum attachment, GLuint texture, GLint level); +typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTURE1DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTURE2DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTURE3DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +typedef void (GLAD_API_PTR *PFNGLFRAMEBUFFERTEXTURELAYERPROC)(GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +typedef void (GLAD_API_PTR *PFNGLFRONTFACEPROC)(GLenum mode); +typedef void (GLAD_API_PTR *PFNGLGENBUFFERSPROC)(GLsizei n, GLuint * buffers); +typedef void (GLAD_API_PTR *PFNGLGENFRAMEBUFFERSPROC)(GLsizei n, GLuint * framebuffers); +typedef void (GLAD_API_PTR *PFNGLGENPROGRAMPIPELINESPROC)(GLsizei n, GLuint * pipelines); +typedef void (GLAD_API_PTR *PFNGLGENQUERIESPROC)(GLsizei n, GLuint * ids); +typedef void (GLAD_API_PTR *PFNGLGENRENDERBUFFERSPROC)(GLsizei n, GLuint * renderbuffers); +typedef void (GLAD_API_PTR *PFNGLGENSAMPLERSPROC)(GLsizei count, GLuint * samplers); +typedef void (GLAD_API_PTR *PFNGLGENTEXTURESPROC)(GLsizei n, GLuint * textures); +typedef void (GLAD_API_PTR *PFNGLGENTRANSFORMFEEDBACKSPROC)(GLsizei n, GLuint * ids); +typedef void (GLAD_API_PTR *PFNGLGENVERTEXARRAYSPROC)(GLsizei n, GLuint * arrays); +typedef void (GLAD_API_PTR *PFNGLGENERATEMIPMAPPROC)(GLenum target); +typedef void (GLAD_API_PTR *PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC)(GLuint program, GLuint bufferIndex, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR 
*PFNGLGETACTIVEATTRIBPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei * length, GLint * size, GLenum * type, GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETACTIVESUBROUTINENAMEPROC)(GLuint program, GLenum shadertype, GLuint index, GLsizei bufSize, GLsizei * length, GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC)(GLuint program, GLenum shadertype, GLuint index, GLsizei bufSize, GLsizei * length, GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC)(GLuint program, GLenum shadertype, GLuint index, GLenum pname, GLint * values); +typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei * length, GLint * size, GLenum * type, GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC)(GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei * length, GLchar * uniformBlockName); +typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMBLOCKIVPROC)(GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMNAMEPROC)(GLuint program, GLuint uniformIndex, GLsizei bufSize, GLsizei * length, GLchar * uniformName); +typedef void (GLAD_API_PTR *PFNGLGETACTIVEUNIFORMSIVPROC)(GLuint program, GLsizei uniformCount, const GLuint * uniformIndices, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETATTACHEDSHADERSPROC)(GLuint program, GLsizei maxCount, GLsizei * count, GLuint * shaders); +typedef GLint (GLAD_API_PTR *PFNGLGETATTRIBLOCATIONPROC)(GLuint program, const GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETBOOLEANI_VPROC)(GLenum target, GLuint index, GLboolean * data); +typedef void (GLAD_API_PTR *PFNGLGETBOOLEANVPROC)(GLenum pname, GLboolean * data); +typedef void (GLAD_API_PTR *PFNGLGETBUFFERPARAMETERI64VPROC)(GLenum target, GLenum pname, GLint64 * params); +typedef void (GLAD_API_PTR *PFNGLGETBUFFERPARAMETERIVPROC)(GLenum target, 
GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETBUFFERPOINTERVPROC)(GLenum target, GLenum pname, void ** params); +typedef void (GLAD_API_PTR *PFNGLGETBUFFERSUBDATAPROC)(GLenum target, GLintptr offset, GLsizeiptr size, void * data); +typedef void (GLAD_API_PTR *PFNGLGETCOMPRESSEDTEXIMAGEPROC)(GLenum target, GLint level, void * img); +typedef GLuint (GLAD_API_PTR *PFNGLGETDEBUGMESSAGELOGPROC)(GLuint count, GLsizei bufSize, GLenum * sources, GLenum * types, GLuint * ids, GLenum * severities, GLsizei * lengths, GLchar * messageLog); +typedef void (GLAD_API_PTR *PFNGLGETDOUBLEI_VPROC)(GLenum target, GLuint index, GLdouble * data); +typedef void (GLAD_API_PTR *PFNGLGETDOUBLEVPROC)(GLenum pname, GLdouble * data); +typedef GLenum (GLAD_API_PTR *PFNGLGETERRORPROC)(void); +typedef void (GLAD_API_PTR *PFNGLGETFLOATI_VPROC)(GLenum target, GLuint index, GLfloat * data); +typedef void (GLAD_API_PTR *PFNGLGETFLOATVPROC)(GLenum pname, GLfloat * data); +typedef GLint (GLAD_API_PTR *PFNGLGETFRAGDATAINDEXPROC)(GLuint program, const GLchar * name); +typedef GLint (GLAD_API_PTR *PFNGLGETFRAGDATALOCATIONPROC)(GLuint program, const GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC)(GLenum target, GLenum attachment, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETFRAMEBUFFERPARAMETERIVPROC)(GLenum target, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETINTEGER64I_VPROC)(GLenum target, GLuint index, GLint64 * data); +typedef void (GLAD_API_PTR *PFNGLGETINTEGER64VPROC)(GLenum pname, GLint64 * data); +typedef void (GLAD_API_PTR *PFNGLGETINTEGERI_VPROC)(GLenum target, GLuint index, GLint * data); +typedef void (GLAD_API_PTR *PFNGLGETINTEGERVPROC)(GLenum pname, GLint * data); +typedef void (GLAD_API_PTR *PFNGLGETINTERNALFORMATI64VPROC)(GLenum target, GLenum internalformat, GLenum pname, GLsizei count, GLint64 * params); +typedef void (GLAD_API_PTR *PFNGLGETINTERNALFORMATIVPROC)(GLenum target, 
GLenum internalformat, GLenum pname, GLsizei count, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETMULTISAMPLEFVPROC)(GLenum pname, GLuint index, GLfloat * val); +typedef void (GLAD_API_PTR *PFNGLGETOBJECTLABELPROC)(GLenum identifier, GLuint name, GLsizei bufSize, GLsizei * length, GLchar * label); +typedef void (GLAD_API_PTR *PFNGLGETOBJECTPTRLABELPROC)(const void * ptr, GLsizei bufSize, GLsizei * length, GLchar * label); +typedef void (GLAD_API_PTR *PFNGLGETPOINTERVPROC)(GLenum pname, void ** params); +typedef void (GLAD_API_PTR *PFNGLGETPROGRAMBINARYPROC)(GLuint program, GLsizei bufSize, GLsizei * length, GLenum * binaryFormat, void * binary); +typedef void (GLAD_API_PTR *PFNGLGETPROGRAMINFOLOGPROC)(GLuint program, GLsizei bufSize, GLsizei * length, GLchar * infoLog); +typedef void (GLAD_API_PTR *PFNGLGETPROGRAMINTERFACEIVPROC)(GLuint program, GLenum programInterface, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETPROGRAMPIPELINEINFOLOGPROC)(GLuint pipeline, GLsizei bufSize, GLsizei * length, GLchar * infoLog); +typedef void (GLAD_API_PTR *PFNGLGETPROGRAMPIPELINEIVPROC)(GLuint pipeline, GLenum pname, GLint * params); +typedef GLuint (GLAD_API_PTR *PFNGLGETPROGRAMRESOURCEINDEXPROC)(GLuint program, GLenum programInterface, const GLchar * name); +typedef GLint (GLAD_API_PTR *PFNGLGETPROGRAMRESOURCELOCATIONPROC)(GLuint program, GLenum programInterface, const GLchar * name); +typedef GLint (GLAD_API_PTR *PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC)(GLuint program, GLenum programInterface, const GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETPROGRAMRESOURCENAMEPROC)(GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei * length, GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETPROGRAMRESOURCEIVPROC)(GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum * props, GLsizei count, GLsizei * length, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETPROGRAMSTAGEIVPROC)(GLuint 
program, GLenum shadertype, GLenum pname, GLint * values); +typedef void (GLAD_API_PTR *PFNGLGETPROGRAMIVPROC)(GLuint program, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETQUERYINDEXEDIVPROC)(GLenum target, GLuint index, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETQUERYOBJECTI64VPROC)(GLuint id, GLenum pname, GLint64 * params); +typedef void (GLAD_API_PTR *PFNGLGETQUERYOBJECTIVPROC)(GLuint id, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETQUERYOBJECTUI64VPROC)(GLuint id, GLenum pname, GLuint64 * params); +typedef void (GLAD_API_PTR *PFNGLGETQUERYOBJECTUIVPROC)(GLuint id, GLenum pname, GLuint * params); +typedef void (GLAD_API_PTR *PFNGLGETQUERYIVPROC)(GLenum target, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETRENDERBUFFERPARAMETERIVPROC)(GLenum target, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETSAMPLERPARAMETERIIVPROC)(GLuint sampler, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETSAMPLERPARAMETERIUIVPROC)(GLuint sampler, GLenum pname, GLuint * params); +typedef void (GLAD_API_PTR *PFNGLGETSAMPLERPARAMETERFVPROC)(GLuint sampler, GLenum pname, GLfloat * params); +typedef void (GLAD_API_PTR *PFNGLGETSAMPLERPARAMETERIVPROC)(GLuint sampler, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETSHADERINFOLOGPROC)(GLuint shader, GLsizei bufSize, GLsizei * length, GLchar * infoLog); +typedef void (GLAD_API_PTR *PFNGLGETSHADERPRECISIONFORMATPROC)(GLenum shadertype, GLenum precisiontype, GLint * range, GLint * precision); +typedef void (GLAD_API_PTR *PFNGLGETSHADERSOURCEPROC)(GLuint shader, GLsizei bufSize, GLsizei * length, GLchar * source); +typedef void (GLAD_API_PTR *PFNGLGETSHADERIVPROC)(GLuint shader, GLenum pname, GLint * params); +typedef const GLubyte * (GLAD_API_PTR *PFNGLGETSTRINGPROC)(GLenum name); +typedef const GLubyte * (GLAD_API_PTR *PFNGLGETSTRINGIPROC)(GLenum name, GLuint index); +typedef GLuint 
(GLAD_API_PTR *PFNGLGETSUBROUTINEINDEXPROC)(GLuint program, GLenum shadertype, const GLchar * name); +typedef GLint (GLAD_API_PTR *PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC)(GLuint program, GLenum shadertype, const GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETSYNCIVPROC)(GLsync sync, GLenum pname, GLsizei count, GLsizei * length, GLint * values); +typedef void (GLAD_API_PTR *PFNGLGETTEXIMAGEPROC)(GLenum target, GLint level, GLenum format, GLenum type, void * pixels); +typedef void (GLAD_API_PTR *PFNGLGETTEXLEVELPARAMETERFVPROC)(GLenum target, GLint level, GLenum pname, GLfloat * params); +typedef void (GLAD_API_PTR *PFNGLGETTEXLEVELPARAMETERIVPROC)(GLenum target, GLint level, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERIIVPROC)(GLenum target, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERIUIVPROC)(GLenum target, GLenum pname, GLuint * params); +typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERFVPROC)(GLenum target, GLenum pname, GLfloat * params); +typedef void (GLAD_API_PTR *PFNGLGETTEXPARAMETERIVPROC)(GLenum target, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETTRANSFORMFEEDBACKVARYINGPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei * length, GLsizei * size, GLenum * type, GLchar * name); +typedef GLuint (GLAD_API_PTR *PFNGLGETUNIFORMBLOCKINDEXPROC)(GLuint program, const GLchar * uniformBlockName); +typedef void (GLAD_API_PTR *PFNGLGETUNIFORMINDICESPROC)(GLuint program, GLsizei uniformCount, const GLchar *const* uniformNames, GLuint * uniformIndices); +typedef GLint (GLAD_API_PTR *PFNGLGETUNIFORMLOCATIONPROC)(GLuint program, const GLchar * name); +typedef void (GLAD_API_PTR *PFNGLGETUNIFORMSUBROUTINEUIVPROC)(GLenum shadertype, GLint location, GLuint * params); +typedef void (GLAD_API_PTR *PFNGLGETUNIFORMDVPROC)(GLuint program, GLint location, GLdouble * params); +typedef void (GLAD_API_PTR *PFNGLGETUNIFORMFVPROC)(GLuint program, GLint location, GLfloat * 
params); +typedef void (GLAD_API_PTR *PFNGLGETUNIFORMIVPROC)(GLuint program, GLint location, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETUNIFORMUIVPROC)(GLuint program, GLint location, GLuint * params); +typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBIIVPROC)(GLuint index, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBIUIVPROC)(GLuint index, GLenum pname, GLuint * params); +typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBLDVPROC)(GLuint index, GLenum pname, GLdouble * params); +typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBPOINTERVPROC)(GLuint index, GLenum pname, void ** pointer); +typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBDVPROC)(GLuint index, GLenum pname, GLdouble * params); +typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBFVPROC)(GLuint index, GLenum pname, GLfloat * params); +typedef void (GLAD_API_PTR *PFNGLGETVERTEXATTRIBIVPROC)(GLuint index, GLenum pname, GLint * params); +typedef void (GLAD_API_PTR *PFNGLHINTPROC)(GLenum target, GLenum mode); +typedef void (GLAD_API_PTR *PFNGLINVALIDATEBUFFERDATAPROC)(GLuint buffer); +typedef void (GLAD_API_PTR *PFNGLINVALIDATEBUFFERSUBDATAPROC)(GLuint buffer, GLintptr offset, GLsizeiptr length); +typedef void (GLAD_API_PTR *PFNGLINVALIDATEFRAMEBUFFERPROC)(GLenum target, GLsizei numAttachments, const GLenum * attachments); +typedef void (GLAD_API_PTR *PFNGLINVALIDATESUBFRAMEBUFFERPROC)(GLenum target, GLsizei numAttachments, const GLenum * attachments, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GLAD_API_PTR *PFNGLINVALIDATETEXIMAGEPROC)(GLuint texture, GLint level); +typedef void (GLAD_API_PTR *PFNGLINVALIDATETEXSUBIMAGEPROC)(GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth); +typedef GLboolean (GLAD_API_PTR *PFNGLISBUFFERPROC)(GLuint buffer); +typedef GLboolean (GLAD_API_PTR *PFNGLISENABLEDPROC)(GLenum cap); +typedef GLboolean (GLAD_API_PTR *PFNGLISENABLEDIPROC)(GLenum target, 
GLuint index); +typedef GLboolean (GLAD_API_PTR *PFNGLISFRAMEBUFFERPROC)(GLuint framebuffer); +typedef GLboolean (GLAD_API_PTR *PFNGLISPROGRAMPROC)(GLuint program); +typedef GLboolean (GLAD_API_PTR *PFNGLISPROGRAMPIPELINEPROC)(GLuint pipeline); +typedef GLboolean (GLAD_API_PTR *PFNGLISQUERYPROC)(GLuint id); +typedef GLboolean (GLAD_API_PTR *PFNGLISRENDERBUFFERPROC)(GLuint renderbuffer); +typedef GLboolean (GLAD_API_PTR *PFNGLISSAMPLERPROC)(GLuint sampler); +typedef GLboolean (GLAD_API_PTR *PFNGLISSHADERPROC)(GLuint shader); +typedef GLboolean (GLAD_API_PTR *PFNGLISSYNCPROC)(GLsync sync); +typedef GLboolean (GLAD_API_PTR *PFNGLISTEXTUREPROC)(GLuint texture); +typedef GLboolean (GLAD_API_PTR *PFNGLISTRANSFORMFEEDBACKPROC)(GLuint id); +typedef GLboolean (GLAD_API_PTR *PFNGLISVERTEXARRAYPROC)(GLuint array); +typedef void (GLAD_API_PTR *PFNGLLINEWIDTHPROC)(GLfloat width); +typedef void (GLAD_API_PTR *PFNGLLINKPROGRAMPROC)(GLuint program); +typedef void (GLAD_API_PTR *PFNGLLOGICOPPROC)(GLenum opcode); +typedef void * (GLAD_API_PTR *PFNGLMAPBUFFERPROC)(GLenum target, GLenum access); +typedef void * (GLAD_API_PTR *PFNGLMAPBUFFERRANGEPROC)(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GLAD_API_PTR *PFNGLMEMORYBARRIERPROC)(GLbitfield barriers); +typedef void (GLAD_API_PTR *PFNGLMINSAMPLESHADINGPROC)(GLfloat value); +typedef void (GLAD_API_PTR *PFNGLMULTIDRAWARRAYSPROC)(GLenum mode, const GLint * first, const GLsizei * count, GLsizei drawcount); +typedef void (GLAD_API_PTR *PFNGLMULTIDRAWARRAYSINDIRECTPROC)(GLenum mode, const void * indirect, GLsizei drawcount, GLsizei stride); +typedef void (GLAD_API_PTR *PFNGLMULTIDRAWELEMENTSPROC)(GLenum mode, const GLsizei * count, GLenum type, const void *const* indices, GLsizei drawcount); +typedef void (GLAD_API_PTR *PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC)(GLenum mode, const GLsizei * count, GLenum type, const void *const* indices, GLsizei drawcount, const GLint * basevertex); +typedef void 
(GLAD_API_PTR *PFNGLMULTIDRAWELEMENTSINDIRECTPROC)(GLenum mode, GLenum type, const void * indirect, GLsizei drawcount, GLsizei stride); +typedef void (GLAD_API_PTR *PFNGLOBJECTLABELPROC)(GLenum identifier, GLuint name, GLsizei length, const GLchar * label); +typedef void (GLAD_API_PTR *PFNGLOBJECTPTRLABELPROC)(const void * ptr, GLsizei length, const GLchar * label); +typedef void (GLAD_API_PTR *PFNGLPATCHPARAMETERFVPROC)(GLenum pname, const GLfloat * values); +typedef void (GLAD_API_PTR *PFNGLPATCHPARAMETERIPROC)(GLenum pname, GLint value); +typedef void (GLAD_API_PTR *PFNGLPAUSETRANSFORMFEEDBACKPROC)(void); +typedef void (GLAD_API_PTR *PFNGLPIXELSTOREFPROC)(GLenum pname, GLfloat param); +typedef void (GLAD_API_PTR *PFNGLPIXELSTOREIPROC)(GLenum pname, GLint param); +typedef void (GLAD_API_PTR *PFNGLPOINTPARAMETERFPROC)(GLenum pname, GLfloat param); +typedef void (GLAD_API_PTR *PFNGLPOINTPARAMETERFVPROC)(GLenum pname, const GLfloat * params); +typedef void (GLAD_API_PTR *PFNGLPOINTPARAMETERIPROC)(GLenum pname, GLint param); +typedef void (GLAD_API_PTR *PFNGLPOINTPARAMETERIVPROC)(GLenum pname, const GLint * params); +typedef void (GLAD_API_PTR *PFNGLPOINTSIZEPROC)(GLfloat size); +typedef void (GLAD_API_PTR *PFNGLPOLYGONMODEPROC)(GLenum face, GLenum mode); +typedef void (GLAD_API_PTR *PFNGLPOLYGONOFFSETPROC)(GLfloat factor, GLfloat units); +typedef void (GLAD_API_PTR *PFNGLPOPDEBUGGROUPPROC)(void); +typedef void (GLAD_API_PTR *PFNGLPRIMITIVERESTARTINDEXPROC)(GLuint index); +typedef void (GLAD_API_PTR *PFNGLPROGRAMBINARYPROC)(GLuint program, GLenum binaryFormat, const void * binary, GLsizei length); +typedef void (GLAD_API_PTR *PFNGLPROGRAMPARAMETERIPROC)(GLuint program, GLenum pname, GLint value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM1DPROC)(GLuint program, GLint location, GLdouble v0); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM1DVPROC)(GLuint program, GLint location, GLsizei count, const GLdouble * value); +typedef void (GLAD_API_PTR 
*PFNGLPROGRAMUNIFORM1FPROC)(GLuint program, GLint location, GLfloat v0); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM1FVPROC)(GLuint program, GLint location, GLsizei count, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM1IPROC)(GLuint program, GLint location, GLint v0); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM1IVPROC)(GLuint program, GLint location, GLsizei count, const GLint * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM1UIPROC)(GLuint program, GLint location, GLuint v0); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM1UIVPROC)(GLuint program, GLint location, GLsizei count, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM2DPROC)(GLuint program, GLint location, GLdouble v0, GLdouble v1); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM2DVPROC)(GLuint program, GLint location, GLsizei count, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM2FPROC)(GLuint program, GLint location, GLfloat v0, GLfloat v1); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM2FVPROC)(GLuint program, GLint location, GLsizei count, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM2IPROC)(GLuint program, GLint location, GLint v0, GLint v1); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM2IVPROC)(GLuint program, GLint location, GLsizei count, const GLint * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM2UIPROC)(GLuint program, GLint location, GLuint v0, GLuint v1); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM2UIVPROC)(GLuint program, GLint location, GLsizei count, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM3DPROC)(GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM3DVPROC)(GLuint program, GLint location, GLsizei count, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM3FPROC)(GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); 
+typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM3FVPROC)(GLuint program, GLint location, GLsizei count, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM3IPROC)(GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM3IVPROC)(GLuint program, GLint location, GLsizei count, const GLint * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM3UIPROC)(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM3UIVPROC)(GLuint program, GLint location, GLsizei count, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM4DPROC)(GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2, GLdouble v3); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM4DVPROC)(GLuint program, GLint location, GLsizei count, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM4FPROC)(GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM4FVPROC)(GLuint program, GLint location, GLsizei count, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM4IPROC)(GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM4IVPROC)(GLuint program, GLint location, GLsizei count, const GLint * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM4UIPROC)(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORM4UIVPROC)(GLuint program, GLint location, GLsizei count, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX2DVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX2FVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef 
void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX3DVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX3FVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX4DVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX4FVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC)(GLuint program, GLint location, GLsizei count, GLboolean 
transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLPROVOKINGVERTEXPROC)(GLenum mode); +typedef void (GLAD_API_PTR *PFNGLPUSHDEBUGGROUPPROC)(GLenum source, GLuint id, GLsizei length, const GLchar * message); +typedef void (GLAD_API_PTR *PFNGLQUERYCOUNTERPROC)(GLuint id, GLenum target); +typedef void (GLAD_API_PTR *PFNGLREADBUFFERPROC)(GLenum src); +typedef void (GLAD_API_PTR *PFNGLREADPIXELSPROC)(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void * pixels); +typedef void (GLAD_API_PTR *PFNGLRELEASESHADERCOMPILERPROC)(void); +typedef void (GLAD_API_PTR *PFNGLRENDERBUFFERSTORAGEPROC)(GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GLAD_API_PTR *PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GLAD_API_PTR *PFNGLRESUMETRANSFORMFEEDBACKPROC)(void); +typedef void (GLAD_API_PTR *PFNGLSAMPLECOVERAGEPROC)(GLfloat value, GLboolean invert); +typedef void (GLAD_API_PTR *PFNGLSAMPLEMASKIPROC)(GLuint maskNumber, GLbitfield mask); +typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERIIVPROC)(GLuint sampler, GLenum pname, const GLint * param); +typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERIUIVPROC)(GLuint sampler, GLenum pname, const GLuint * param); +typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERFPROC)(GLuint sampler, GLenum pname, GLfloat param); +typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERFVPROC)(GLuint sampler, GLenum pname, 
const GLfloat * param); +typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERIPROC)(GLuint sampler, GLenum pname, GLint param); +typedef void (GLAD_API_PTR *PFNGLSAMPLERPARAMETERIVPROC)(GLuint sampler, GLenum pname, const GLint * param); +typedef void (GLAD_API_PTR *PFNGLSCISSORPROC)(GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GLAD_API_PTR *PFNGLSCISSORARRAYVPROC)(GLuint first, GLsizei count, const GLint * v); +typedef void (GLAD_API_PTR *PFNGLSCISSORINDEXEDPROC)(GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +typedef void (GLAD_API_PTR *PFNGLSCISSORINDEXEDVPROC)(GLuint index, const GLint * v); +typedef void (GLAD_API_PTR *PFNGLSHADERBINARYPROC)(GLsizei count, const GLuint * shaders, GLenum binaryFormat, const void * binary, GLsizei length); +typedef void (GLAD_API_PTR *PFNGLSHADERSOURCEPROC)(GLuint shader, GLsizei count, const GLchar *const* string, const GLint * length); +typedef void (GLAD_API_PTR *PFNGLSHADERSTORAGEBLOCKBINDINGPROC)(GLuint program, GLuint storageBlockIndex, GLuint storageBlockBinding); +typedef void (GLAD_API_PTR *PFNGLSTENCILFUNCPROC)(GLenum func, GLint ref, GLuint mask); +typedef void (GLAD_API_PTR *PFNGLSTENCILFUNCSEPARATEPROC)(GLenum face, GLenum func, GLint ref, GLuint mask); +typedef void (GLAD_API_PTR *PFNGLSTENCILMASKPROC)(GLuint mask); +typedef void (GLAD_API_PTR *PFNGLSTENCILMASKSEPARATEPROC)(GLenum face, GLuint mask); +typedef void (GLAD_API_PTR *PFNGLSTENCILOPPROC)(GLenum fail, GLenum zfail, GLenum zpass); +typedef void (GLAD_API_PTR *PFNGLSTENCILOPSEPARATEPROC)(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (GLAD_API_PTR *PFNGLTEXBUFFERPROC)(GLenum target, GLenum internalformat, GLuint buffer); +typedef void (GLAD_API_PTR *PFNGLTEXBUFFERRANGEPROC)(GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (GLAD_API_PTR *PFNGLTEXIMAGE1DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLint border, 
GLenum format, GLenum type, const void * pixels); +typedef void (GLAD_API_PTR *PFNGLTEXIMAGE2DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void * pixels); +typedef void (GLAD_API_PTR *PFNGLTEXIMAGE2DMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +typedef void (GLAD_API_PTR *PFNGLTEXIMAGE3DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void * pixels); +typedef void (GLAD_API_PTR *PFNGLTEXIMAGE3DMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIIVPROC)(GLenum target, GLenum pname, const GLint * params); +typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIUIVPROC)(GLenum target, GLenum pname, const GLuint * params); +typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERFPROC)(GLenum target, GLenum pname, GLfloat param); +typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERFVPROC)(GLenum target, GLenum pname, const GLfloat * params); +typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIPROC)(GLenum target, GLenum pname, GLint param); +typedef void (GLAD_API_PTR *PFNGLTEXPARAMETERIVPROC)(GLenum target, GLenum pname, const GLint * params); +typedef void (GLAD_API_PTR *PFNGLTEXSTORAGE1DPROC)(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +typedef void (GLAD_API_PTR *PFNGLTEXSTORAGE2DPROC)(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GLAD_API_PTR *PFNGLTEXSTORAGE2DMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +typedef void (GLAD_API_PTR *PFNGLTEXSTORAGE3DPROC)(GLenum target, GLsizei levels, GLenum 
internalformat, GLsizei width, GLsizei height, GLsizei depth); +typedef void (GLAD_API_PTR *PFNGLTEXSTORAGE3DMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +typedef void (GLAD_API_PTR *PFNGLTEXSUBIMAGE1DPROC)(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void * pixels); +typedef void (GLAD_API_PTR *PFNGLTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void * pixels); +typedef void (GLAD_API_PTR *PFNGLTEXSUBIMAGE3DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void * pixels); +typedef void (GLAD_API_PTR *PFNGLTEXTUREVIEWPROC)(GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers); +typedef void (GLAD_API_PTR *PFNGLTRANSFORMFEEDBACKVARYINGSPROC)(GLuint program, GLsizei count, const GLchar *const* varyings, GLenum bufferMode); +typedef void (GLAD_API_PTR *PFNGLUNIFORM1DPROC)(GLint location, GLdouble x); +typedef void (GLAD_API_PTR *PFNGLUNIFORM1DVPROC)(GLint location, GLsizei count, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM1FPROC)(GLint location, GLfloat v0); +typedef void (GLAD_API_PTR *PFNGLUNIFORM1FVPROC)(GLint location, GLsizei count, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM1IPROC)(GLint location, GLint v0); +typedef void (GLAD_API_PTR *PFNGLUNIFORM1IVPROC)(GLint location, GLsizei count, const GLint * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM1UIPROC)(GLint location, GLuint v0); +typedef void (GLAD_API_PTR *PFNGLUNIFORM1UIVPROC)(GLint location, GLsizei count, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM2DPROC)(GLint location, GLdouble x, GLdouble y); 
+typedef void (GLAD_API_PTR *PFNGLUNIFORM2DVPROC)(GLint location, GLsizei count, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM2FPROC)(GLint location, GLfloat v0, GLfloat v1); +typedef void (GLAD_API_PTR *PFNGLUNIFORM2FVPROC)(GLint location, GLsizei count, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM2IPROC)(GLint location, GLint v0, GLint v1); +typedef void (GLAD_API_PTR *PFNGLUNIFORM2IVPROC)(GLint location, GLsizei count, const GLint * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM2UIPROC)(GLint location, GLuint v0, GLuint v1); +typedef void (GLAD_API_PTR *PFNGLUNIFORM2UIVPROC)(GLint location, GLsizei count, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM3DPROC)(GLint location, GLdouble x, GLdouble y, GLdouble z); +typedef void (GLAD_API_PTR *PFNGLUNIFORM3DVPROC)(GLint location, GLsizei count, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM3FPROC)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GLAD_API_PTR *PFNGLUNIFORM3FVPROC)(GLint location, GLsizei count, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM3IPROC)(GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GLAD_API_PTR *PFNGLUNIFORM3IVPROC)(GLint location, GLsizei count, const GLint * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM3UIPROC)(GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GLAD_API_PTR *PFNGLUNIFORM3UIVPROC)(GLint location, GLsizei count, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM4DPROC)(GLint location, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (GLAD_API_PTR *PFNGLUNIFORM4DVPROC)(GLint location, GLsizei count, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM4FPROC)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GLAD_API_PTR *PFNGLUNIFORM4FVPROC)(GLint location, GLsizei count, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM4IPROC)(GLint 
location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GLAD_API_PTR *PFNGLUNIFORM4IVPROC)(GLint location, GLsizei count, const GLint * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORM4UIPROC)(GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GLAD_API_PTR *PFNGLUNIFORM4UIVPROC)(GLint location, GLsizei count, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMBLOCKBINDINGPROC)(GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2DVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2X3DVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2X3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2X4DVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX2X4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX3DVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX3X2DVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX3X2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX3X4DVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR 
*PFNGLUNIFORMMATRIX3X4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4DVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4X2DVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4X2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4X3DVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLdouble * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMMATRIX4X3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat * value); +typedef void (GLAD_API_PTR *PFNGLUNIFORMSUBROUTINESUIVPROC)(GLenum shadertype, GLsizei count, const GLuint * indices); +typedef GLboolean (GLAD_API_PTR *PFNGLUNMAPBUFFERPROC)(GLenum target); +typedef void (GLAD_API_PTR *PFNGLUSEPROGRAMPROC)(GLuint program); +typedef void (GLAD_API_PTR *PFNGLUSEPROGRAMSTAGESPROC)(GLuint pipeline, GLbitfield stages, GLuint program); +typedef void (GLAD_API_PTR *PFNGLVALIDATEPROGRAMPROC)(GLuint program); +typedef void (GLAD_API_PTR *PFNGLVALIDATEPROGRAMPIPELINEPROC)(GLuint pipeline); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1DPROC)(GLuint index, GLdouble x); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1DVPROC)(GLuint index, const GLdouble * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1FPROC)(GLuint index, GLfloat x); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1FVPROC)(GLuint index, const GLfloat * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1SPROC)(GLuint index, GLshort x); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB1SVPROC)(GLuint index, const GLshort * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2DPROC)(GLuint 
index, GLdouble x, GLdouble y); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2DVPROC)(GLuint index, const GLdouble * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2FPROC)(GLuint index, GLfloat x, GLfloat y); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2FVPROC)(GLuint index, const GLfloat * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2SPROC)(GLuint index, GLshort x, GLshort y); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB2SVPROC)(GLuint index, const GLshort * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3DPROC)(GLuint index, GLdouble x, GLdouble y, GLdouble z); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3DVPROC)(GLuint index, const GLdouble * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3FPROC)(GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3FVPROC)(GLuint index, const GLfloat * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3SPROC)(GLuint index, GLshort x, GLshort y, GLshort z); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB3SVPROC)(GLuint index, const GLshort * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NBVPROC)(GLuint index, const GLbyte * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NIVPROC)(GLuint index, const GLint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NSVPROC)(GLuint index, const GLshort * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NUBPROC)(GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NUBVPROC)(GLuint index, const GLubyte * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NUIVPROC)(GLuint index, const GLuint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4NUSVPROC)(GLuint index, const GLushort * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4BVPROC)(GLuint index, const GLbyte * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4DPROC)(GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4DVPROC)(GLuint index, const 
GLdouble * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4FPROC)(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4FVPROC)(GLuint index, const GLfloat * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4IVPROC)(GLuint index, const GLint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4SPROC)(GLuint index, GLshort x, GLshort y, GLshort z, GLshort w); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4SVPROC)(GLuint index, const GLshort * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4UBVPROC)(GLuint index, const GLubyte * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4UIVPROC)(GLuint index, const GLuint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIB4USVPROC)(GLuint index, const GLushort * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBBINDINGPROC)(GLuint attribindex, GLuint bindingindex); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBDIVISORPROC)(GLuint index, GLuint divisor); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBFORMATPROC)(GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI1IPROC)(GLuint index, GLint x); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI1IVPROC)(GLuint index, const GLint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI1UIPROC)(GLuint index, GLuint x); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI1UIVPROC)(GLuint index, const GLuint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI2IPROC)(GLuint index, GLint x, GLint y); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI2IVPROC)(GLuint index, const GLint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI2UIPROC)(GLuint index, GLuint x, GLuint y); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI2UIVPROC)(GLuint index, const GLuint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI3IPROC)(GLuint index, GLint x, GLint y, GLint z); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI3IVPROC)(GLuint index, const GLint * v); 
+typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI3UIPROC)(GLuint index, GLuint x, GLuint y, GLuint z); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI3UIVPROC)(GLuint index, const GLuint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4BVPROC)(GLuint index, const GLbyte * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4IPROC)(GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4IVPROC)(GLuint index, const GLint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4SVPROC)(GLuint index, const GLshort * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4UBVPROC)(GLuint index, const GLubyte * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4UIPROC)(GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4UIVPROC)(GLuint index, const GLuint * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBI4USVPROC)(GLuint index, const GLushort * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBIFORMATPROC)(GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBIPOINTERPROC)(GLuint index, GLint size, GLenum type, GLsizei stride, const void * pointer); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBL1DPROC)(GLuint index, GLdouble x); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBL1DVPROC)(GLuint index, const GLdouble * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBL2DPROC)(GLuint index, GLdouble x, GLdouble y); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBL2DVPROC)(GLuint index, const GLdouble * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBL3DPROC)(GLuint index, GLdouble x, GLdouble y, GLdouble z); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBL3DVPROC)(GLuint index, const GLdouble * v); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBL4DPROC)(GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBL4DVPROC)(GLuint index, const GLdouble * v); +typedef void 
(GLAD_API_PTR *PFNGLVERTEXATTRIBLFORMATPROC)(GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBLPOINTERPROC)(GLuint index, GLint size, GLenum type, GLsizei stride, const void * pointer); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP1UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP1UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP2UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP2UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP3UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP3UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP4UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBP4UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint * value); +typedef void (GLAD_API_PTR *PFNGLVERTEXATTRIBPOINTERPROC)(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void * pointer); +typedef void (GLAD_API_PTR *PFNGLVERTEXBINDINGDIVISORPROC)(GLuint bindingindex, GLuint divisor); +typedef void (GLAD_API_PTR *PFNGLVIEWPORTPROC)(GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GLAD_API_PTR *PFNGLVIEWPORTARRAYVPROC)(GLuint first, GLsizei count, const GLfloat * v); +typedef void (GLAD_API_PTR *PFNGLVIEWPORTINDEXEDFPROC)(GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +typedef void (GLAD_API_PTR *PFNGLVIEWPORTINDEXEDFVPROC)(GLuint index, const GLfloat * v); +typedef void (GLAD_API_PTR *PFNGLWAITSYNCPROC)(GLsync sync, GLbitfield flags, GLuint64 
timeout); + +GLAD_API_CALL PFNGLACTIVESHADERPROGRAMPROC glad_glActiveShaderProgram; +#define glActiveShaderProgram glad_glActiveShaderProgram +GLAD_API_CALL PFNGLACTIVETEXTUREPROC glad_glActiveTexture; +#define glActiveTexture glad_glActiveTexture +GLAD_API_CALL PFNGLATTACHSHADERPROC glad_glAttachShader; +#define glAttachShader glad_glAttachShader +GLAD_API_CALL PFNGLBEGINCONDITIONALRENDERPROC glad_glBeginConditionalRender; +#define glBeginConditionalRender glad_glBeginConditionalRender +GLAD_API_CALL PFNGLBEGINQUERYPROC glad_glBeginQuery; +#define glBeginQuery glad_glBeginQuery +GLAD_API_CALL PFNGLBEGINQUERYINDEXEDPROC glad_glBeginQueryIndexed; +#define glBeginQueryIndexed glad_glBeginQueryIndexed +GLAD_API_CALL PFNGLBEGINTRANSFORMFEEDBACKPROC glad_glBeginTransformFeedback; +#define glBeginTransformFeedback glad_glBeginTransformFeedback +GLAD_API_CALL PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation; +#define glBindAttribLocation glad_glBindAttribLocation +GLAD_API_CALL PFNGLBINDBUFFERPROC glad_glBindBuffer; +#define glBindBuffer glad_glBindBuffer +GLAD_API_CALL PFNGLBINDBUFFERBASEPROC glad_glBindBufferBase; +#define glBindBufferBase glad_glBindBufferBase +GLAD_API_CALL PFNGLBINDBUFFERRANGEPROC glad_glBindBufferRange; +#define glBindBufferRange glad_glBindBufferRange +GLAD_API_CALL PFNGLBINDFRAGDATALOCATIONPROC glad_glBindFragDataLocation; +#define glBindFragDataLocation glad_glBindFragDataLocation +GLAD_API_CALL PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_glBindFragDataLocationIndexed; +#define glBindFragDataLocationIndexed glad_glBindFragDataLocationIndexed +GLAD_API_CALL PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer; +#define glBindFramebuffer glad_glBindFramebuffer +GLAD_API_CALL PFNGLBINDIMAGETEXTUREPROC glad_glBindImageTexture; +#define glBindImageTexture glad_glBindImageTexture +GLAD_API_CALL PFNGLBINDPROGRAMPIPELINEPROC glad_glBindProgramPipeline; +#define glBindProgramPipeline glad_glBindProgramPipeline +GLAD_API_CALL PFNGLBINDRENDERBUFFERPROC 
glad_glBindRenderbuffer; +#define glBindRenderbuffer glad_glBindRenderbuffer +GLAD_API_CALL PFNGLBINDSAMPLERPROC glad_glBindSampler; +#define glBindSampler glad_glBindSampler +GLAD_API_CALL PFNGLBINDTEXTUREPROC glad_glBindTexture; +#define glBindTexture glad_glBindTexture +GLAD_API_CALL PFNGLBINDTRANSFORMFEEDBACKPROC glad_glBindTransformFeedback; +#define glBindTransformFeedback glad_glBindTransformFeedback +GLAD_API_CALL PFNGLBINDVERTEXARRAYPROC glad_glBindVertexArray; +#define glBindVertexArray glad_glBindVertexArray +GLAD_API_CALL PFNGLBINDVERTEXBUFFERPROC glad_glBindVertexBuffer; +#define glBindVertexBuffer glad_glBindVertexBuffer +GLAD_API_CALL PFNGLBLENDCOLORPROC glad_glBlendColor; +#define glBlendColor glad_glBlendColor +GLAD_API_CALL PFNGLBLENDEQUATIONPROC glad_glBlendEquation; +#define glBlendEquation glad_glBlendEquation +GLAD_API_CALL PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate; +#define glBlendEquationSeparate glad_glBlendEquationSeparate +GLAD_API_CALL PFNGLBLENDEQUATIONSEPARATEIPROC glad_glBlendEquationSeparatei; +#define glBlendEquationSeparatei glad_glBlendEquationSeparatei +GLAD_API_CALL PFNGLBLENDEQUATIONIPROC glad_glBlendEquationi; +#define glBlendEquationi glad_glBlendEquationi +GLAD_API_CALL PFNGLBLENDFUNCPROC glad_glBlendFunc; +#define glBlendFunc glad_glBlendFunc +GLAD_API_CALL PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate; +#define glBlendFuncSeparate glad_glBlendFuncSeparate +GLAD_API_CALL PFNGLBLENDFUNCSEPARATEIPROC glad_glBlendFuncSeparatei; +#define glBlendFuncSeparatei glad_glBlendFuncSeparatei +GLAD_API_CALL PFNGLBLENDFUNCIPROC glad_glBlendFunci; +#define glBlendFunci glad_glBlendFunci +GLAD_API_CALL PFNGLBLITFRAMEBUFFERPROC glad_glBlitFramebuffer; +#define glBlitFramebuffer glad_glBlitFramebuffer +GLAD_API_CALL PFNGLBUFFERDATAPROC glad_glBufferData; +#define glBufferData glad_glBufferData +GLAD_API_CALL PFNGLBUFFERSUBDATAPROC glad_glBufferSubData; +#define glBufferSubData glad_glBufferSubData +GLAD_API_CALL 
PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus; +#define glCheckFramebufferStatus glad_glCheckFramebufferStatus +GLAD_API_CALL PFNGLCLAMPCOLORPROC glad_glClampColor; +#define glClampColor glad_glClampColor +GLAD_API_CALL PFNGLCLEARPROC glad_glClear; +#define glClear glad_glClear +GLAD_API_CALL PFNGLCLEARBUFFERDATAPROC glad_glClearBufferData; +#define glClearBufferData glad_glClearBufferData +GLAD_API_CALL PFNGLCLEARBUFFERSUBDATAPROC glad_glClearBufferSubData; +#define glClearBufferSubData glad_glClearBufferSubData +GLAD_API_CALL PFNGLCLEARBUFFERFIPROC glad_glClearBufferfi; +#define glClearBufferfi glad_glClearBufferfi +GLAD_API_CALL PFNGLCLEARBUFFERFVPROC glad_glClearBufferfv; +#define glClearBufferfv glad_glClearBufferfv +GLAD_API_CALL PFNGLCLEARBUFFERIVPROC glad_glClearBufferiv; +#define glClearBufferiv glad_glClearBufferiv +GLAD_API_CALL PFNGLCLEARBUFFERUIVPROC glad_glClearBufferuiv; +#define glClearBufferuiv glad_glClearBufferuiv +GLAD_API_CALL PFNGLCLEARCOLORPROC glad_glClearColor; +#define glClearColor glad_glClearColor +GLAD_API_CALL PFNGLCLEARDEPTHPROC glad_glClearDepth; +#define glClearDepth glad_glClearDepth +GLAD_API_CALL PFNGLCLEARDEPTHFPROC glad_glClearDepthf; +#define glClearDepthf glad_glClearDepthf +GLAD_API_CALL PFNGLCLEARSTENCILPROC glad_glClearStencil; +#define glClearStencil glad_glClearStencil +GLAD_API_CALL PFNGLCLIENTWAITSYNCPROC glad_glClientWaitSync; +#define glClientWaitSync glad_glClientWaitSync +GLAD_API_CALL PFNGLCOLORMASKPROC glad_glColorMask; +#define glColorMask glad_glColorMask +GLAD_API_CALL PFNGLCOLORMASKIPROC glad_glColorMaski; +#define glColorMaski glad_glColorMaski +GLAD_API_CALL PFNGLCOMPILESHADERPROC glad_glCompileShader; +#define glCompileShader glad_glCompileShader +GLAD_API_CALL PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_glCompressedTexImage1D; +#define glCompressedTexImage1D glad_glCompressedTexImage1D +GLAD_API_CALL PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D; +#define glCompressedTexImage2D 
glad_glCompressedTexImage2D +GLAD_API_CALL PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_glCompressedTexImage3D; +#define glCompressedTexImage3D glad_glCompressedTexImage3D +GLAD_API_CALL PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_glCompressedTexSubImage1D; +#define glCompressedTexSubImage1D glad_glCompressedTexSubImage1D +GLAD_API_CALL PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D; +#define glCompressedTexSubImage2D glad_glCompressedTexSubImage2D +GLAD_API_CALL PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glad_glCompressedTexSubImage3D; +#define glCompressedTexSubImage3D glad_glCompressedTexSubImage3D +GLAD_API_CALL PFNGLCOPYBUFFERSUBDATAPROC glad_glCopyBufferSubData; +#define glCopyBufferSubData glad_glCopyBufferSubData +GLAD_API_CALL PFNGLCOPYIMAGESUBDATAPROC glad_glCopyImageSubData; +#define glCopyImageSubData glad_glCopyImageSubData +GLAD_API_CALL PFNGLCOPYTEXIMAGE1DPROC glad_glCopyTexImage1D; +#define glCopyTexImage1D glad_glCopyTexImage1D +GLAD_API_CALL PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D; +#define glCopyTexImage2D glad_glCopyTexImage2D +GLAD_API_CALL PFNGLCOPYTEXSUBIMAGE1DPROC glad_glCopyTexSubImage1D; +#define glCopyTexSubImage1D glad_glCopyTexSubImage1D +GLAD_API_CALL PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D; +#define glCopyTexSubImage2D glad_glCopyTexSubImage2D +GLAD_API_CALL PFNGLCOPYTEXSUBIMAGE3DPROC glad_glCopyTexSubImage3D; +#define glCopyTexSubImage3D glad_glCopyTexSubImage3D +GLAD_API_CALL PFNGLCREATEPROGRAMPROC glad_glCreateProgram; +#define glCreateProgram glad_glCreateProgram +GLAD_API_CALL PFNGLCREATESHADERPROC glad_glCreateShader; +#define glCreateShader glad_glCreateShader +GLAD_API_CALL PFNGLCREATESHADERPROGRAMVPROC glad_glCreateShaderProgramv; +#define glCreateShaderProgramv glad_glCreateShaderProgramv +GLAD_API_CALL PFNGLCULLFACEPROC glad_glCullFace; +#define glCullFace glad_glCullFace +GLAD_API_CALL PFNGLDEBUGMESSAGECALLBACKPROC glad_glDebugMessageCallback; +#define glDebugMessageCallback glad_glDebugMessageCallback 
+GLAD_API_CALL PFNGLDEBUGMESSAGECONTROLPROC glad_glDebugMessageControl; +#define glDebugMessageControl glad_glDebugMessageControl +GLAD_API_CALL PFNGLDEBUGMESSAGEINSERTPROC glad_glDebugMessageInsert; +#define glDebugMessageInsert glad_glDebugMessageInsert +GLAD_API_CALL PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers; +#define glDeleteBuffers glad_glDeleteBuffers +GLAD_API_CALL PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers; +#define glDeleteFramebuffers glad_glDeleteFramebuffers +GLAD_API_CALL PFNGLDELETEPROGRAMPROC glad_glDeleteProgram; +#define glDeleteProgram glad_glDeleteProgram +GLAD_API_CALL PFNGLDELETEPROGRAMPIPELINESPROC glad_glDeleteProgramPipelines; +#define glDeleteProgramPipelines glad_glDeleteProgramPipelines +GLAD_API_CALL PFNGLDELETEQUERIESPROC glad_glDeleteQueries; +#define glDeleteQueries glad_glDeleteQueries +GLAD_API_CALL PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers; +#define glDeleteRenderbuffers glad_glDeleteRenderbuffers +GLAD_API_CALL PFNGLDELETESAMPLERSPROC glad_glDeleteSamplers; +#define glDeleteSamplers glad_glDeleteSamplers +GLAD_API_CALL PFNGLDELETESHADERPROC glad_glDeleteShader; +#define glDeleteShader glad_glDeleteShader +GLAD_API_CALL PFNGLDELETESYNCPROC glad_glDeleteSync; +#define glDeleteSync glad_glDeleteSync +GLAD_API_CALL PFNGLDELETETEXTURESPROC glad_glDeleteTextures; +#define glDeleteTextures glad_glDeleteTextures +GLAD_API_CALL PFNGLDELETETRANSFORMFEEDBACKSPROC glad_glDeleteTransformFeedbacks; +#define glDeleteTransformFeedbacks glad_glDeleteTransformFeedbacks +GLAD_API_CALL PFNGLDELETEVERTEXARRAYSPROC glad_glDeleteVertexArrays; +#define glDeleteVertexArrays glad_glDeleteVertexArrays +GLAD_API_CALL PFNGLDEPTHFUNCPROC glad_glDepthFunc; +#define glDepthFunc glad_glDepthFunc +GLAD_API_CALL PFNGLDEPTHMASKPROC glad_glDepthMask; +#define glDepthMask glad_glDepthMask +GLAD_API_CALL PFNGLDEPTHRANGEPROC glad_glDepthRange; +#define glDepthRange glad_glDepthRange +GLAD_API_CALL PFNGLDEPTHRANGEARRAYVPROC 
glad_glDepthRangeArrayv; +#define glDepthRangeArrayv glad_glDepthRangeArrayv +GLAD_API_CALL PFNGLDEPTHRANGEINDEXEDPROC glad_glDepthRangeIndexed; +#define glDepthRangeIndexed glad_glDepthRangeIndexed +GLAD_API_CALL PFNGLDEPTHRANGEFPROC glad_glDepthRangef; +#define glDepthRangef glad_glDepthRangef +GLAD_API_CALL PFNGLDETACHSHADERPROC glad_glDetachShader; +#define glDetachShader glad_glDetachShader +GLAD_API_CALL PFNGLDISABLEPROC glad_glDisable; +#define glDisable glad_glDisable +GLAD_API_CALL PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray; +#define glDisableVertexAttribArray glad_glDisableVertexAttribArray +GLAD_API_CALL PFNGLDISABLEIPROC glad_glDisablei; +#define glDisablei glad_glDisablei +GLAD_API_CALL PFNGLDISPATCHCOMPUTEPROC glad_glDispatchCompute; +#define glDispatchCompute glad_glDispatchCompute +GLAD_API_CALL PFNGLDISPATCHCOMPUTEINDIRECTPROC glad_glDispatchComputeIndirect; +#define glDispatchComputeIndirect glad_glDispatchComputeIndirect +GLAD_API_CALL PFNGLDRAWARRAYSPROC glad_glDrawArrays; +#define glDrawArrays glad_glDrawArrays +GLAD_API_CALL PFNGLDRAWARRAYSINDIRECTPROC glad_glDrawArraysIndirect; +#define glDrawArraysIndirect glad_glDrawArraysIndirect +GLAD_API_CALL PFNGLDRAWARRAYSINSTANCEDPROC glad_glDrawArraysInstanced; +#define glDrawArraysInstanced glad_glDrawArraysInstanced +GLAD_API_CALL PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC glad_glDrawArraysInstancedBaseInstance; +#define glDrawArraysInstancedBaseInstance glad_glDrawArraysInstancedBaseInstance +GLAD_API_CALL PFNGLDRAWBUFFERPROC glad_glDrawBuffer; +#define glDrawBuffer glad_glDrawBuffer +GLAD_API_CALL PFNGLDRAWBUFFERSPROC glad_glDrawBuffers; +#define glDrawBuffers glad_glDrawBuffers +GLAD_API_CALL PFNGLDRAWELEMENTSPROC glad_glDrawElements; +#define glDrawElements glad_glDrawElements +GLAD_API_CALL PFNGLDRAWELEMENTSBASEVERTEXPROC glad_glDrawElementsBaseVertex; +#define glDrawElementsBaseVertex glad_glDrawElementsBaseVertex +GLAD_API_CALL PFNGLDRAWELEMENTSINDIRECTPROC 
glad_glDrawElementsIndirect; +#define glDrawElementsIndirect glad_glDrawElementsIndirect +GLAD_API_CALL PFNGLDRAWELEMENTSINSTANCEDPROC glad_glDrawElementsInstanced; +#define glDrawElementsInstanced glad_glDrawElementsInstanced +GLAD_API_CALL PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC glad_glDrawElementsInstancedBaseInstance; +#define glDrawElementsInstancedBaseInstance glad_glDrawElementsInstancedBaseInstance +GLAD_API_CALL PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glad_glDrawElementsInstancedBaseVertex; +#define glDrawElementsInstancedBaseVertex glad_glDrawElementsInstancedBaseVertex +GLAD_API_CALL PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC glad_glDrawElementsInstancedBaseVertexBaseInstance; +#define glDrawElementsInstancedBaseVertexBaseInstance glad_glDrawElementsInstancedBaseVertexBaseInstance +GLAD_API_CALL PFNGLDRAWRANGEELEMENTSPROC glad_glDrawRangeElements; +#define glDrawRangeElements glad_glDrawRangeElements +GLAD_API_CALL PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_glDrawRangeElementsBaseVertex; +#define glDrawRangeElementsBaseVertex glad_glDrawRangeElementsBaseVertex +GLAD_API_CALL PFNGLDRAWTRANSFORMFEEDBACKPROC glad_glDrawTransformFeedback; +#define glDrawTransformFeedback glad_glDrawTransformFeedback +GLAD_API_CALL PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC glad_glDrawTransformFeedbackInstanced; +#define glDrawTransformFeedbackInstanced glad_glDrawTransformFeedbackInstanced +GLAD_API_CALL PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC glad_glDrawTransformFeedbackStream; +#define glDrawTransformFeedbackStream glad_glDrawTransformFeedbackStream +GLAD_API_CALL PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC glad_glDrawTransformFeedbackStreamInstanced; +#define glDrawTransformFeedbackStreamInstanced glad_glDrawTransformFeedbackStreamInstanced +GLAD_API_CALL PFNGLENABLEPROC glad_glEnable; +#define glEnable glad_glEnable +GLAD_API_CALL PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray; +#define glEnableVertexAttribArray glad_glEnableVertexAttribArray 
+GLAD_API_CALL PFNGLENABLEIPROC glad_glEnablei; +#define glEnablei glad_glEnablei +GLAD_API_CALL PFNGLENDCONDITIONALRENDERPROC glad_glEndConditionalRender; +#define glEndConditionalRender glad_glEndConditionalRender +GLAD_API_CALL PFNGLENDQUERYPROC glad_glEndQuery; +#define glEndQuery glad_glEndQuery +GLAD_API_CALL PFNGLENDQUERYINDEXEDPROC glad_glEndQueryIndexed; +#define glEndQueryIndexed glad_glEndQueryIndexed +GLAD_API_CALL PFNGLENDTRANSFORMFEEDBACKPROC glad_glEndTransformFeedback; +#define glEndTransformFeedback glad_glEndTransformFeedback +GLAD_API_CALL PFNGLFENCESYNCPROC glad_glFenceSync; +#define glFenceSync glad_glFenceSync +GLAD_API_CALL PFNGLFINISHPROC glad_glFinish; +#define glFinish glad_glFinish +GLAD_API_CALL PFNGLFLUSHPROC glad_glFlush; +#define glFlush glad_glFlush +GLAD_API_CALL PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_glFlushMappedBufferRange; +#define glFlushMappedBufferRange glad_glFlushMappedBufferRange +GLAD_API_CALL PFNGLFRAMEBUFFERPARAMETERIPROC glad_glFramebufferParameteri; +#define glFramebufferParameteri glad_glFramebufferParameteri +GLAD_API_CALL PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer; +#define glFramebufferRenderbuffer glad_glFramebufferRenderbuffer +GLAD_API_CALL PFNGLFRAMEBUFFERTEXTUREPROC glad_glFramebufferTexture; +#define glFramebufferTexture glad_glFramebufferTexture +GLAD_API_CALL PFNGLFRAMEBUFFERTEXTURE1DPROC glad_glFramebufferTexture1D; +#define glFramebufferTexture1D glad_glFramebufferTexture1D +GLAD_API_CALL PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D; +#define glFramebufferTexture2D glad_glFramebufferTexture2D +GLAD_API_CALL PFNGLFRAMEBUFFERTEXTURE3DPROC glad_glFramebufferTexture3D; +#define glFramebufferTexture3D glad_glFramebufferTexture3D +GLAD_API_CALL PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_glFramebufferTextureLayer; +#define glFramebufferTextureLayer glad_glFramebufferTextureLayer +GLAD_API_CALL PFNGLFRONTFACEPROC glad_glFrontFace; +#define glFrontFace glad_glFrontFace +GLAD_API_CALL 
PFNGLGENBUFFERSPROC glad_glGenBuffers; +#define glGenBuffers glad_glGenBuffers +GLAD_API_CALL PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers; +#define glGenFramebuffers glad_glGenFramebuffers +GLAD_API_CALL PFNGLGENPROGRAMPIPELINESPROC glad_glGenProgramPipelines; +#define glGenProgramPipelines glad_glGenProgramPipelines +GLAD_API_CALL PFNGLGENQUERIESPROC glad_glGenQueries; +#define glGenQueries glad_glGenQueries +GLAD_API_CALL PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers; +#define glGenRenderbuffers glad_glGenRenderbuffers +GLAD_API_CALL PFNGLGENSAMPLERSPROC glad_glGenSamplers; +#define glGenSamplers glad_glGenSamplers +GLAD_API_CALL PFNGLGENTEXTURESPROC glad_glGenTextures; +#define glGenTextures glad_glGenTextures +GLAD_API_CALL PFNGLGENTRANSFORMFEEDBACKSPROC glad_glGenTransformFeedbacks; +#define glGenTransformFeedbacks glad_glGenTransformFeedbacks +GLAD_API_CALL PFNGLGENVERTEXARRAYSPROC glad_glGenVertexArrays; +#define glGenVertexArrays glad_glGenVertexArrays +GLAD_API_CALL PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap; +#define glGenerateMipmap glad_glGenerateMipmap +GLAD_API_CALL PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC glad_glGetActiveAtomicCounterBufferiv; +#define glGetActiveAtomicCounterBufferiv glad_glGetActiveAtomicCounterBufferiv +GLAD_API_CALL PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib; +#define glGetActiveAttrib glad_glGetActiveAttrib +GLAD_API_CALL PFNGLGETACTIVESUBROUTINENAMEPROC glad_glGetActiveSubroutineName; +#define glGetActiveSubroutineName glad_glGetActiveSubroutineName +GLAD_API_CALL PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC glad_glGetActiveSubroutineUniformName; +#define glGetActiveSubroutineUniformName glad_glGetActiveSubroutineUniformName +GLAD_API_CALL PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC glad_glGetActiveSubroutineUniformiv; +#define glGetActiveSubroutineUniformiv glad_glGetActiveSubroutineUniformiv +GLAD_API_CALL PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform; +#define glGetActiveUniform glad_glGetActiveUniform +GLAD_API_CALL 
PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_glGetActiveUniformBlockName; +#define glGetActiveUniformBlockName glad_glGetActiveUniformBlockName +GLAD_API_CALL PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_glGetActiveUniformBlockiv; +#define glGetActiveUniformBlockiv glad_glGetActiveUniformBlockiv +GLAD_API_CALL PFNGLGETACTIVEUNIFORMNAMEPROC glad_glGetActiveUniformName; +#define glGetActiveUniformName glad_glGetActiveUniformName +GLAD_API_CALL PFNGLGETACTIVEUNIFORMSIVPROC glad_glGetActiveUniformsiv; +#define glGetActiveUniformsiv glad_glGetActiveUniformsiv +GLAD_API_CALL PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders; +#define glGetAttachedShaders glad_glGetAttachedShaders +GLAD_API_CALL PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation; +#define glGetAttribLocation glad_glGetAttribLocation +GLAD_API_CALL PFNGLGETBOOLEANI_VPROC glad_glGetBooleani_v; +#define glGetBooleani_v glad_glGetBooleani_v +GLAD_API_CALL PFNGLGETBOOLEANVPROC glad_glGetBooleanv; +#define glGetBooleanv glad_glGetBooleanv +GLAD_API_CALL PFNGLGETBUFFERPARAMETERI64VPROC glad_glGetBufferParameteri64v; +#define glGetBufferParameteri64v glad_glGetBufferParameteri64v +GLAD_API_CALL PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv; +#define glGetBufferParameteriv glad_glGetBufferParameteriv +GLAD_API_CALL PFNGLGETBUFFERPOINTERVPROC glad_glGetBufferPointerv; +#define glGetBufferPointerv glad_glGetBufferPointerv +GLAD_API_CALL PFNGLGETBUFFERSUBDATAPROC glad_glGetBufferSubData; +#define glGetBufferSubData glad_glGetBufferSubData +GLAD_API_CALL PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_glGetCompressedTexImage; +#define glGetCompressedTexImage glad_glGetCompressedTexImage +GLAD_API_CALL PFNGLGETDEBUGMESSAGELOGPROC glad_glGetDebugMessageLog; +#define glGetDebugMessageLog glad_glGetDebugMessageLog +GLAD_API_CALL PFNGLGETDOUBLEI_VPROC glad_glGetDoublei_v; +#define glGetDoublei_v glad_glGetDoublei_v +GLAD_API_CALL PFNGLGETDOUBLEVPROC glad_glGetDoublev; +#define glGetDoublev glad_glGetDoublev +GLAD_API_CALL 
PFNGLGETERRORPROC glad_glGetError; +#define glGetError glad_glGetError +GLAD_API_CALL PFNGLGETFLOATI_VPROC glad_glGetFloati_v; +#define glGetFloati_v glad_glGetFloati_v +GLAD_API_CALL PFNGLGETFLOATVPROC glad_glGetFloatv; +#define glGetFloatv glad_glGetFloatv +GLAD_API_CALL PFNGLGETFRAGDATAINDEXPROC glad_glGetFragDataIndex; +#define glGetFragDataIndex glad_glGetFragDataIndex +GLAD_API_CALL PFNGLGETFRAGDATALOCATIONPROC glad_glGetFragDataLocation; +#define glGetFragDataLocation glad_glGetFragDataLocation +GLAD_API_CALL PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv; +#define glGetFramebufferAttachmentParameteriv glad_glGetFramebufferAttachmentParameteriv +GLAD_API_CALL PFNGLGETFRAMEBUFFERPARAMETERIVPROC glad_glGetFramebufferParameteriv; +#define glGetFramebufferParameteriv glad_glGetFramebufferParameteriv +GLAD_API_CALL PFNGLGETINTEGER64I_VPROC glad_glGetInteger64i_v; +#define glGetInteger64i_v glad_glGetInteger64i_v +GLAD_API_CALL PFNGLGETINTEGER64VPROC glad_glGetInteger64v; +#define glGetInteger64v glad_glGetInteger64v +GLAD_API_CALL PFNGLGETINTEGERI_VPROC glad_glGetIntegeri_v; +#define glGetIntegeri_v glad_glGetIntegeri_v +GLAD_API_CALL PFNGLGETINTEGERVPROC glad_glGetIntegerv; +#define glGetIntegerv glad_glGetIntegerv +GLAD_API_CALL PFNGLGETINTERNALFORMATI64VPROC glad_glGetInternalformati64v; +#define glGetInternalformati64v glad_glGetInternalformati64v +GLAD_API_CALL PFNGLGETINTERNALFORMATIVPROC glad_glGetInternalformativ; +#define glGetInternalformativ glad_glGetInternalformativ +GLAD_API_CALL PFNGLGETMULTISAMPLEFVPROC glad_glGetMultisamplefv; +#define glGetMultisamplefv glad_glGetMultisamplefv +GLAD_API_CALL PFNGLGETOBJECTLABELPROC glad_glGetObjectLabel; +#define glGetObjectLabel glad_glGetObjectLabel +GLAD_API_CALL PFNGLGETOBJECTPTRLABELPROC glad_glGetObjectPtrLabel; +#define glGetObjectPtrLabel glad_glGetObjectPtrLabel +GLAD_API_CALL PFNGLGETPOINTERVPROC glad_glGetPointerv; +#define glGetPointerv glad_glGetPointerv 
+GLAD_API_CALL PFNGLGETPROGRAMBINARYPROC glad_glGetProgramBinary; +#define glGetProgramBinary glad_glGetProgramBinary +GLAD_API_CALL PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog; +#define glGetProgramInfoLog glad_glGetProgramInfoLog +GLAD_API_CALL PFNGLGETPROGRAMINTERFACEIVPROC glad_glGetProgramInterfaceiv; +#define glGetProgramInterfaceiv glad_glGetProgramInterfaceiv +GLAD_API_CALL PFNGLGETPROGRAMPIPELINEINFOLOGPROC glad_glGetProgramPipelineInfoLog; +#define glGetProgramPipelineInfoLog glad_glGetProgramPipelineInfoLog +GLAD_API_CALL PFNGLGETPROGRAMPIPELINEIVPROC glad_glGetProgramPipelineiv; +#define glGetProgramPipelineiv glad_glGetProgramPipelineiv +GLAD_API_CALL PFNGLGETPROGRAMRESOURCEINDEXPROC glad_glGetProgramResourceIndex; +#define glGetProgramResourceIndex glad_glGetProgramResourceIndex +GLAD_API_CALL PFNGLGETPROGRAMRESOURCELOCATIONPROC glad_glGetProgramResourceLocation; +#define glGetProgramResourceLocation glad_glGetProgramResourceLocation +GLAD_API_CALL PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC glad_glGetProgramResourceLocationIndex; +#define glGetProgramResourceLocationIndex glad_glGetProgramResourceLocationIndex +GLAD_API_CALL PFNGLGETPROGRAMRESOURCENAMEPROC glad_glGetProgramResourceName; +#define glGetProgramResourceName glad_glGetProgramResourceName +GLAD_API_CALL PFNGLGETPROGRAMRESOURCEIVPROC glad_glGetProgramResourceiv; +#define glGetProgramResourceiv glad_glGetProgramResourceiv +GLAD_API_CALL PFNGLGETPROGRAMSTAGEIVPROC glad_glGetProgramStageiv; +#define glGetProgramStageiv glad_glGetProgramStageiv +GLAD_API_CALL PFNGLGETPROGRAMIVPROC glad_glGetProgramiv; +#define glGetProgramiv glad_glGetProgramiv +GLAD_API_CALL PFNGLGETQUERYINDEXEDIVPROC glad_glGetQueryIndexediv; +#define glGetQueryIndexediv glad_glGetQueryIndexediv +GLAD_API_CALL PFNGLGETQUERYOBJECTI64VPROC glad_glGetQueryObjecti64v; +#define glGetQueryObjecti64v glad_glGetQueryObjecti64v +GLAD_API_CALL PFNGLGETQUERYOBJECTIVPROC glad_glGetQueryObjectiv; +#define glGetQueryObjectiv 
glad_glGetQueryObjectiv +GLAD_API_CALL PFNGLGETQUERYOBJECTUI64VPROC glad_glGetQueryObjectui64v; +#define glGetQueryObjectui64v glad_glGetQueryObjectui64v +GLAD_API_CALL PFNGLGETQUERYOBJECTUIVPROC glad_glGetQueryObjectuiv; +#define glGetQueryObjectuiv glad_glGetQueryObjectuiv +GLAD_API_CALL PFNGLGETQUERYIVPROC glad_glGetQueryiv; +#define glGetQueryiv glad_glGetQueryiv +GLAD_API_CALL PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv; +#define glGetRenderbufferParameteriv glad_glGetRenderbufferParameteriv +GLAD_API_CALL PFNGLGETSAMPLERPARAMETERIIVPROC glad_glGetSamplerParameterIiv; +#define glGetSamplerParameterIiv glad_glGetSamplerParameterIiv +GLAD_API_CALL PFNGLGETSAMPLERPARAMETERIUIVPROC glad_glGetSamplerParameterIuiv; +#define glGetSamplerParameterIuiv glad_glGetSamplerParameterIuiv +GLAD_API_CALL PFNGLGETSAMPLERPARAMETERFVPROC glad_glGetSamplerParameterfv; +#define glGetSamplerParameterfv glad_glGetSamplerParameterfv +GLAD_API_CALL PFNGLGETSAMPLERPARAMETERIVPROC glad_glGetSamplerParameteriv; +#define glGetSamplerParameteriv glad_glGetSamplerParameteriv +GLAD_API_CALL PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog; +#define glGetShaderInfoLog glad_glGetShaderInfoLog +GLAD_API_CALL PFNGLGETSHADERPRECISIONFORMATPROC glad_glGetShaderPrecisionFormat; +#define glGetShaderPrecisionFormat glad_glGetShaderPrecisionFormat +GLAD_API_CALL PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource; +#define glGetShaderSource glad_glGetShaderSource +GLAD_API_CALL PFNGLGETSHADERIVPROC glad_glGetShaderiv; +#define glGetShaderiv glad_glGetShaderiv +GLAD_API_CALL PFNGLGETSTRINGPROC glad_glGetString; +#define glGetString glad_glGetString +GLAD_API_CALL PFNGLGETSTRINGIPROC glad_glGetStringi; +#define glGetStringi glad_glGetStringi +GLAD_API_CALL PFNGLGETSUBROUTINEINDEXPROC glad_glGetSubroutineIndex; +#define glGetSubroutineIndex glad_glGetSubroutineIndex +GLAD_API_CALL PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC glad_glGetSubroutineUniformLocation; +#define 
glGetSubroutineUniformLocation glad_glGetSubroutineUniformLocation +GLAD_API_CALL PFNGLGETSYNCIVPROC glad_glGetSynciv; +#define glGetSynciv glad_glGetSynciv +GLAD_API_CALL PFNGLGETTEXIMAGEPROC glad_glGetTexImage; +#define glGetTexImage glad_glGetTexImage +GLAD_API_CALL PFNGLGETTEXLEVELPARAMETERFVPROC glad_glGetTexLevelParameterfv; +#define glGetTexLevelParameterfv glad_glGetTexLevelParameterfv +GLAD_API_CALL PFNGLGETTEXLEVELPARAMETERIVPROC glad_glGetTexLevelParameteriv; +#define glGetTexLevelParameteriv glad_glGetTexLevelParameteriv +GLAD_API_CALL PFNGLGETTEXPARAMETERIIVPROC glad_glGetTexParameterIiv; +#define glGetTexParameterIiv glad_glGetTexParameterIiv +GLAD_API_CALL PFNGLGETTEXPARAMETERIUIVPROC glad_glGetTexParameterIuiv; +#define glGetTexParameterIuiv glad_glGetTexParameterIuiv +GLAD_API_CALL PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv; +#define glGetTexParameterfv glad_glGetTexParameterfv +GLAD_API_CALL PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv; +#define glGetTexParameteriv glad_glGetTexParameteriv +GLAD_API_CALL PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_glGetTransformFeedbackVarying; +#define glGetTransformFeedbackVarying glad_glGetTransformFeedbackVarying +GLAD_API_CALL PFNGLGETUNIFORMBLOCKINDEXPROC glad_glGetUniformBlockIndex; +#define glGetUniformBlockIndex glad_glGetUniformBlockIndex +GLAD_API_CALL PFNGLGETUNIFORMINDICESPROC glad_glGetUniformIndices; +#define glGetUniformIndices glad_glGetUniformIndices +GLAD_API_CALL PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation; +#define glGetUniformLocation glad_glGetUniformLocation +GLAD_API_CALL PFNGLGETUNIFORMSUBROUTINEUIVPROC glad_glGetUniformSubroutineuiv; +#define glGetUniformSubroutineuiv glad_glGetUniformSubroutineuiv +GLAD_API_CALL PFNGLGETUNIFORMDVPROC glad_glGetUniformdv; +#define glGetUniformdv glad_glGetUniformdv +GLAD_API_CALL PFNGLGETUNIFORMFVPROC glad_glGetUniformfv; +#define glGetUniformfv glad_glGetUniformfv +GLAD_API_CALL PFNGLGETUNIFORMIVPROC glad_glGetUniformiv; +#define 
glGetUniformiv glad_glGetUniformiv +GLAD_API_CALL PFNGLGETUNIFORMUIVPROC glad_glGetUniformuiv; +#define glGetUniformuiv glad_glGetUniformuiv +GLAD_API_CALL PFNGLGETVERTEXATTRIBIIVPROC glad_glGetVertexAttribIiv; +#define glGetVertexAttribIiv glad_glGetVertexAttribIiv +GLAD_API_CALL PFNGLGETVERTEXATTRIBIUIVPROC glad_glGetVertexAttribIuiv; +#define glGetVertexAttribIuiv glad_glGetVertexAttribIuiv +GLAD_API_CALL PFNGLGETVERTEXATTRIBLDVPROC glad_glGetVertexAttribLdv; +#define glGetVertexAttribLdv glad_glGetVertexAttribLdv +GLAD_API_CALL PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv; +#define glGetVertexAttribPointerv glad_glGetVertexAttribPointerv +GLAD_API_CALL PFNGLGETVERTEXATTRIBDVPROC glad_glGetVertexAttribdv; +#define glGetVertexAttribdv glad_glGetVertexAttribdv +GLAD_API_CALL PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv; +#define glGetVertexAttribfv glad_glGetVertexAttribfv +GLAD_API_CALL PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv; +#define glGetVertexAttribiv glad_glGetVertexAttribiv +GLAD_API_CALL PFNGLHINTPROC glad_glHint; +#define glHint glad_glHint +GLAD_API_CALL PFNGLINVALIDATEBUFFERDATAPROC glad_glInvalidateBufferData; +#define glInvalidateBufferData glad_glInvalidateBufferData +GLAD_API_CALL PFNGLINVALIDATEBUFFERSUBDATAPROC glad_glInvalidateBufferSubData; +#define glInvalidateBufferSubData glad_glInvalidateBufferSubData +GLAD_API_CALL PFNGLINVALIDATEFRAMEBUFFERPROC glad_glInvalidateFramebuffer; +#define glInvalidateFramebuffer glad_glInvalidateFramebuffer +GLAD_API_CALL PFNGLINVALIDATESUBFRAMEBUFFERPROC glad_glInvalidateSubFramebuffer; +#define glInvalidateSubFramebuffer glad_glInvalidateSubFramebuffer +GLAD_API_CALL PFNGLINVALIDATETEXIMAGEPROC glad_glInvalidateTexImage; +#define glInvalidateTexImage glad_glInvalidateTexImage +GLAD_API_CALL PFNGLINVALIDATETEXSUBIMAGEPROC glad_glInvalidateTexSubImage; +#define glInvalidateTexSubImage glad_glInvalidateTexSubImage +GLAD_API_CALL PFNGLISBUFFERPROC glad_glIsBuffer; +#define 
glIsBuffer glad_glIsBuffer +GLAD_API_CALL PFNGLISENABLEDPROC glad_glIsEnabled; +#define glIsEnabled glad_glIsEnabled +GLAD_API_CALL PFNGLISENABLEDIPROC glad_glIsEnabledi; +#define glIsEnabledi glad_glIsEnabledi +GLAD_API_CALL PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer; +#define glIsFramebuffer glad_glIsFramebuffer +GLAD_API_CALL PFNGLISPROGRAMPROC glad_glIsProgram; +#define glIsProgram glad_glIsProgram +GLAD_API_CALL PFNGLISPROGRAMPIPELINEPROC glad_glIsProgramPipeline; +#define glIsProgramPipeline glad_glIsProgramPipeline +GLAD_API_CALL PFNGLISQUERYPROC glad_glIsQuery; +#define glIsQuery glad_glIsQuery +GLAD_API_CALL PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer; +#define glIsRenderbuffer glad_glIsRenderbuffer +GLAD_API_CALL PFNGLISSAMPLERPROC glad_glIsSampler; +#define glIsSampler glad_glIsSampler +GLAD_API_CALL PFNGLISSHADERPROC glad_glIsShader; +#define glIsShader glad_glIsShader +GLAD_API_CALL PFNGLISSYNCPROC glad_glIsSync; +#define glIsSync glad_glIsSync +GLAD_API_CALL PFNGLISTEXTUREPROC glad_glIsTexture; +#define glIsTexture glad_glIsTexture +GLAD_API_CALL PFNGLISTRANSFORMFEEDBACKPROC glad_glIsTransformFeedback; +#define glIsTransformFeedback glad_glIsTransformFeedback +GLAD_API_CALL PFNGLISVERTEXARRAYPROC glad_glIsVertexArray; +#define glIsVertexArray glad_glIsVertexArray +GLAD_API_CALL PFNGLLINEWIDTHPROC glad_glLineWidth; +#define glLineWidth glad_glLineWidth +GLAD_API_CALL PFNGLLINKPROGRAMPROC glad_glLinkProgram; +#define glLinkProgram glad_glLinkProgram +GLAD_API_CALL PFNGLLOGICOPPROC glad_glLogicOp; +#define glLogicOp glad_glLogicOp +GLAD_API_CALL PFNGLMAPBUFFERPROC glad_glMapBuffer; +#define glMapBuffer glad_glMapBuffer +GLAD_API_CALL PFNGLMAPBUFFERRANGEPROC glad_glMapBufferRange; +#define glMapBufferRange glad_glMapBufferRange +GLAD_API_CALL PFNGLMEMORYBARRIERPROC glad_glMemoryBarrier; +#define glMemoryBarrier glad_glMemoryBarrier +GLAD_API_CALL PFNGLMINSAMPLESHADINGPROC glad_glMinSampleShading; +#define glMinSampleShading glad_glMinSampleShading 
+GLAD_API_CALL PFNGLMULTIDRAWARRAYSPROC glad_glMultiDrawArrays; +#define glMultiDrawArrays glad_glMultiDrawArrays +GLAD_API_CALL PFNGLMULTIDRAWARRAYSINDIRECTPROC glad_glMultiDrawArraysIndirect; +#define glMultiDrawArraysIndirect glad_glMultiDrawArraysIndirect +GLAD_API_CALL PFNGLMULTIDRAWELEMENTSPROC glad_glMultiDrawElements; +#define glMultiDrawElements glad_glMultiDrawElements +GLAD_API_CALL PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glad_glMultiDrawElementsBaseVertex; +#define glMultiDrawElementsBaseVertex glad_glMultiDrawElementsBaseVertex +GLAD_API_CALL PFNGLMULTIDRAWELEMENTSINDIRECTPROC glad_glMultiDrawElementsIndirect; +#define glMultiDrawElementsIndirect glad_glMultiDrawElementsIndirect +GLAD_API_CALL PFNGLOBJECTLABELPROC glad_glObjectLabel; +#define glObjectLabel glad_glObjectLabel +GLAD_API_CALL PFNGLOBJECTPTRLABELPROC glad_glObjectPtrLabel; +#define glObjectPtrLabel glad_glObjectPtrLabel +GLAD_API_CALL PFNGLPATCHPARAMETERFVPROC glad_glPatchParameterfv; +#define glPatchParameterfv glad_glPatchParameterfv +GLAD_API_CALL PFNGLPATCHPARAMETERIPROC glad_glPatchParameteri; +#define glPatchParameteri glad_glPatchParameteri +GLAD_API_CALL PFNGLPAUSETRANSFORMFEEDBACKPROC glad_glPauseTransformFeedback; +#define glPauseTransformFeedback glad_glPauseTransformFeedback +GLAD_API_CALL PFNGLPIXELSTOREFPROC glad_glPixelStoref; +#define glPixelStoref glad_glPixelStoref +GLAD_API_CALL PFNGLPIXELSTOREIPROC glad_glPixelStorei; +#define glPixelStorei glad_glPixelStorei +GLAD_API_CALL PFNGLPOINTPARAMETERFPROC glad_glPointParameterf; +#define glPointParameterf glad_glPointParameterf +GLAD_API_CALL PFNGLPOINTPARAMETERFVPROC glad_glPointParameterfv; +#define glPointParameterfv glad_glPointParameterfv +GLAD_API_CALL PFNGLPOINTPARAMETERIPROC glad_glPointParameteri; +#define glPointParameteri glad_glPointParameteri +GLAD_API_CALL PFNGLPOINTPARAMETERIVPROC glad_glPointParameteriv; +#define glPointParameteriv glad_glPointParameteriv +GLAD_API_CALL PFNGLPOINTSIZEPROC glad_glPointSize; 
+#define glPointSize glad_glPointSize +GLAD_API_CALL PFNGLPOLYGONMODEPROC glad_glPolygonMode; +#define glPolygonMode glad_glPolygonMode +GLAD_API_CALL PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset; +#define glPolygonOffset glad_glPolygonOffset +GLAD_API_CALL PFNGLPOPDEBUGGROUPPROC glad_glPopDebugGroup; +#define glPopDebugGroup glad_glPopDebugGroup +GLAD_API_CALL PFNGLPRIMITIVERESTARTINDEXPROC glad_glPrimitiveRestartIndex; +#define glPrimitiveRestartIndex glad_glPrimitiveRestartIndex +GLAD_API_CALL PFNGLPROGRAMBINARYPROC glad_glProgramBinary; +#define glProgramBinary glad_glProgramBinary +GLAD_API_CALL PFNGLPROGRAMPARAMETERIPROC glad_glProgramParameteri; +#define glProgramParameteri glad_glProgramParameteri +GLAD_API_CALL PFNGLPROGRAMUNIFORM1DPROC glad_glProgramUniform1d; +#define glProgramUniform1d glad_glProgramUniform1d +GLAD_API_CALL PFNGLPROGRAMUNIFORM1DVPROC glad_glProgramUniform1dv; +#define glProgramUniform1dv glad_glProgramUniform1dv +GLAD_API_CALL PFNGLPROGRAMUNIFORM1FPROC glad_glProgramUniform1f; +#define glProgramUniform1f glad_glProgramUniform1f +GLAD_API_CALL PFNGLPROGRAMUNIFORM1FVPROC glad_glProgramUniform1fv; +#define glProgramUniform1fv glad_glProgramUniform1fv +GLAD_API_CALL PFNGLPROGRAMUNIFORM1IPROC glad_glProgramUniform1i; +#define glProgramUniform1i glad_glProgramUniform1i +GLAD_API_CALL PFNGLPROGRAMUNIFORM1IVPROC glad_glProgramUniform1iv; +#define glProgramUniform1iv glad_glProgramUniform1iv +GLAD_API_CALL PFNGLPROGRAMUNIFORM1UIPROC glad_glProgramUniform1ui; +#define glProgramUniform1ui glad_glProgramUniform1ui +GLAD_API_CALL PFNGLPROGRAMUNIFORM1UIVPROC glad_glProgramUniform1uiv; +#define glProgramUniform1uiv glad_glProgramUniform1uiv +GLAD_API_CALL PFNGLPROGRAMUNIFORM2DPROC glad_glProgramUniform2d; +#define glProgramUniform2d glad_glProgramUniform2d +GLAD_API_CALL PFNGLPROGRAMUNIFORM2DVPROC glad_glProgramUniform2dv; +#define glProgramUniform2dv glad_glProgramUniform2dv +GLAD_API_CALL PFNGLPROGRAMUNIFORM2FPROC glad_glProgramUniform2f; +#define 
glProgramUniform2f glad_glProgramUniform2f +GLAD_API_CALL PFNGLPROGRAMUNIFORM2FVPROC glad_glProgramUniform2fv; +#define glProgramUniform2fv glad_glProgramUniform2fv +GLAD_API_CALL PFNGLPROGRAMUNIFORM2IPROC glad_glProgramUniform2i; +#define glProgramUniform2i glad_glProgramUniform2i +GLAD_API_CALL PFNGLPROGRAMUNIFORM2IVPROC glad_glProgramUniform2iv; +#define glProgramUniform2iv glad_glProgramUniform2iv +GLAD_API_CALL PFNGLPROGRAMUNIFORM2UIPROC glad_glProgramUniform2ui; +#define glProgramUniform2ui glad_glProgramUniform2ui +GLAD_API_CALL PFNGLPROGRAMUNIFORM2UIVPROC glad_glProgramUniform2uiv; +#define glProgramUniform2uiv glad_glProgramUniform2uiv +GLAD_API_CALL PFNGLPROGRAMUNIFORM3DPROC glad_glProgramUniform3d; +#define glProgramUniform3d glad_glProgramUniform3d +GLAD_API_CALL PFNGLPROGRAMUNIFORM3DVPROC glad_glProgramUniform3dv; +#define glProgramUniform3dv glad_glProgramUniform3dv +GLAD_API_CALL PFNGLPROGRAMUNIFORM3FPROC glad_glProgramUniform3f; +#define glProgramUniform3f glad_glProgramUniform3f +GLAD_API_CALL PFNGLPROGRAMUNIFORM3FVPROC glad_glProgramUniform3fv; +#define glProgramUniform3fv glad_glProgramUniform3fv +GLAD_API_CALL PFNGLPROGRAMUNIFORM3IPROC glad_glProgramUniform3i; +#define glProgramUniform3i glad_glProgramUniform3i +GLAD_API_CALL PFNGLPROGRAMUNIFORM3IVPROC glad_glProgramUniform3iv; +#define glProgramUniform3iv glad_glProgramUniform3iv +GLAD_API_CALL PFNGLPROGRAMUNIFORM3UIPROC glad_glProgramUniform3ui; +#define glProgramUniform3ui glad_glProgramUniform3ui +GLAD_API_CALL PFNGLPROGRAMUNIFORM3UIVPROC glad_glProgramUniform3uiv; +#define glProgramUniform3uiv glad_glProgramUniform3uiv +GLAD_API_CALL PFNGLPROGRAMUNIFORM4DPROC glad_glProgramUniform4d; +#define glProgramUniform4d glad_glProgramUniform4d +GLAD_API_CALL PFNGLPROGRAMUNIFORM4DVPROC glad_glProgramUniform4dv; +#define glProgramUniform4dv glad_glProgramUniform4dv +GLAD_API_CALL PFNGLPROGRAMUNIFORM4FPROC glad_glProgramUniform4f; +#define glProgramUniform4f glad_glProgramUniform4f +GLAD_API_CALL 
PFNGLPROGRAMUNIFORM4FVPROC glad_glProgramUniform4fv; +#define glProgramUniform4fv glad_glProgramUniform4fv +GLAD_API_CALL PFNGLPROGRAMUNIFORM4IPROC glad_glProgramUniform4i; +#define glProgramUniform4i glad_glProgramUniform4i +GLAD_API_CALL PFNGLPROGRAMUNIFORM4IVPROC glad_glProgramUniform4iv; +#define glProgramUniform4iv glad_glProgramUniform4iv +GLAD_API_CALL PFNGLPROGRAMUNIFORM4UIPROC glad_glProgramUniform4ui; +#define glProgramUniform4ui glad_glProgramUniform4ui +GLAD_API_CALL PFNGLPROGRAMUNIFORM4UIVPROC glad_glProgramUniform4uiv; +#define glProgramUniform4uiv glad_glProgramUniform4uiv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX2DVPROC glad_glProgramUniformMatrix2dv; +#define glProgramUniformMatrix2dv glad_glProgramUniformMatrix2dv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX2FVPROC glad_glProgramUniformMatrix2fv; +#define glProgramUniformMatrix2fv glad_glProgramUniformMatrix2fv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC glad_glProgramUniformMatrix2x3dv; +#define glProgramUniformMatrix2x3dv glad_glProgramUniformMatrix2x3dv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC glad_glProgramUniformMatrix2x3fv; +#define glProgramUniformMatrix2x3fv glad_glProgramUniformMatrix2x3fv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC glad_glProgramUniformMatrix2x4dv; +#define glProgramUniformMatrix2x4dv glad_glProgramUniformMatrix2x4dv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC glad_glProgramUniformMatrix2x4fv; +#define glProgramUniformMatrix2x4fv glad_glProgramUniformMatrix2x4fv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX3DVPROC glad_glProgramUniformMatrix3dv; +#define glProgramUniformMatrix3dv glad_glProgramUniformMatrix3dv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX3FVPROC glad_glProgramUniformMatrix3fv; +#define glProgramUniformMatrix3fv glad_glProgramUniformMatrix3fv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC glad_glProgramUniformMatrix3x2dv; +#define glProgramUniformMatrix3x2dv glad_glProgramUniformMatrix3x2dv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC 
glad_glProgramUniformMatrix3x2fv; +#define glProgramUniformMatrix3x2fv glad_glProgramUniformMatrix3x2fv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC glad_glProgramUniformMatrix3x4dv; +#define glProgramUniformMatrix3x4dv glad_glProgramUniformMatrix3x4dv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC glad_glProgramUniformMatrix3x4fv; +#define glProgramUniformMatrix3x4fv glad_glProgramUniformMatrix3x4fv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX4DVPROC glad_glProgramUniformMatrix4dv; +#define glProgramUniformMatrix4dv glad_glProgramUniformMatrix4dv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX4FVPROC glad_glProgramUniformMatrix4fv; +#define glProgramUniformMatrix4fv glad_glProgramUniformMatrix4fv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC glad_glProgramUniformMatrix4x2dv; +#define glProgramUniformMatrix4x2dv glad_glProgramUniformMatrix4x2dv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC glad_glProgramUniformMatrix4x2fv; +#define glProgramUniformMatrix4x2fv glad_glProgramUniformMatrix4x2fv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC glad_glProgramUniformMatrix4x3dv; +#define glProgramUniformMatrix4x3dv glad_glProgramUniformMatrix4x3dv +GLAD_API_CALL PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC glad_glProgramUniformMatrix4x3fv; +#define glProgramUniformMatrix4x3fv glad_glProgramUniformMatrix4x3fv +GLAD_API_CALL PFNGLPROVOKINGVERTEXPROC glad_glProvokingVertex; +#define glProvokingVertex glad_glProvokingVertex +GLAD_API_CALL PFNGLPUSHDEBUGGROUPPROC glad_glPushDebugGroup; +#define glPushDebugGroup glad_glPushDebugGroup +GLAD_API_CALL PFNGLQUERYCOUNTERPROC glad_glQueryCounter; +#define glQueryCounter glad_glQueryCounter +GLAD_API_CALL PFNGLREADBUFFERPROC glad_glReadBuffer; +#define glReadBuffer glad_glReadBuffer +GLAD_API_CALL PFNGLREADPIXELSPROC glad_glReadPixels; +#define glReadPixels glad_glReadPixels +GLAD_API_CALL PFNGLRELEASESHADERCOMPILERPROC glad_glReleaseShaderCompiler; +#define glReleaseShaderCompiler glad_glReleaseShaderCompiler +GLAD_API_CALL 
PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage; +#define glRenderbufferStorage glad_glRenderbufferStorage +GLAD_API_CALL PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glRenderbufferStorageMultisample; +#define glRenderbufferStorageMultisample glad_glRenderbufferStorageMultisample +GLAD_API_CALL PFNGLRESUMETRANSFORMFEEDBACKPROC glad_glResumeTransformFeedback; +#define glResumeTransformFeedback glad_glResumeTransformFeedback +GLAD_API_CALL PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage; +#define glSampleCoverage glad_glSampleCoverage +GLAD_API_CALL PFNGLSAMPLEMASKIPROC glad_glSampleMaski; +#define glSampleMaski glad_glSampleMaski +GLAD_API_CALL PFNGLSAMPLERPARAMETERIIVPROC glad_glSamplerParameterIiv; +#define glSamplerParameterIiv glad_glSamplerParameterIiv +GLAD_API_CALL PFNGLSAMPLERPARAMETERIUIVPROC glad_glSamplerParameterIuiv; +#define glSamplerParameterIuiv glad_glSamplerParameterIuiv +GLAD_API_CALL PFNGLSAMPLERPARAMETERFPROC glad_glSamplerParameterf; +#define glSamplerParameterf glad_glSamplerParameterf +GLAD_API_CALL PFNGLSAMPLERPARAMETERFVPROC glad_glSamplerParameterfv; +#define glSamplerParameterfv glad_glSamplerParameterfv +GLAD_API_CALL PFNGLSAMPLERPARAMETERIPROC glad_glSamplerParameteri; +#define glSamplerParameteri glad_glSamplerParameteri +GLAD_API_CALL PFNGLSAMPLERPARAMETERIVPROC glad_glSamplerParameteriv; +#define glSamplerParameteriv glad_glSamplerParameteriv +GLAD_API_CALL PFNGLSCISSORPROC glad_glScissor; +#define glScissor glad_glScissor +GLAD_API_CALL PFNGLSCISSORARRAYVPROC glad_glScissorArrayv; +#define glScissorArrayv glad_glScissorArrayv +GLAD_API_CALL PFNGLSCISSORINDEXEDPROC glad_glScissorIndexed; +#define glScissorIndexed glad_glScissorIndexed +GLAD_API_CALL PFNGLSCISSORINDEXEDVPROC glad_glScissorIndexedv; +#define glScissorIndexedv glad_glScissorIndexedv +GLAD_API_CALL PFNGLSHADERBINARYPROC glad_glShaderBinary; +#define glShaderBinary glad_glShaderBinary +GLAD_API_CALL PFNGLSHADERSOURCEPROC glad_glShaderSource; +#define glShaderSource 
glad_glShaderSource +GLAD_API_CALL PFNGLSHADERSTORAGEBLOCKBINDINGPROC glad_glShaderStorageBlockBinding; +#define glShaderStorageBlockBinding glad_glShaderStorageBlockBinding +GLAD_API_CALL PFNGLSTENCILFUNCPROC glad_glStencilFunc; +#define glStencilFunc glad_glStencilFunc +GLAD_API_CALL PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate; +#define glStencilFuncSeparate glad_glStencilFuncSeparate +GLAD_API_CALL PFNGLSTENCILMASKPROC glad_glStencilMask; +#define glStencilMask glad_glStencilMask +GLAD_API_CALL PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate; +#define glStencilMaskSeparate glad_glStencilMaskSeparate +GLAD_API_CALL PFNGLSTENCILOPPROC glad_glStencilOp; +#define glStencilOp glad_glStencilOp +GLAD_API_CALL PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate; +#define glStencilOpSeparate glad_glStencilOpSeparate +GLAD_API_CALL PFNGLTEXBUFFERPROC glad_glTexBuffer; +#define glTexBuffer glad_glTexBuffer +GLAD_API_CALL PFNGLTEXBUFFERRANGEPROC glad_glTexBufferRange; +#define glTexBufferRange glad_glTexBufferRange +GLAD_API_CALL PFNGLTEXIMAGE1DPROC glad_glTexImage1D; +#define glTexImage1D glad_glTexImage1D +GLAD_API_CALL PFNGLTEXIMAGE2DPROC glad_glTexImage2D; +#define glTexImage2D glad_glTexImage2D +GLAD_API_CALL PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_glTexImage2DMultisample; +#define glTexImage2DMultisample glad_glTexImage2DMultisample +GLAD_API_CALL PFNGLTEXIMAGE3DPROC glad_glTexImage3D; +#define glTexImage3D glad_glTexImage3D +GLAD_API_CALL PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_glTexImage3DMultisample; +#define glTexImage3DMultisample glad_glTexImage3DMultisample +GLAD_API_CALL PFNGLTEXPARAMETERIIVPROC glad_glTexParameterIiv; +#define glTexParameterIiv glad_glTexParameterIiv +GLAD_API_CALL PFNGLTEXPARAMETERIUIVPROC glad_glTexParameterIuiv; +#define glTexParameterIuiv glad_glTexParameterIuiv +GLAD_API_CALL PFNGLTEXPARAMETERFPROC glad_glTexParameterf; +#define glTexParameterf glad_glTexParameterf +GLAD_API_CALL PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv; 
+#define glTexParameterfv glad_glTexParameterfv +GLAD_API_CALL PFNGLTEXPARAMETERIPROC glad_glTexParameteri; +#define glTexParameteri glad_glTexParameteri +GLAD_API_CALL PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv; +#define glTexParameteriv glad_glTexParameteriv +GLAD_API_CALL PFNGLTEXSTORAGE1DPROC glad_glTexStorage1D; +#define glTexStorage1D glad_glTexStorage1D +GLAD_API_CALL PFNGLTEXSTORAGE2DPROC glad_glTexStorage2D; +#define glTexStorage2D glad_glTexStorage2D +GLAD_API_CALL PFNGLTEXSTORAGE2DMULTISAMPLEPROC glad_glTexStorage2DMultisample; +#define glTexStorage2DMultisample glad_glTexStorage2DMultisample +GLAD_API_CALL PFNGLTEXSTORAGE3DPROC glad_glTexStorage3D; +#define glTexStorage3D glad_glTexStorage3D +GLAD_API_CALL PFNGLTEXSTORAGE3DMULTISAMPLEPROC glad_glTexStorage3DMultisample; +#define glTexStorage3DMultisample glad_glTexStorage3DMultisample +GLAD_API_CALL PFNGLTEXSUBIMAGE1DPROC glad_glTexSubImage1D; +#define glTexSubImage1D glad_glTexSubImage1D +GLAD_API_CALL PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D; +#define glTexSubImage2D glad_glTexSubImage2D +GLAD_API_CALL PFNGLTEXSUBIMAGE3DPROC glad_glTexSubImage3D; +#define glTexSubImage3D glad_glTexSubImage3D +GLAD_API_CALL PFNGLTEXTUREVIEWPROC glad_glTextureView; +#define glTextureView glad_glTextureView +GLAD_API_CALL PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_glTransformFeedbackVaryings; +#define glTransformFeedbackVaryings glad_glTransformFeedbackVaryings +GLAD_API_CALL PFNGLUNIFORM1DPROC glad_glUniform1d; +#define glUniform1d glad_glUniform1d +GLAD_API_CALL PFNGLUNIFORM1DVPROC glad_glUniform1dv; +#define glUniform1dv glad_glUniform1dv +GLAD_API_CALL PFNGLUNIFORM1FPROC glad_glUniform1f; +#define glUniform1f glad_glUniform1f +GLAD_API_CALL PFNGLUNIFORM1FVPROC glad_glUniform1fv; +#define glUniform1fv glad_glUniform1fv +GLAD_API_CALL PFNGLUNIFORM1IPROC glad_glUniform1i; +#define glUniform1i glad_glUniform1i +GLAD_API_CALL PFNGLUNIFORM1IVPROC glad_glUniform1iv; +#define glUniform1iv glad_glUniform1iv +GLAD_API_CALL 
PFNGLUNIFORM1UIPROC glad_glUniform1ui; +#define glUniform1ui glad_glUniform1ui +GLAD_API_CALL PFNGLUNIFORM1UIVPROC glad_glUniform1uiv; +#define glUniform1uiv glad_glUniform1uiv +GLAD_API_CALL PFNGLUNIFORM2DPROC glad_glUniform2d; +#define glUniform2d glad_glUniform2d +GLAD_API_CALL PFNGLUNIFORM2DVPROC glad_glUniform2dv; +#define glUniform2dv glad_glUniform2dv +GLAD_API_CALL PFNGLUNIFORM2FPROC glad_glUniform2f; +#define glUniform2f glad_glUniform2f +GLAD_API_CALL PFNGLUNIFORM2FVPROC glad_glUniform2fv; +#define glUniform2fv glad_glUniform2fv +GLAD_API_CALL PFNGLUNIFORM2IPROC glad_glUniform2i; +#define glUniform2i glad_glUniform2i +GLAD_API_CALL PFNGLUNIFORM2IVPROC glad_glUniform2iv; +#define glUniform2iv glad_glUniform2iv +GLAD_API_CALL PFNGLUNIFORM2UIPROC glad_glUniform2ui; +#define glUniform2ui glad_glUniform2ui +GLAD_API_CALL PFNGLUNIFORM2UIVPROC glad_glUniform2uiv; +#define glUniform2uiv glad_glUniform2uiv +GLAD_API_CALL PFNGLUNIFORM3DPROC glad_glUniform3d; +#define glUniform3d glad_glUniform3d +GLAD_API_CALL PFNGLUNIFORM3DVPROC glad_glUniform3dv; +#define glUniform3dv glad_glUniform3dv +GLAD_API_CALL PFNGLUNIFORM3FPROC glad_glUniform3f; +#define glUniform3f glad_glUniform3f +GLAD_API_CALL PFNGLUNIFORM3FVPROC glad_glUniform3fv; +#define glUniform3fv glad_glUniform3fv +GLAD_API_CALL PFNGLUNIFORM3IPROC glad_glUniform3i; +#define glUniform3i glad_glUniform3i +GLAD_API_CALL PFNGLUNIFORM3IVPROC glad_glUniform3iv; +#define glUniform3iv glad_glUniform3iv +GLAD_API_CALL PFNGLUNIFORM3UIPROC glad_glUniform3ui; +#define glUniform3ui glad_glUniform3ui +GLAD_API_CALL PFNGLUNIFORM3UIVPROC glad_glUniform3uiv; +#define glUniform3uiv glad_glUniform3uiv +GLAD_API_CALL PFNGLUNIFORM4DPROC glad_glUniform4d; +#define glUniform4d glad_glUniform4d +GLAD_API_CALL PFNGLUNIFORM4DVPROC glad_glUniform4dv; +#define glUniform4dv glad_glUniform4dv +GLAD_API_CALL PFNGLUNIFORM4FPROC glad_glUniform4f; +#define glUniform4f glad_glUniform4f +GLAD_API_CALL PFNGLUNIFORM4FVPROC glad_glUniform4fv; 
+#define glUniform4fv glad_glUniform4fv +GLAD_API_CALL PFNGLUNIFORM4IPROC glad_glUniform4i; +#define glUniform4i glad_glUniform4i +GLAD_API_CALL PFNGLUNIFORM4IVPROC glad_glUniform4iv; +#define glUniform4iv glad_glUniform4iv +GLAD_API_CALL PFNGLUNIFORM4UIPROC glad_glUniform4ui; +#define glUniform4ui glad_glUniform4ui +GLAD_API_CALL PFNGLUNIFORM4UIVPROC glad_glUniform4uiv; +#define glUniform4uiv glad_glUniform4uiv +GLAD_API_CALL PFNGLUNIFORMBLOCKBINDINGPROC glad_glUniformBlockBinding; +#define glUniformBlockBinding glad_glUniformBlockBinding +GLAD_API_CALL PFNGLUNIFORMMATRIX2DVPROC glad_glUniformMatrix2dv; +#define glUniformMatrix2dv glad_glUniformMatrix2dv +GLAD_API_CALL PFNGLUNIFORMMATRIX2FVPROC glad_glUniformMatrix2fv; +#define glUniformMatrix2fv glad_glUniformMatrix2fv +GLAD_API_CALL PFNGLUNIFORMMATRIX2X3DVPROC glad_glUniformMatrix2x3dv; +#define glUniformMatrix2x3dv glad_glUniformMatrix2x3dv +GLAD_API_CALL PFNGLUNIFORMMATRIX2X3FVPROC glad_glUniformMatrix2x3fv; +#define glUniformMatrix2x3fv glad_glUniformMatrix2x3fv +GLAD_API_CALL PFNGLUNIFORMMATRIX2X4DVPROC glad_glUniformMatrix2x4dv; +#define glUniformMatrix2x4dv glad_glUniformMatrix2x4dv +GLAD_API_CALL PFNGLUNIFORMMATRIX2X4FVPROC glad_glUniformMatrix2x4fv; +#define glUniformMatrix2x4fv glad_glUniformMatrix2x4fv +GLAD_API_CALL PFNGLUNIFORMMATRIX3DVPROC glad_glUniformMatrix3dv; +#define glUniformMatrix3dv glad_glUniformMatrix3dv +GLAD_API_CALL PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv; +#define glUniformMatrix3fv glad_glUniformMatrix3fv +GLAD_API_CALL PFNGLUNIFORMMATRIX3X2DVPROC glad_glUniformMatrix3x2dv; +#define glUniformMatrix3x2dv glad_glUniformMatrix3x2dv +GLAD_API_CALL PFNGLUNIFORMMATRIX3X2FVPROC glad_glUniformMatrix3x2fv; +#define glUniformMatrix3x2fv glad_glUniformMatrix3x2fv +GLAD_API_CALL PFNGLUNIFORMMATRIX3X4DVPROC glad_glUniformMatrix3x4dv; +#define glUniformMatrix3x4dv glad_glUniformMatrix3x4dv +GLAD_API_CALL PFNGLUNIFORMMATRIX3X4FVPROC glad_glUniformMatrix3x4fv; +#define glUniformMatrix3x4fv 
glad_glUniformMatrix3x4fv +GLAD_API_CALL PFNGLUNIFORMMATRIX4DVPROC glad_glUniformMatrix4dv; +#define glUniformMatrix4dv glad_glUniformMatrix4dv +GLAD_API_CALL PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv; +#define glUniformMatrix4fv glad_glUniformMatrix4fv +GLAD_API_CALL PFNGLUNIFORMMATRIX4X2DVPROC glad_glUniformMatrix4x2dv; +#define glUniformMatrix4x2dv glad_glUniformMatrix4x2dv +GLAD_API_CALL PFNGLUNIFORMMATRIX4X2FVPROC glad_glUniformMatrix4x2fv; +#define glUniformMatrix4x2fv glad_glUniformMatrix4x2fv +GLAD_API_CALL PFNGLUNIFORMMATRIX4X3DVPROC glad_glUniformMatrix4x3dv; +#define glUniformMatrix4x3dv glad_glUniformMatrix4x3dv +GLAD_API_CALL PFNGLUNIFORMMATRIX4X3FVPROC glad_glUniformMatrix4x3fv; +#define glUniformMatrix4x3fv glad_glUniformMatrix4x3fv +GLAD_API_CALL PFNGLUNIFORMSUBROUTINESUIVPROC glad_glUniformSubroutinesuiv; +#define glUniformSubroutinesuiv glad_glUniformSubroutinesuiv +GLAD_API_CALL PFNGLUNMAPBUFFERPROC glad_glUnmapBuffer; +#define glUnmapBuffer glad_glUnmapBuffer +GLAD_API_CALL PFNGLUSEPROGRAMPROC glad_glUseProgram; +#define glUseProgram glad_glUseProgram +GLAD_API_CALL PFNGLUSEPROGRAMSTAGESPROC glad_glUseProgramStages; +#define glUseProgramStages glad_glUseProgramStages +GLAD_API_CALL PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram; +#define glValidateProgram glad_glValidateProgram +GLAD_API_CALL PFNGLVALIDATEPROGRAMPIPELINEPROC glad_glValidateProgramPipeline; +#define glValidateProgramPipeline glad_glValidateProgramPipeline +GLAD_API_CALL PFNGLVERTEXATTRIB1DPROC glad_glVertexAttrib1d; +#define glVertexAttrib1d glad_glVertexAttrib1d +GLAD_API_CALL PFNGLVERTEXATTRIB1DVPROC glad_glVertexAttrib1dv; +#define glVertexAttrib1dv glad_glVertexAttrib1dv +GLAD_API_CALL PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f; +#define glVertexAttrib1f glad_glVertexAttrib1f +GLAD_API_CALL PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv; +#define glVertexAttrib1fv glad_glVertexAttrib1fv +GLAD_API_CALL PFNGLVERTEXATTRIB1SPROC glad_glVertexAttrib1s; +#define 
glVertexAttrib1s glad_glVertexAttrib1s +GLAD_API_CALL PFNGLVERTEXATTRIB1SVPROC glad_glVertexAttrib1sv; +#define glVertexAttrib1sv glad_glVertexAttrib1sv +GLAD_API_CALL PFNGLVERTEXATTRIB2DPROC glad_glVertexAttrib2d; +#define glVertexAttrib2d glad_glVertexAttrib2d +GLAD_API_CALL PFNGLVERTEXATTRIB2DVPROC glad_glVertexAttrib2dv; +#define glVertexAttrib2dv glad_glVertexAttrib2dv +GLAD_API_CALL PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f; +#define glVertexAttrib2f glad_glVertexAttrib2f +GLAD_API_CALL PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv; +#define glVertexAttrib2fv glad_glVertexAttrib2fv +GLAD_API_CALL PFNGLVERTEXATTRIB2SPROC glad_glVertexAttrib2s; +#define glVertexAttrib2s glad_glVertexAttrib2s +GLAD_API_CALL PFNGLVERTEXATTRIB2SVPROC glad_glVertexAttrib2sv; +#define glVertexAttrib2sv glad_glVertexAttrib2sv +GLAD_API_CALL PFNGLVERTEXATTRIB3DPROC glad_glVertexAttrib3d; +#define glVertexAttrib3d glad_glVertexAttrib3d +GLAD_API_CALL PFNGLVERTEXATTRIB3DVPROC glad_glVertexAttrib3dv; +#define glVertexAttrib3dv glad_glVertexAttrib3dv +GLAD_API_CALL PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f; +#define glVertexAttrib3f glad_glVertexAttrib3f +GLAD_API_CALL PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv; +#define glVertexAttrib3fv glad_glVertexAttrib3fv +GLAD_API_CALL PFNGLVERTEXATTRIB3SPROC glad_glVertexAttrib3s; +#define glVertexAttrib3s glad_glVertexAttrib3s +GLAD_API_CALL PFNGLVERTEXATTRIB3SVPROC glad_glVertexAttrib3sv; +#define glVertexAttrib3sv glad_glVertexAttrib3sv +GLAD_API_CALL PFNGLVERTEXATTRIB4NBVPROC glad_glVertexAttrib4Nbv; +#define glVertexAttrib4Nbv glad_glVertexAttrib4Nbv +GLAD_API_CALL PFNGLVERTEXATTRIB4NIVPROC glad_glVertexAttrib4Niv; +#define glVertexAttrib4Niv glad_glVertexAttrib4Niv +GLAD_API_CALL PFNGLVERTEXATTRIB4NSVPROC glad_glVertexAttrib4Nsv; +#define glVertexAttrib4Nsv glad_glVertexAttrib4Nsv +GLAD_API_CALL PFNGLVERTEXATTRIB4NUBPROC glad_glVertexAttrib4Nub; +#define glVertexAttrib4Nub glad_glVertexAttrib4Nub +GLAD_API_CALL 
PFNGLVERTEXATTRIB4NUBVPROC glad_glVertexAttrib4Nubv; +#define glVertexAttrib4Nubv glad_glVertexAttrib4Nubv +GLAD_API_CALL PFNGLVERTEXATTRIB4NUIVPROC glad_glVertexAttrib4Nuiv; +#define glVertexAttrib4Nuiv glad_glVertexAttrib4Nuiv +GLAD_API_CALL PFNGLVERTEXATTRIB4NUSVPROC glad_glVertexAttrib4Nusv; +#define glVertexAttrib4Nusv glad_glVertexAttrib4Nusv +GLAD_API_CALL PFNGLVERTEXATTRIB4BVPROC glad_glVertexAttrib4bv; +#define glVertexAttrib4bv glad_glVertexAttrib4bv +GLAD_API_CALL PFNGLVERTEXATTRIB4DPROC glad_glVertexAttrib4d; +#define glVertexAttrib4d glad_glVertexAttrib4d +GLAD_API_CALL PFNGLVERTEXATTRIB4DVPROC glad_glVertexAttrib4dv; +#define glVertexAttrib4dv glad_glVertexAttrib4dv +GLAD_API_CALL PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f; +#define glVertexAttrib4f glad_glVertexAttrib4f +GLAD_API_CALL PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv; +#define glVertexAttrib4fv glad_glVertexAttrib4fv +GLAD_API_CALL PFNGLVERTEXATTRIB4IVPROC glad_glVertexAttrib4iv; +#define glVertexAttrib4iv glad_glVertexAttrib4iv +GLAD_API_CALL PFNGLVERTEXATTRIB4SPROC glad_glVertexAttrib4s; +#define glVertexAttrib4s glad_glVertexAttrib4s +GLAD_API_CALL PFNGLVERTEXATTRIB4SVPROC glad_glVertexAttrib4sv; +#define glVertexAttrib4sv glad_glVertexAttrib4sv +GLAD_API_CALL PFNGLVERTEXATTRIB4UBVPROC glad_glVertexAttrib4ubv; +#define glVertexAttrib4ubv glad_glVertexAttrib4ubv +GLAD_API_CALL PFNGLVERTEXATTRIB4UIVPROC glad_glVertexAttrib4uiv; +#define glVertexAttrib4uiv glad_glVertexAttrib4uiv +GLAD_API_CALL PFNGLVERTEXATTRIB4USVPROC glad_glVertexAttrib4usv; +#define glVertexAttrib4usv glad_glVertexAttrib4usv +GLAD_API_CALL PFNGLVERTEXATTRIBBINDINGPROC glad_glVertexAttribBinding; +#define glVertexAttribBinding glad_glVertexAttribBinding +GLAD_API_CALL PFNGLVERTEXATTRIBDIVISORPROC glad_glVertexAttribDivisor; +#define glVertexAttribDivisor glad_glVertexAttribDivisor +GLAD_API_CALL PFNGLVERTEXATTRIBFORMATPROC glad_glVertexAttribFormat; +#define glVertexAttribFormat glad_glVertexAttribFormat 
+GLAD_API_CALL PFNGLVERTEXATTRIBI1IPROC glad_glVertexAttribI1i; +#define glVertexAttribI1i glad_glVertexAttribI1i +GLAD_API_CALL PFNGLVERTEXATTRIBI1IVPROC glad_glVertexAttribI1iv; +#define glVertexAttribI1iv glad_glVertexAttribI1iv +GLAD_API_CALL PFNGLVERTEXATTRIBI1UIPROC glad_glVertexAttribI1ui; +#define glVertexAttribI1ui glad_glVertexAttribI1ui +GLAD_API_CALL PFNGLVERTEXATTRIBI1UIVPROC glad_glVertexAttribI1uiv; +#define glVertexAttribI1uiv glad_glVertexAttribI1uiv +GLAD_API_CALL PFNGLVERTEXATTRIBI2IPROC glad_glVertexAttribI2i; +#define glVertexAttribI2i glad_glVertexAttribI2i +GLAD_API_CALL PFNGLVERTEXATTRIBI2IVPROC glad_glVertexAttribI2iv; +#define glVertexAttribI2iv glad_glVertexAttribI2iv +GLAD_API_CALL PFNGLVERTEXATTRIBI2UIPROC glad_glVertexAttribI2ui; +#define glVertexAttribI2ui glad_glVertexAttribI2ui +GLAD_API_CALL PFNGLVERTEXATTRIBI2UIVPROC glad_glVertexAttribI2uiv; +#define glVertexAttribI2uiv glad_glVertexAttribI2uiv +GLAD_API_CALL PFNGLVERTEXATTRIBI3IPROC glad_glVertexAttribI3i; +#define glVertexAttribI3i glad_glVertexAttribI3i +GLAD_API_CALL PFNGLVERTEXATTRIBI3IVPROC glad_glVertexAttribI3iv; +#define glVertexAttribI3iv glad_glVertexAttribI3iv +GLAD_API_CALL PFNGLVERTEXATTRIBI3UIPROC glad_glVertexAttribI3ui; +#define glVertexAttribI3ui glad_glVertexAttribI3ui +GLAD_API_CALL PFNGLVERTEXATTRIBI3UIVPROC glad_glVertexAttribI3uiv; +#define glVertexAttribI3uiv glad_glVertexAttribI3uiv +GLAD_API_CALL PFNGLVERTEXATTRIBI4BVPROC glad_glVertexAttribI4bv; +#define glVertexAttribI4bv glad_glVertexAttribI4bv +GLAD_API_CALL PFNGLVERTEXATTRIBI4IPROC glad_glVertexAttribI4i; +#define glVertexAttribI4i glad_glVertexAttribI4i +GLAD_API_CALL PFNGLVERTEXATTRIBI4IVPROC glad_glVertexAttribI4iv; +#define glVertexAttribI4iv glad_glVertexAttribI4iv +GLAD_API_CALL PFNGLVERTEXATTRIBI4SVPROC glad_glVertexAttribI4sv; +#define glVertexAttribI4sv glad_glVertexAttribI4sv +GLAD_API_CALL PFNGLVERTEXATTRIBI4UBVPROC glad_glVertexAttribI4ubv; +#define glVertexAttribI4ubv 
glad_glVertexAttribI4ubv +GLAD_API_CALL PFNGLVERTEXATTRIBI4UIPROC glad_glVertexAttribI4ui; +#define glVertexAttribI4ui glad_glVertexAttribI4ui +GLAD_API_CALL PFNGLVERTEXATTRIBI4UIVPROC glad_glVertexAttribI4uiv; +#define glVertexAttribI4uiv glad_glVertexAttribI4uiv +GLAD_API_CALL PFNGLVERTEXATTRIBI4USVPROC glad_glVertexAttribI4usv; +#define glVertexAttribI4usv glad_glVertexAttribI4usv +GLAD_API_CALL PFNGLVERTEXATTRIBIFORMATPROC glad_glVertexAttribIFormat; +#define glVertexAttribIFormat glad_glVertexAttribIFormat +GLAD_API_CALL PFNGLVERTEXATTRIBIPOINTERPROC glad_glVertexAttribIPointer; +#define glVertexAttribIPointer glad_glVertexAttribIPointer +GLAD_API_CALL PFNGLVERTEXATTRIBL1DPROC glad_glVertexAttribL1d; +#define glVertexAttribL1d glad_glVertexAttribL1d +GLAD_API_CALL PFNGLVERTEXATTRIBL1DVPROC glad_glVertexAttribL1dv; +#define glVertexAttribL1dv glad_glVertexAttribL1dv +GLAD_API_CALL PFNGLVERTEXATTRIBL2DPROC glad_glVertexAttribL2d; +#define glVertexAttribL2d glad_glVertexAttribL2d +GLAD_API_CALL PFNGLVERTEXATTRIBL2DVPROC glad_glVertexAttribL2dv; +#define glVertexAttribL2dv glad_glVertexAttribL2dv +GLAD_API_CALL PFNGLVERTEXATTRIBL3DPROC glad_glVertexAttribL3d; +#define glVertexAttribL3d glad_glVertexAttribL3d +GLAD_API_CALL PFNGLVERTEXATTRIBL3DVPROC glad_glVertexAttribL3dv; +#define glVertexAttribL3dv glad_glVertexAttribL3dv +GLAD_API_CALL PFNGLVERTEXATTRIBL4DPROC glad_glVertexAttribL4d; +#define glVertexAttribL4d glad_glVertexAttribL4d +GLAD_API_CALL PFNGLVERTEXATTRIBL4DVPROC glad_glVertexAttribL4dv; +#define glVertexAttribL4dv glad_glVertexAttribL4dv +GLAD_API_CALL PFNGLVERTEXATTRIBLFORMATPROC glad_glVertexAttribLFormat; +#define glVertexAttribLFormat glad_glVertexAttribLFormat +GLAD_API_CALL PFNGLVERTEXATTRIBLPOINTERPROC glad_glVertexAttribLPointer; +#define glVertexAttribLPointer glad_glVertexAttribLPointer +GLAD_API_CALL PFNGLVERTEXATTRIBP1UIPROC glad_glVertexAttribP1ui; +#define glVertexAttribP1ui glad_glVertexAttribP1ui +GLAD_API_CALL 
PFNGLVERTEXATTRIBP1UIVPROC glad_glVertexAttribP1uiv; +#define glVertexAttribP1uiv glad_glVertexAttribP1uiv +GLAD_API_CALL PFNGLVERTEXATTRIBP2UIPROC glad_glVertexAttribP2ui; +#define glVertexAttribP2ui glad_glVertexAttribP2ui +GLAD_API_CALL PFNGLVERTEXATTRIBP2UIVPROC glad_glVertexAttribP2uiv; +#define glVertexAttribP2uiv glad_glVertexAttribP2uiv +GLAD_API_CALL PFNGLVERTEXATTRIBP3UIPROC glad_glVertexAttribP3ui; +#define glVertexAttribP3ui glad_glVertexAttribP3ui +GLAD_API_CALL PFNGLVERTEXATTRIBP3UIVPROC glad_glVertexAttribP3uiv; +#define glVertexAttribP3uiv glad_glVertexAttribP3uiv +GLAD_API_CALL PFNGLVERTEXATTRIBP4UIPROC glad_glVertexAttribP4ui; +#define glVertexAttribP4ui glad_glVertexAttribP4ui +GLAD_API_CALL PFNGLVERTEXATTRIBP4UIVPROC glad_glVertexAttribP4uiv; +#define glVertexAttribP4uiv glad_glVertexAttribP4uiv +GLAD_API_CALL PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer; +#define glVertexAttribPointer glad_glVertexAttribPointer +GLAD_API_CALL PFNGLVERTEXBINDINGDIVISORPROC glad_glVertexBindingDivisor; +#define glVertexBindingDivisor glad_glVertexBindingDivisor +GLAD_API_CALL PFNGLVIEWPORTPROC glad_glViewport; +#define glViewport glad_glViewport +GLAD_API_CALL PFNGLVIEWPORTARRAYVPROC glad_glViewportArrayv; +#define glViewportArrayv glad_glViewportArrayv +GLAD_API_CALL PFNGLVIEWPORTINDEXEDFPROC glad_glViewportIndexedf; +#define glViewportIndexedf glad_glViewportIndexedf +GLAD_API_CALL PFNGLVIEWPORTINDEXEDFVPROC glad_glViewportIndexedfv; +#define glViewportIndexedfv glad_glViewportIndexedfv +GLAD_API_CALL PFNGLWAITSYNCPROC glad_glWaitSync; +#define glWaitSync glad_glWaitSync + + + + + +GLAD_API_CALL int gladLoadGLUserPtr( GLADuserptrloadfunc load, void *userptr); +GLAD_API_CALL int gladLoadGL( GLADloadfunc load); + + + +#ifdef __cplusplus +} +#endif +#endif diff --git a/include/opengl.h b/include/opengl.h new file mode 100644 index 0000000..1545e24 --- /dev/null +++ b/include/opengl.h @@ -0,0 +1,14 @@ +#pragma once + +#ifdef __cplusplus +extern "C" { 
+#endif + +void * read_file(const char * filename, int * out_size); + +unsigned int compile_from_files(const char * vertex_path, + const char * fragment_path); + +#ifdef __cplusplus +} +#endif diff --git a/include/sal.h b/include/sal.h new file mode 100644 index 0000000..9d461e8 --- /dev/null +++ b/include/sal.h @@ -0,0 +1,2941 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +/*** +*sal.h - markers for documenting the semantics of APIs +* + +* +*Purpose: +* sal.h provides a set of annotations to describe how a function uses its +* parameters - the assumptions it makes about them, and the guarantees it makes +* upon finishing. +****/ +#pragma once + +/*========================================================================== + + The comments in this file are intended to give basic understanding of + the usage of SAL, the Microsoft Source Code Annotation Language. + For more details, please see http://go.microsoft.com/fwlink/?LinkID=242134 + + The macros are defined in 3 layers, plus the structural set: + + _In_/_Out_/_Ret_ Layer: + ---------------------- + This layer provides the highest abstraction and its macros should be used + in most cases. These macros typically start with: + _In_ : input parameter to a function, unmodified by called function + _Out_ : output parameter, written to by called function, pointed-to + location not expected to be initialized prior to call + _Outptr_ : like _Out_ when returned variable is a pointer type + (so param is pointer-to-pointer type). Called function + provides/allocated space. + _Outref_ : like _Outptr_, except param is reference-to-pointer type. + _Inout_ : inout parameter, read from and potentially modified by + called function. + _Ret_ : for return values + _Field_ : class/struct field invariants + For common usage, this class of SAL provides the most concise annotations. 
+ Note that _In_/_Out_/_Inout_/_Outptr_ annotations are designed to be used + with a parameter target. Using them with _At_ to specify non-parameter + targets may yield unexpected results. + + This layer also includes a number of other properties that can be specified + to extend the ability of code analysis, most notably: + -- Designating parameters as format strings for printf/scanf/scanf_s + -- Requesting stricter type checking for C enum parameters + + _Pre_/_Post_ Layer: + ------------------ + The macros of this layer only should be used when there is no suitable macro + in the _In_/_Out_ layer. Its macros start with _Pre_ or _Post_. + This layer provides the most flexibility for annotations. + + Implementation Abstraction Layer: + -------------------------------- + Macros from this layer should never be used directly. The layer only exists + to hide the implementation of the annotation macros. + + Structural Layer: + ---------------- + These annotations, like _At_ and _When_, are used with annotations from + any of the other layers as modifiers, indicating exactly when and where + the annotations apply. + + + Common syntactic conventions: + ---------------------------- + + Usage: + ----- + _In_, _Out_, _Inout_, _Pre_, _Post_, are for formal parameters. + _Ret_, _Deref_ret_ must be used for return values. + + Nullness: + -------- + If the parameter can be NULL as a precondition to the function, the + annotation contains _opt. If the macro does not contain '_opt' the + parameter cannot be NULL. + + If an out/inout parameter returns a null pointer as a postcondition, this is + indicated by _Ret_maybenull_ or _result_maybenull_. If the macro is not + of this form, then the result will not be NULL as a postcondition. 
+ _Outptr_ - output value is not NULL + _Outptr_result_maybenull_ - output value might be NULL + + String Type: + ----------- + _z: NullTerminated string + for _In_ parameters the buffer must have the specified stringtype before the call + for _Out_ parameters the buffer must have the specified stringtype after the call + for _Inout_ parameters both conditions apply + + Extent Syntax: + ------------- + Buffer sizes are expressed as element counts, unless the macro explicitly + contains _byte_ or _bytes_. Some annotations specify two buffer sizes, in + which case the second is used to indicate how much of the buffer is valid + as a postcondition. This table outlines the precondition buffer allocation + size, precondition number of valid elements, postcondition allocation size, + and postcondition number of valid elements for representative buffer size + annotations: + Pre | Pre | Post | Post + alloc | valid | alloc | valid + Annotation elems | elems | elems | elems + ---------- ------------------------------------ + _In_reads_(s) s | s | s | s + _Inout_updates_(s) s | s | s | s + _Inout_updates_to_(s,c) s | s | s | c + _Out_writes_(s) s | 0 | s | s + _Out_writes_to_(s,c) s | 0 | s | c + _Outptr_result_buffer_(s) ? | ? | s | s + _Outptr_result_buffer_to_(s,c) ? | ? | s | c + + For the _Outptr_ annotations, the buffer in question is at one level of + dereference. The called function is responsible for supplying the buffer. + + Success and failure: + ------------------- + The SAL concept of success allows functions to define expressions that can + be tested by the caller, which if it evaluates to non-zero, indicates the + function succeeded, which means that its postconditions are guaranteed to + hold. Otherwise, if the expression evaluates to zero, the function is + considered to have failed, and the postconditions are not guaranteed. 
+ + The success criteria can be specified with the _Success_(expr) annotation: + _Success_(return != FALSE) BOOL + PathCanonicalizeA(_Out_writes_(MAX_PATH) LPSTR pszBuf, LPCSTR pszPath) : + pszBuf is only guaranteed to be NULL-terminated when TRUE is returned, + and FALSE indicates failure. In common practice, callers check for zero + vs. non-zero returns, so it is preferable to express the success + criteria in terms of zero/non-zero, not checked for exactly TRUE. + + Functions can specify that some postconditions will still hold, even when + the function fails, using _On_failure_(anno-list), or postconditions that + hold regardless of success or failure using _Always_(anno-list). + + The annotation _Return_type_success_(expr) may be used with a typedef to + give a default _Success_ criteria to all functions returning that type. + This is the case for common Windows API status types, including + HRESULT and NTSTATUS. This may be overridden on a per-function basis by + specifying a _Success_ annotation locally. 
+ +============================================================================*/ + +#define __ATTR_SAL + +#ifndef _SAL_VERSION /*IFSTRIP=IGN*/ +#define _SAL_VERSION 20 +#endif + +#ifdef _PREFAST_ // [ + +// choose attribute or __declspec implementation +#ifndef _USE_DECLSPECS_FOR_SAL // [ +#define _USE_DECLSPECS_FOR_SAL 1 +#endif // ] + +#if _USE_DECLSPECS_FOR_SAL // [ +#undef _USE_ATTRIBUTES_FOR_SAL +#define _USE_ATTRIBUTES_FOR_SAL 0 +#elif !defined(_USE_ATTRIBUTES_FOR_SAL) // ][ +#if _MSC_VER >= 1400 /*IFSTRIP=IGN*/ // [ +#define _USE_ATTRIBUTES_FOR_SAL 1 +#else // ][ +#define _USE_ATTRIBUTES_FOR_SAL 0 +#endif // ] +#endif // ] + + +#if !_USE_DECLSPECS_FOR_SAL // [ +#if !_USE_ATTRIBUTES_FOR_SAL // [ +#if _MSC_VER >= 1400 /*IFSTRIP=IGN*/ // [ +#undef _USE_ATTRIBUTES_FOR_SAL +#define _USE_ATTRIBUTES_FOR_SAL 1 +#else // ][ +#undef _USE_DECLSPECS_FOR_SAL +#define _USE_DECLSPECS_FOR_SAL 1 +#endif // ] +#endif // ] +#endif // ] + +#else + +// Disable expansion of SAL macros in non-Prefast mode to +// improve compiler throughput. 
+#ifndef _USE_DECLSPECS_FOR_SAL // [ +#define _USE_DECLSPECS_FOR_SAL 0 +#endif // ] +#ifndef _USE_ATTRIBUTES_FOR_SAL // [ +#define _USE_ATTRIBUTES_FOR_SAL 0 +#endif // ] + +#endif // ] + +// safeguard for MIDL and RC builds +#if _USE_DECLSPECS_FOR_SAL && ( defined( MIDL_PASS ) || defined(__midl) || defined(RC_INVOKED) || !defined(_PREFAST_) ) /*IFSTRIP=IGN*/ // [ +#undef _USE_DECLSPECS_FOR_SAL +#define _USE_DECLSPECS_FOR_SAL 0 +#endif // ] +#if _USE_ATTRIBUTES_FOR_SAL && ( !defined(_MSC_EXTENSIONS) || defined( MIDL_PASS ) || defined(__midl) || defined(RC_INVOKED) ) /*IFSTRIP=IGN*/ // [ +#undef _USE_ATTRIBUTES_FOR_SAL +#define _USE_ATTRIBUTES_FOR_SAL 0 +#endif // ] + +#if _USE_DECLSPECS_FOR_SAL || _USE_ATTRIBUTES_FOR_SAL + +// Special enum type for Y/N/M +enum __SAL_YesNo {_SAL_notpresent, _SAL_no, _SAL_maybe, _SAL_yes, _SAL_default}; + +#endif + +#if defined(BUILD_WINDOWS) && !_USE_ATTRIBUTES_FOR_SAL /*IFSTRIP=IGN*/ +#define _SAL1_Source_(Name, args, annotes) _SA_annotes3(SAL_name, #Name, "", "1") _GrouP_(annotes _SAL_nop_impl_) +#define _SAL1_1_Source_(Name, args, annotes) _SA_annotes3(SAL_name, #Name, "", "1.1") _GrouP_(annotes _SAL_nop_impl_) +#define _SAL1_2_Source_(Name, args, annotes) _SA_annotes3(SAL_name, #Name, "", "1.2") _GrouP_(annotes _SAL_nop_impl_) +#define _SAL2_Source_(Name, args, annotes) _SA_annotes3(SAL_name, #Name, "", "2") _GrouP_(annotes _SAL_nop_impl_) +#else +#define _SAL1_Source_(Name, args, annotes) _SA_annotes3(SAL_name, #Name, "", "1") _Group_(annotes _SAL_nop_impl_) +#define _SAL1_1_Source_(Name, args, annotes) _SA_annotes3(SAL_name, #Name, "", "1.1") _Group_(annotes _SAL_nop_impl_) +#define _SAL1_2_Source_(Name, args, annotes) _SA_annotes3(SAL_name, #Name, "", "1.2") _Group_(annotes _SAL_nop_impl_) +#define _SAL2_Source_(Name, args, annotes) _SA_annotes3(SAL_name, #Name, "", "2") _Group_(annotes _SAL_nop_impl_) +#endif + +//============================================================================ +// Structural SAL: +// These 
annotations modify the use of other annotations. They may +// express the annotation target (i.e. what parameter/field the annotation +// applies to) or the condition under which the annotation is applicable. +//============================================================================ + +// _At_(target, annos) specifies that the annotations listed in 'annos' is to +// be applied to 'target' rather than to the identifier which is the current +// lexical target. +#define _At_(target, annos) _At_impl_(target, annos _SAL_nop_impl_) + +// _At_buffer_(target, iter, bound, annos) is similar to _At_, except that +// target names a buffer, and each annotation in annos is applied to each +// element of target up to bound, with the variable named in iter usable +// by the annotations to refer to relevant offsets within target. +#define _At_buffer_(target, iter, bound, annos) _At_buffer_impl_(target, iter, bound, annos _SAL_nop_impl_) + +// _When_(expr, annos) specifies that the annotations listed in 'annos' only +// apply when 'expr' evaluates to non-zero. +#define _When_(expr, annos) _When_impl_(expr, annos _SAL_nop_impl_) +#define _Group_(annos) _Group_impl_(annos _SAL_nop_impl_) +#define _GrouP_(annos) _GrouP_impl_(annos _SAL_nop_impl_) + +// indicates whether normal post conditions apply to a function +#define _Success_(expr) _SAL2_Source_(_Success_, (expr), _Success_impl_(expr)) + +// indicates whether post conditions apply to a function returning +// the type that this annotation is applied to +#define _Return_type_success_(expr) _SAL2_Source_(_Return_type_success_, (expr), _Success_impl_(expr)) + +// Establish postconditions that apply only if the function does not succeed +#define _On_failure_(annos) _On_failure_impl_(annos _SAL_nop_impl_) + +// Establish postconditions that apply in both success and failure cases. +// Only applicable with functions that have _Success_ or _Return_type_succss_. 
+#define _Always_(annos) _Always_impl_(annos _SAL_nop_impl_) + +// Usable on a function definition. Asserts that a function declaration is +// in scope, and its annotations are to be used. There are no other annotations +// allowed on the function definition. +#define _Use_decl_annotations_ _Use_decl_anno_impl_ + +// _Notref_ may precede a _Deref_ or "real" annotation, and removes one +// level of dereference if the parameter is a C++ reference (&). If the +// net deref on a "real" annotation is negative, it is simply discarded. +#define _Notref_ _Notref_impl_ + +// Annotations for defensive programming styles. +#define _Pre_defensive_ _SA_annotes0(SAL_pre_defensive) +#define _Post_defensive_ _SA_annotes0(SAL_post_defensive) + +#define _In_defensive_(annotes) _Pre_defensive_ _Group_(annotes) +#define _Out_defensive_(annotes) _Post_defensive_ _Group_(annotes) +#define _Inout_defensive_(annotes) _Pre_defensive_ _Post_defensive_ _Group_(annotes) + +//============================================================================ +// _In_\_Out_ Layer: +//============================================================================ + +// Reserved pointer parameters, must always be NULL. +#define _Reserved_ _SAL2_Source_(_Reserved_, (), _Pre1_impl_(__null_impl)) + +// _Const_ allows specification that any namable memory location is considered +// readonly for a given call. +#define _Const_ _SAL2_Source_(_Const_, (), _Pre1_impl_(__readaccess_impl_notref)) + + +// Input parameters -------------------------- + +// _In_ - Annotations for parameters where data is passed into the function, but not modified. +// _In_ by itself can be used with non-pointer types (although it is redundant). + +// e.g. 
void SetPoint( _In_ const POINT* pPT ); +#define _In_ _SAL2_Source_(_In_, (), _Pre1_impl_(__notnull_impl_notref) _Pre_valid_impl_ _Deref_pre1_impl_(__readaccess_impl_notref)) +#define _In_opt_ _SAL2_Source_(_In_opt_, (), _Pre1_impl_(__maybenull_impl_notref) _Pre_valid_impl_ _Deref_pre_readonly_) + +// nullterminated 'in' parameters. +// e.g. void CopyStr( _In_z_ const char* szFrom, _Out_z_cap_(cchTo) char* szTo, size_t cchTo ); +#define _In_z_ _SAL2_Source_(_In_z_, (), _In_ _Pre1_impl_(__zterm_impl)) +#define _In_opt_z_ _SAL2_Source_(_In_opt_z_, (), _In_opt_ _Pre1_impl_(__zterm_impl)) + + +// 'input' buffers with given size + +#define _In_reads_(size) _SAL2_Source_(_In_reads_, (size), _Pre_count_(size) _Deref_pre_readonly_) +#define _In_reads_opt_(size) _SAL2_Source_(_In_reads_opt_, (size), _Pre_opt_count_(size) _Deref_pre_readonly_) +#define _In_reads_bytes_(size) _SAL2_Source_(_In_reads_bytes_, (size), _Pre_bytecount_(size) _Deref_pre_readonly_) +#define _In_reads_bytes_opt_(size) _SAL2_Source_(_In_reads_bytes_opt_, (size), _Pre_opt_bytecount_(size) _Deref_pre_readonly_) +#define _In_reads_z_(size) _SAL2_Source_(_In_reads_z_, (size), _In_reads_(size) _Pre_z_) +#define _In_reads_opt_z_(size) _SAL2_Source_(_In_reads_opt_z_, (size), _Pre_opt_count_(size) _Deref_pre_readonly_ _Pre_opt_z_) +#define _In_reads_or_z_(size) _SAL2_Source_(_In_reads_or_z_, (size), _In_ _When_(_String_length_(_Curr_) < (size), _Pre_z_) _When_(_String_length_(_Curr_) >= (size), _Pre1_impl_(__count_impl(size)))) +#define _In_reads_or_z_opt_(size) _SAL2_Source_(_In_reads_or_z_opt_, (size), _In_opt_ _When_(_String_length_(_Curr_) < (size), _Pre_z_) _When_(_String_length_(_Curr_) >= (size), _Pre1_impl_(__count_impl(size)))) + + +// 'input' buffers valid to the given end pointer + +#define _In_reads_to_ptr_(ptr) _SAL2_Source_(_In_reads_to_ptr_, (ptr), _Pre_ptrdiff_count_(ptr) _Deref_pre_readonly_) +#define _In_reads_to_ptr_opt_(ptr) _SAL2_Source_(_In_reads_to_ptr_opt_, (ptr), 
_Pre_opt_ptrdiff_count_(ptr) _Deref_pre_readonly_) +#define _In_reads_to_ptr_z_(ptr) _SAL2_Source_(_In_reads_to_ptr_z_, (ptr), _In_reads_to_ptr_(ptr) _Pre_z_) +#define _In_reads_to_ptr_opt_z_(ptr) _SAL2_Source_(_In_reads_to_ptr_opt_z_, (ptr), _Pre_opt_ptrdiff_count_(ptr) _Deref_pre_readonly_ _Pre_opt_z_) + + + +// Output parameters -------------------------- + +// _Out_ - Annotations for pointer or reference parameters where data passed back to the caller. +// These are mostly used where the pointer/reference is to a non-pointer type. +// _Outptr_/_Outref_ (see below) are typically used to return pointers via parameters. + +// e.g. void GetPoint( _Out_ POINT* pPT ); +#define _Out_ _SAL2_Source_(_Out_, (), _Out_impl_) +#define _Out_opt_ _SAL2_Source_(_Out_opt_, (), _Out_opt_impl_) + +#define _Out_writes_(size) _SAL2_Source_(_Out_writes_, (size), _Pre_cap_(size) _Post_valid_impl_) +#define _Out_writes_opt_(size) _SAL2_Source_(_Out_writes_opt_, (size), _Pre_opt_cap_(size) _Post_valid_impl_) +#define _Out_writes_bytes_(size) _SAL2_Source_(_Out_writes_bytes_, (size), _Pre_bytecap_(size) _Post_valid_impl_) +#define _Out_writes_bytes_opt_(size) _SAL2_Source_(_Out_writes_bytes_opt_, (size), _Pre_opt_bytecap_(size) _Post_valid_impl_) +#define _Out_writes_z_(size) _SAL2_Source_(_Out_writes_z_, (size), _Pre_cap_(size) _Post_valid_impl_ _Post_z_) +#define _Out_writes_opt_z_(size) _SAL2_Source_(_Out_writes_opt_z_, (size), _Pre_opt_cap_(size) _Post_valid_impl_ _Post_z_) + +#define _Out_writes_to_(size,count) _SAL2_Source_(_Out_writes_to_, (size,count), _Pre_cap_(size) _Post_valid_impl_ _Post_count_(count)) +#define _Out_writes_to_opt_(size,count) _SAL2_Source_(_Out_writes_to_opt_, (size,count), _Pre_opt_cap_(size) _Post_valid_impl_ _Post_count_(count)) +#define _Out_writes_all_(size) _SAL2_Source_(_Out_writes_all_, (size), _Out_writes_to_(_Old_(size), _Old_(size))) +#define _Out_writes_all_opt_(size) _SAL2_Source_(_Out_writes_all_opt_, (size), _Out_writes_to_opt_(_Old_(size), 
_Old_(size))) + +#define _Out_writes_bytes_to_(size,count) _SAL2_Source_(_Out_writes_bytes_to_, (size,count), _Pre_bytecap_(size) _Post_valid_impl_ _Post_bytecount_(count)) +#define _Out_writes_bytes_to_opt_(size,count) _SAL2_Source_(_Out_writes_bytes_to_opt_, (size,count), _Pre_opt_bytecap_(size) _Post_valid_impl_ _Post_bytecount_(count)) +#define _Out_writes_bytes_all_(size) _SAL2_Source_(_Out_writes_bytes_all_, (size), _Out_writes_bytes_to_(_Old_(size), _Old_(size))) +#define _Out_writes_bytes_all_opt_(size) _SAL2_Source_(_Out_writes_bytes_all_opt_, (size), _Out_writes_bytes_to_opt_(_Old_(size), _Old_(size))) + +#define _Out_writes_to_ptr_(ptr) _SAL2_Source_(_Out_writes_to_ptr_, (ptr), _Pre_ptrdiff_cap_(ptr) _Post_valid_impl_) +#define _Out_writes_to_ptr_opt_(ptr) _SAL2_Source_(_Out_writes_to_ptr_opt_, (ptr), _Pre_opt_ptrdiff_cap_(ptr) _Post_valid_impl_) +#define _Out_writes_to_ptr_z_(ptr) _SAL2_Source_(_Out_writes_to_ptr_z_, (ptr), _Pre_ptrdiff_cap_(ptr) _Post_valid_impl_ _Post_z_) +#define _Out_writes_to_ptr_opt_z_(ptr) _SAL2_Source_(_Out_writes_to_ptr_opt_z_, (ptr), _Pre_opt_ptrdiff_cap_(ptr) _Post_valid_impl_ _Post_z_) + + +// Inout parameters ---------------------------- + +// _Inout_ - Annotations for pointer or reference parameters where data is passed in and +// potentially modified. 
+// void ModifyPoint( _Inout_ POINT* pPT ); +// void ModifyPointByRef( _Inout_ POINT& pPT ); + +#define _Inout_ _SAL2_Source_(_Inout_, (), _Prepost_valid_) +#define _Inout_opt_ _SAL2_Source_(_Inout_opt_, (), _Prepost_opt_valid_) + +// For modifying string buffers +// void toupper( _Inout_z_ char* sz ); +#define _Inout_z_ _SAL2_Source_(_Inout_z_, (), _Prepost_z_) +#define _Inout_opt_z_ _SAL2_Source_(_Inout_opt_z_, (), _Prepost_opt_z_) + +// For modifying buffers with explicit element size +#define _Inout_updates_(size) _SAL2_Source_(_Inout_updates_, (size), _Pre_cap_(size) _Pre_valid_impl_ _Post_valid_impl_) +#define _Inout_updates_opt_(size) _SAL2_Source_(_Inout_updates_opt_, (size), _Pre_opt_cap_(size) _Pre_valid_impl_ _Post_valid_impl_) +#define _Inout_updates_z_(size) _SAL2_Source_(_Inout_updates_z_, (size), _Pre_cap_(size) _Pre_valid_impl_ _Post_valid_impl_ _Pre1_impl_(__zterm_impl) _Post1_impl_(__zterm_impl)) +#define _Inout_updates_opt_z_(size) _SAL2_Source_(_Inout_updates_opt_z_, (size), _Pre_opt_cap_(size) _Pre_valid_impl_ _Post_valid_impl_ _Pre1_impl_(__zterm_impl) _Post1_impl_(__zterm_impl)) + +#define _Inout_updates_to_(size,count) _SAL2_Source_(_Inout_updates_to_, (size,count), _Out_writes_to_(size,count) _Pre_valid_impl_ _Pre1_impl_(__count_impl(count))) +#define _Inout_updates_to_opt_(size,count) _SAL2_Source_(_Inout_updates_to_opt_, (size,count), _Out_writes_to_opt_(size,count) _Pre_valid_impl_ _Pre1_impl_(__count_impl(count))) + +#define _Inout_updates_all_(size) _SAL2_Source_(_Inout_updates_all_, (size), _Inout_updates_to_(_Old_(size), _Old_(size))) +#define _Inout_updates_all_opt_(size) _SAL2_Source_(_Inout_updates_all_opt_, (size), _Inout_updates_to_opt_(_Old_(size), _Old_(size))) + +// For modifying buffers with explicit byte size +#define _Inout_updates_bytes_(size) _SAL2_Source_(_Inout_updates_bytes_, (size), _Pre_bytecap_(size) _Pre_valid_impl_ _Post_valid_impl_) +#define _Inout_updates_bytes_opt_(size) 
_SAL2_Source_(_Inout_updates_bytes_opt_, (size), _Pre_opt_bytecap_(size) _Pre_valid_impl_ _Post_valid_impl_) + +#define _Inout_updates_bytes_to_(size,count) _SAL2_Source_(_Inout_updates_bytes_to_, (size,count), _Out_writes_bytes_to_(size,count) _Pre_valid_impl_ _Pre1_impl_(__bytecount_impl(count))) +#define _Inout_updates_bytes_to_opt_(size,count) _SAL2_Source_(_Inout_updates_bytes_to_opt_, (size,count), _Out_writes_bytes_to_opt_(size,count) _Pre_valid_impl_ _Pre1_impl_(__bytecount_impl(count))) + +#define _Inout_updates_bytes_all_(size) _SAL2_Source_(_Inout_updates_bytes_all_, (size), _Inout_updates_bytes_to_(_Old_(size), _Old_(size))) +#define _Inout_updates_bytes_all_opt_(size) _SAL2_Source_(_Inout_updates_bytes_all_opt_, (size), _Inout_updates_bytes_to_opt_(_Old_(size), _Old_(size))) + + +// Pointer to pointer parameters ------------------------- + +// _Outptr_ - Annotations for output params returning pointers +// These describe parameters where the called function provides the buffer: +// HRESULT SHStrDupW(_In_ LPCWSTR psz, _Outptr_ LPWSTR *ppwsz); +// The caller passes the address of an LPWSTR variable as ppwsz, and SHStrDupW allocates +// and initializes memory and returns the pointer to the new LPWSTR in *ppwsz. +// +// _Outptr_opt_ - describes parameters that are allowed to be NULL. +// _Outptr_*_result_maybenull_ - describes parameters where the called function might return NULL to the caller. +// +// Example: +// void MyFunc(_Outptr_opt_ int **ppData1, _Outptr_result_maybenull_ int **ppData2); +// Callers: +// MyFunc(NULL, NULL); // error: parameter 2, ppData2, should not be NULL +// MyFunc(&pData1, &pData2); // ok: both non-NULL +// if (*pData1 == *pData2) ... 
// error: pData2 might be NULL after call + +#define _Outptr_ _SAL2_Source_(_Outptr_, (), _Out_impl_ _Deref_post2_impl_(__notnull_impl_notref, __count_impl(1))) +#define _Outptr_result_maybenull_ _SAL2_Source_(_Outptr_result_maybenull_, (), _Out_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __count_impl(1))) +#define _Outptr_opt_ _SAL2_Source_(_Outptr_opt_, (), _Out_opt_impl_ _Deref_post2_impl_(__notnull_impl_notref, __count_impl(1))) +#define _Outptr_opt_result_maybenull_ _SAL2_Source_(_Outptr_opt_result_maybenull_, (), _Out_opt_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __count_impl(1))) + +// Annotations for _Outptr_ parameters returning pointers to null terminated strings. + +#define _Outptr_result_z_ _SAL2_Source_(_Outptr_result_z_, (), _Out_impl_ _Deref_post_z_) +#define _Outptr_opt_result_z_ _SAL2_Source_(_Outptr_opt_result_z_, (), _Out_opt_impl_ _Deref_post_z_) +#define _Outptr_result_maybenull_z_ _SAL2_Source_(_Outptr_result_maybenull_z_, (), _Out_impl_ _Deref_post_opt_z_) +#define _Outptr_opt_result_maybenull_z_ _SAL2_Source_(_Outptr_opt_result_maybenull_z_, (), _Out_opt_impl_ _Deref_post_opt_z_) + +// Annotations for _Outptr_ parameters where the output pointer is set to NULL if the function fails. + +#define _Outptr_result_nullonfailure_ _SAL2_Source_(_Outptr_result_nullonfailure_, (), _Outptr_ _On_failure_(_Deref_post_null_)) +#define _Outptr_opt_result_nullonfailure_ _SAL2_Source_(_Outptr_opt_result_nullonfailure_, (), _Outptr_opt_ _On_failure_(_Deref_post_null_)) + +// Annotations for _Outptr_ parameters which return a pointer to a ref-counted COM object, +// following the COM convention of setting the output to NULL on failure. +// The current implementation is identical to _Outptr_result_nullonfailure_. +// For pointers to types that are not COM objects, _Outptr_result_nullonfailure_ is preferred. 
+ +#define _COM_Outptr_ _SAL2_Source_(_COM_Outptr_, (), _Outptr_ _On_failure_(_Deref_post_null_)) +#define _COM_Outptr_result_maybenull_ _SAL2_Source_(_COM_Outptr_result_maybenull_, (), _Outptr_result_maybenull_ _On_failure_(_Deref_post_null_)) +#define _COM_Outptr_opt_ _SAL2_Source_(_COM_Outptr_opt_, (), _Outptr_opt_ _On_failure_(_Deref_post_null_)) +#define _COM_Outptr_opt_result_maybenull_ _SAL2_Source_(_COM_Outptr_opt_result_maybenull_, (), _Outptr_opt_result_maybenull_ _On_failure_(_Deref_post_null_)) + +// Annotations for _Outptr_ parameters returning a pointer to buffer with a specified number of elements/bytes + +#define _Outptr_result_buffer_(size) _SAL2_Source_(_Outptr_result_buffer_, (size), _Out_impl_ _Deref_post2_impl_(__notnull_impl_notref, __cap_impl(size))) +#define _Outptr_opt_result_buffer_(size) _SAL2_Source_(_Outptr_opt_result_buffer_, (size), _Out_opt_impl_ _Deref_post2_impl_(__notnull_impl_notref, __cap_impl(size))) +#define _Outptr_result_buffer_to_(size, count) _SAL2_Source_(_Outptr_result_buffer_to_, (size, count), _Out_impl_ _Deref_post3_impl_(__notnull_impl_notref, __cap_impl(size), __count_impl(count))) +#define _Outptr_opt_result_buffer_to_(size, count) _SAL2_Source_(_Outptr_opt_result_buffer_to_, (size, count), _Out_opt_impl_ _Deref_post3_impl_(__notnull_impl_notref, __cap_impl(size), __count_impl(count))) + +#define _Outptr_result_buffer_all_(size) _SAL2_Source_(_Outptr_result_buffer_all_, (size), _Out_impl_ _Deref_post2_impl_(__notnull_impl_notref, __count_impl(size))) +#define _Outptr_opt_result_buffer_all_(size) _SAL2_Source_(_Outptr_opt_result_buffer_all_, (size), _Out_opt_impl_ _Deref_post2_impl_(__notnull_impl_notref, __count_impl(size))) + +#define _Outptr_result_buffer_maybenull_(size) _SAL2_Source_(_Outptr_result_buffer_maybenull_, (size), _Out_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __cap_impl(size))) +#define _Outptr_opt_result_buffer_maybenull_(size) _SAL2_Source_(_Outptr_opt_result_buffer_maybenull_, (size), 
_Out_opt_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __cap_impl(size))) +#define _Outptr_result_buffer_to_maybenull_(size, count) _SAL2_Source_(_Outptr_result_buffer_to_maybenull_, (size, count), _Out_impl_ _Deref_post3_impl_(__maybenull_impl_notref, __cap_impl(size), __count_impl(count))) +#define _Outptr_opt_result_buffer_to_maybenull_(size, count) _SAL2_Source_(_Outptr_opt_result_buffer_to_maybenull_, (size, count), _Out_opt_impl_ _Deref_post3_impl_(__maybenull_impl_notref, __cap_impl(size), __count_impl(count))) + +#define _Outptr_result_buffer_all_maybenull_(size) _SAL2_Source_(_Outptr_result_buffer_all_maybenull_, (size), _Out_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __count_impl(size))) +#define _Outptr_opt_result_buffer_all_maybenull_(size) _SAL2_Source_(_Outptr_opt_result_buffer_all_maybenull_, (size), _Out_opt_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __count_impl(size))) + +#define _Outptr_result_bytebuffer_(size) _SAL2_Source_(_Outptr_result_bytebuffer_, (size), _Out_impl_ _Deref_post2_impl_(__notnull_impl_notref, __bytecap_impl(size))) +#define _Outptr_opt_result_bytebuffer_(size) _SAL2_Source_(_Outptr_opt_result_bytebuffer_, (size), _Out_opt_impl_ _Deref_post2_impl_(__notnull_impl_notref, __bytecap_impl(size))) +#define _Outptr_result_bytebuffer_to_(size, count) _SAL2_Source_(_Outptr_result_bytebuffer_to_, (size, count), _Out_impl_ _Deref_post3_impl_(__notnull_impl_notref, __bytecap_impl(size), __bytecount_impl(count))) +#define _Outptr_opt_result_bytebuffer_to_(size, count) _SAL2_Source_(_Outptr_opt_result_bytebuffer_to_, (size, count), _Out_opt_impl_ _Deref_post3_impl_(__notnull_impl_notref, __bytecap_impl(size), __bytecount_impl(count))) + +#define _Outptr_result_bytebuffer_all_(size) _SAL2_Source_(_Outptr_result_bytebuffer_all_, (size), _Out_impl_ _Deref_post2_impl_(__notnull_impl_notref, __bytecount_impl(size))) +#define _Outptr_opt_result_bytebuffer_all_(size) _SAL2_Source_(_Outptr_opt_result_bytebuffer_all_, (size), 
_Out_opt_impl_ _Deref_post2_impl_(__notnull_impl_notref, __bytecount_impl(size))) + +#define _Outptr_result_bytebuffer_maybenull_(size) _SAL2_Source_(_Outptr_result_bytebuffer_maybenull_, (size), _Out_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __bytecap_impl(size))) +#define _Outptr_opt_result_bytebuffer_maybenull_(size) _SAL2_Source_(_Outptr_opt_result_bytebuffer_maybenull_, (size), _Out_opt_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __bytecap_impl(size))) +#define _Outptr_result_bytebuffer_to_maybenull_(size, count) _SAL2_Source_(_Outptr_result_bytebuffer_to_maybenull_, (size, count), _Out_impl_ _Deref_post3_impl_(__maybenull_impl_notref, __bytecap_impl(size), __bytecount_impl(count))) +#define _Outptr_opt_result_bytebuffer_to_maybenull_(size, count) _SAL2_Source_(_Outptr_opt_result_bytebuffer_to_maybenull_, (size, count), _Out_opt_impl_ _Deref_post3_impl_(__maybenull_impl_notref, __bytecap_impl(size), __bytecount_impl(count))) + +#define _Outptr_result_bytebuffer_all_maybenull_(size) _SAL2_Source_(_Outptr_result_bytebuffer_all_maybenull_, (size), _Out_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __bytecount_impl(size))) +#define _Outptr_opt_result_bytebuffer_all_maybenull_(size) _SAL2_Source_(_Outptr_opt_result_bytebuffer_all_maybenull_, (size), _Out_opt_impl_ _Deref_post2_impl_(__maybenull_impl_notref, __bytecount_impl(size))) + +// Annotations for output reference to pointer parameters. 
+ +#define _Outref_ _SAL2_Source_(_Outref_, (), _Out_impl_ _Post_notnull_) +#define _Outref_result_maybenull_ _SAL2_Source_(_Outref_result_maybenull_, (), _Pre2_impl_(__notnull_impl_notref, __cap_c_one_notref_impl) _Post_maybenull_ _Post_valid_impl_) + +#define _Outref_result_buffer_(size) _SAL2_Source_(_Outref_result_buffer_, (size), _Outref_ _Post1_impl_(__cap_impl(size))) +#define _Outref_result_bytebuffer_(size) _SAL2_Source_(_Outref_result_bytebuffer_, (size), _Outref_ _Post1_impl_(__bytecap_impl(size))) +#define _Outref_result_buffer_to_(size, count) _SAL2_Source_(_Outref_result_buffer_to_, (size, count), _Outref_result_buffer_(size) _Post1_impl_(__count_impl(count))) +#define _Outref_result_bytebuffer_to_(size, count) _SAL2_Source_(_Outref_result_bytebuffer_to_, (size, count), _Outref_result_bytebuffer_(size) _Post1_impl_(__bytecount_impl(count))) +#define _Outref_result_buffer_all_(size) _SAL2_Source_(_Outref_result_buffer_all_, (size), _Outref_result_buffer_to_(size, _Old_(size))) +#define _Outref_result_bytebuffer_all_(size) _SAL2_Source_(_Outref_result_bytebuffer_all_, (size), _Outref_result_bytebuffer_to_(size, _Old_(size))) + +#define _Outref_result_buffer_maybenull_(size) _SAL2_Source_(_Outref_result_buffer_maybenull_, (size), _Outref_result_maybenull_ _Post1_impl_(__cap_impl(size))) +#define _Outref_result_bytebuffer_maybenull_(size) _SAL2_Source_(_Outref_result_bytebuffer_maybenull_, (size), _Outref_result_maybenull_ _Post1_impl_(__bytecap_impl(size))) +#define _Outref_result_buffer_to_maybenull_(size, count) _SAL2_Source_(_Outref_result_buffer_to_maybenull_, (size, count), _Outref_result_buffer_maybenull_(size) _Post1_impl_(__count_impl(count))) +#define _Outref_result_bytebuffer_to_maybenull_(size, count) _SAL2_Source_(_Outref_result_bytebuffer_to_maybenull_, (size, count), _Outref_result_bytebuffer_maybenull_(size) _Post1_impl_(__bytecount_impl(count))) +#define _Outref_result_buffer_all_maybenull_(size) 
_SAL2_Source_(_Outref_result_buffer_all_maybenull_, (size), _Outref_result_buffer_to_maybenull_(size, _Old_(size))) +#define _Outref_result_bytebuffer_all_maybenull_(size) _SAL2_Source_(_Outref_result_bytebuffer_all_maybenull_, (size), _Outref_result_bytebuffer_to_maybenull_(size, _Old_(size))) + +// Annotations for output reference to pointer parameters that guarantee +// that the pointer is set to NULL on failure. +#define _Outref_result_nullonfailure_ _SAL2_Source_(_Outref_result_nullonfailure_, (), _Outref_ _On_failure_(_Post_null_)) + +// Generic annotations to set output value of a by-pointer or by-reference parameter to null/zero on failure. +#define _Result_nullonfailure_ _SAL2_Source_(_Result_nullonfailure_, (), _On_failure_(_Notref_impl_ _Deref_impl_ _Post_null_)) +#define _Result_zeroonfailure_ _SAL2_Source_(_Result_zeroonfailure_, (), _On_failure_(_Notref_impl_ _Deref_impl_ _Out_range_(==, 0))) + + +// return values ------------------------------- + +// +// _Ret_ annotations +// +// describing conditions that hold for return values after the call + +// e.g. 
_Ret_z_ CString::operator const WCHAR*() const throw(); +#define _Ret_z_ _SAL2_Source_(_Ret_z_, (), _Ret2_impl_(__notnull_impl, __zterm_impl) _Ret_valid_impl_) +#define _Ret_maybenull_z_ _SAL2_Source_(_Ret_maybenull_z_, (), _Ret2_impl_(__maybenull_impl,__zterm_impl) _Ret_valid_impl_) + +// used with allocated but not yet initialized objects +#define _Ret_notnull_ _SAL2_Source_(_Ret_notnull_, (), _Ret1_impl_(__notnull_impl)) +#define _Ret_maybenull_ _SAL2_Source_(_Ret_maybenull_, (), _Ret1_impl_(__maybenull_impl)) +#define _Ret_null_ _SAL2_Source_(_Ret_null_, (), _Ret1_impl_(__null_impl)) + +// used with allocated and initialized objects +// returns single valid object +#define _Ret_valid_ _SAL2_Source_(_Ret_valid_, (), _Ret1_impl_(__notnull_impl_notref) _Ret_valid_impl_) + +// returns pointer to initialized buffer of specified size +#define _Ret_writes_(size) _SAL2_Source_(_Ret_writes_, (size), _Ret2_impl_(__notnull_impl, __count_impl(size)) _Ret_valid_impl_) +#define _Ret_writes_z_(size) _SAL2_Source_(_Ret_writes_z_, (size), _Ret3_impl_(__notnull_impl, __count_impl(size), __zterm_impl) _Ret_valid_impl_) +#define _Ret_writes_bytes_(size) _SAL2_Source_(_Ret_writes_bytes_, (size), _Ret2_impl_(__notnull_impl, __bytecount_impl(size)) _Ret_valid_impl_) +#define _Ret_writes_maybenull_(size) _SAL2_Source_(_Ret_writes_maybenull_, (size), _Ret2_impl_(__maybenull_impl,__count_impl(size)) _Ret_valid_impl_) +#define _Ret_writes_maybenull_z_(size) _SAL2_Source_(_Ret_writes_maybenull_z_, (size), _Ret3_impl_(__maybenull_impl,__count_impl(size),__zterm_impl) _Ret_valid_impl_) +#define _Ret_writes_bytes_maybenull_(size) _SAL2_Source_(_Ret_writes_bytes_maybenull_, (size), _Ret2_impl_(__maybenull_impl,__bytecount_impl(size)) _Ret_valid_impl_) + +// returns pointer to partially initialized buffer, with total size 'size' and initialized size 'count' +#define _Ret_writes_to_(size,count) _SAL2_Source_(_Ret_writes_to_, (size,count), _Ret3_impl_(__notnull_impl, __cap_impl(size), 
__count_impl(count)) _Ret_valid_impl_) +#define _Ret_writes_bytes_to_(size,count) _SAL2_Source_(_Ret_writes_bytes_to_, (size,count), _Ret3_impl_(__notnull_impl, __bytecap_impl(size), __bytecount_impl(count)) _Ret_valid_impl_) +#define _Ret_writes_to_maybenull_(size,count) _SAL2_Source_(_Ret_writes_to_maybenull_, (size,count), _Ret3_impl_(__maybenull_impl, __cap_impl(size), __count_impl(count)) _Ret_valid_impl_) +#define _Ret_writes_bytes_to_maybenull_(size,count) _SAL2_Source_(_Ret_writes_bytes_to_maybenull_, (size,count), _Ret3_impl_(__maybenull_impl, __bytecap_impl(size), __bytecount_impl(count)) _Ret_valid_impl_) + + +// Annotations for strict type checking +#define _Points_to_data_ _SAL2_Source_(_Points_to_data_, (), _Pre_ _Points_to_data_impl_) +#define _Literal_ _SAL2_Source_(_Literal_, (), _Pre_ _Literal_impl_) +#define _Notliteral_ _SAL2_Source_(_Notliteral_, (), _Pre_ _Notliteral_impl_) + +// Check the return value of a function e.g. _Check_return_ ErrorCode Foo(); +#define _Check_return_ _SAL2_Source_(_Check_return_, (), _Check_return_impl_) +#define _Must_inspect_result_ _SAL2_Source_(_Must_inspect_result_, (), _Must_inspect_impl_ _Check_return_impl_) + +// e.g. MyPrintF( _Printf_format_string_ const WCHAR* wzFormat, ... 
); +#define _Printf_format_string_ _SAL2_Source_(_Printf_format_string_, (), _Printf_format_string_impl_) +#define _Scanf_format_string_ _SAL2_Source_(_Scanf_format_string_, (), _Scanf_format_string_impl_) +#define _Scanf_s_format_string_ _SAL2_Source_(_Scanf_s_format_string_, (), _Scanf_s_format_string_impl_) + +#define _Format_string_impl_(kind,where) _SA_annotes2(SAL_IsFormatString2, kind, where) +#define _Printf_format_string_params_(x) _SAL2_Source_(_Printf_format_string_params_, (x), _Format_string_impl_("printf", x)) +#define _Scanf_format_string_params_(x) _SAL2_Source_(_Scanf_format_string_params_, (x), _Format_string_impl_("scanf", x)) +#define _Scanf_s_format_string_params_(x) _SAL2_Source_(_Scanf_s_format_string_params_, (x), _Format_string_impl_("scanf_s", x)) + +// annotations to express value of integral or pointer parameter +#define _In_range_(lb,ub) _SAL2_Source_(_In_range_, (lb,ub), _In_range_impl_(lb,ub)) +#define _Out_range_(lb,ub) _SAL2_Source_(_Out_range_, (lb,ub), _Out_range_impl_(lb,ub)) +#define _Ret_range_(lb,ub) _SAL2_Source_(_Ret_range_, (lb,ub), _Ret_range_impl_(lb,ub)) +#define _Deref_in_range_(lb,ub) _SAL2_Source_(_Deref_in_range_, (lb,ub), _Deref_in_range_impl_(lb,ub)) +#define _Deref_out_range_(lb,ub) _SAL2_Source_(_Deref_out_range_, (lb,ub), _Deref_out_range_impl_(lb,ub)) +#define _Deref_ret_range_(lb,ub) _SAL2_Source_(_Deref_ret_range_, (lb,ub), _Deref_ret_range_impl_(lb,ub)) +#define _Pre_equal_to_(expr) _SAL2_Source_(_Pre_equal_to_, (expr), _In_range_(==, expr)) +#define _Post_equal_to_(expr) _SAL2_Source_(_Post_equal_to_, (expr), _Out_range_(==, expr)) + +// annotation to express that a value (usually a field of a mutable class) +// is not changed by a function call +#define _Unchanged_(e) _SAL2_Source_(_Unchanged_, (e), _At_(e, _Post_equal_to_(_Old_(e)) _Const_)) + +// Annotations to allow expressing generalized pre and post conditions. 
+// 'cond' may be any valid SAL expression that is considered to be true as a precondition +// or postcondition (respectively). +#define _Pre_satisfies_(cond) _SAL2_Source_(_Pre_satisfies_, (cond), _Pre_satisfies_impl_(cond)) +#define _Post_satisfies_(cond) _SAL2_Source_(_Post_satisfies_, (cond), _Post_satisfies_impl_(cond)) + +// Annotations to express struct, class and field invariants +#define _Struct_size_bytes_(size) _SAL2_Source_(_Struct_size_bytes_, (size), _Writable_bytes_(size)) + +#define _Field_size_(size) _SAL2_Source_(_Field_size_, (size), _Notnull_ _Writable_elements_(size)) +#define _Field_size_opt_(size) _SAL2_Source_(_Field_size_opt_, (size), _Maybenull_ _Writable_elements_(size)) +#define _Field_size_part_(size, count) _SAL2_Source_(_Field_size_part_, (size, count), _Notnull_ _Writable_elements_(size) _Readable_elements_(count)) +#define _Field_size_part_opt_(size, count) _SAL2_Source_(_Field_size_part_opt_, (size, count), _Maybenull_ _Writable_elements_(size) _Readable_elements_(count)) +#define _Field_size_full_(size) _SAL2_Source_(_Field_size_full_, (size), _Field_size_part_(size, size)) +#define _Field_size_full_opt_(size) _SAL2_Source_(_Field_size_full_opt_, (size), _Field_size_part_opt_(size, size)) + +#define _Field_size_bytes_(size) _SAL2_Source_(_Field_size_bytes_, (size), _Notnull_ _Writable_bytes_(size)) +#define _Field_size_bytes_opt_(size) _SAL2_Source_(_Field_size_bytes_opt_, (size), _Maybenull_ _Writable_bytes_(size)) +#define _Field_size_bytes_part_(size, count) _SAL2_Source_(_Field_size_bytes_part_, (size, count), _Notnull_ _Writable_bytes_(size) _Readable_bytes_(count)) +#define _Field_size_bytes_part_opt_(size, count) _SAL2_Source_(_Field_size_bytes_part_opt_, (size, count), _Maybenull_ _Writable_bytes_(size) _Readable_bytes_(count)) +#define _Field_size_bytes_full_(size) _SAL2_Source_(_Field_size_bytes_full_, (size), _Field_size_bytes_part_(size, size)) +#define _Field_size_bytes_full_opt_(size) 
_SAL2_Source_(_Field_size_bytes_full_opt_, (size), _Field_size_bytes_part_opt_(size, size)) + +#define _Field_z_ _SAL2_Source_(_Field_z_, (), _Null_terminated_) + +#define _Field_range_(min,max) _SAL2_Source_(_Field_range_, (min,max), _Field_range_impl_(min,max)) + +//============================================================================ +// _Pre_\_Post_ Layer: +//============================================================================ + +// +// Raw Pre/Post for declaring custom pre/post conditions +// + +#define _Pre_ _Pre_impl_ +#define _Post_ _Post_impl_ + +// +// Validity property +// + +#define _Valid_ _Valid_impl_ +#define _Notvalid_ _Notvalid_impl_ +#define _Maybevalid_ _Maybevalid_impl_ + +// +// Buffer size properties +// + +// Expressing buffer sizes without specifying pre or post condition +#define _Readable_bytes_(size) _SAL2_Source_(_Readable_bytes_, (size), _Readable_bytes_impl_(size)) +#define _Readable_elements_(size) _SAL2_Source_(_Readable_elements_, (size), _Readable_elements_impl_(size)) +#define _Writable_bytes_(size) _SAL2_Source_(_Writable_bytes_, (size), _Writable_bytes_impl_(size)) +#define _Writable_elements_(size) _SAL2_Source_(_Writable_elements_, (size), _Writable_elements_impl_(size)) + +#define _Null_terminated_ _SAL2_Source_(_Null_terminated_, (), _Null_terminated_impl_) +#define _NullNull_terminated_ _SAL2_Source_(_NullNull_terminated_, (), _NullNull_terminated_impl_) + +// Expressing buffer size as pre or post condition +#define _Pre_readable_size_(size) _SAL2_Source_(_Pre_readable_size_, (size), _Pre1_impl_(__count_impl(size)) _Pre_valid_impl_) +#define _Pre_writable_size_(size) _SAL2_Source_(_Pre_writable_size_, (size), _Pre1_impl_(__cap_impl(size))) +#define _Pre_readable_byte_size_(size) _SAL2_Source_(_Pre_readable_byte_size_, (size), _Pre1_impl_(__bytecount_impl(size)) _Pre_valid_impl_) +#define _Pre_writable_byte_size_(size) _SAL2_Source_(_Pre_writable_byte_size_, (size), _Pre1_impl_(__bytecap_impl(size))) + 
+#define _Post_readable_size_(size) _SAL2_Source_(_Post_readable_size_, (size), _Post1_impl_(__count_impl(size)) _Post_valid_impl_) +#define _Post_writable_size_(size) _SAL2_Source_(_Post_writable_size_, (size), _Post1_impl_(__cap_impl(size))) +#define _Post_readable_byte_size_(size) _SAL2_Source_(_Post_readable_byte_size_, (size), _Post1_impl_(__bytecount_impl(size)) _Post_valid_impl_) +#define _Post_writable_byte_size_(size) _SAL2_Source_(_Post_writable_byte_size_, (size), _Post1_impl_(__bytecap_impl(size))) + +// +// Pointer null-ness properties +// +#define _Null_ _Null_impl_ +#define _Notnull_ _Notnull_impl_ +#define _Maybenull_ _Maybenull_impl_ + +// +// _Pre_ annotations --- +// +// describing conditions that must be met before the call of the function + +// e.g. int strlen( _Pre_z_ const char* sz ); +// buffer is a zero terminated string +#define _Pre_z_ _SAL2_Source_(_Pre_z_, (), _Pre1_impl_(__zterm_impl) _Pre_valid_impl_) + +// valid size unknown or indicated by type (e.g.:LPSTR) +#define _Pre_valid_ _SAL2_Source_(_Pre_valid_, (), _Pre1_impl_(__notnull_impl_notref) _Pre_valid_impl_) +#define _Pre_opt_valid_ _SAL2_Source_(_Pre_opt_valid_, (), _Pre1_impl_(__maybenull_impl_notref) _Pre_valid_impl_) + +#define _Pre_invalid_ _SAL2_Source_(_Pre_invalid_, (), _Deref_pre1_impl_(__notvalid_impl)) + +// Overrides recursive valid when some field is not yet initialized when using _Inout_ +#define _Pre_unknown_ _SAL2_Source_(_Pre_unknown_, (), _Pre1_impl_(__maybevalid_impl)) + +// used with allocated but not yet initialized objects +#define _Pre_notnull_ _SAL2_Source_(_Pre_notnull_, (), _Pre1_impl_(__notnull_impl_notref)) +#define _Pre_maybenull_ _SAL2_Source_(_Pre_maybenull_, (), _Pre1_impl_(__maybenull_impl_notref)) +#define _Pre_null_ _SAL2_Source_(_Pre_null_, (), _Pre1_impl_(__null_impl_notref)) + +// +// _Post_ annotations --- +// +// describing conditions that hold after the function call + +// void CopyStr( _In_z_ const char* szFrom, _Pre_cap_(cch) _Post_z_ 
char* szFrom, size_t cchFrom ); +// buffer will be a zero-terminated string after the call +#define _Post_z_ _SAL2_Source_(_Post_z_, (), _Post1_impl_(__zterm_impl) _Post_valid_impl_) + +// e.g. HRESULT InitStruct( _Post_valid_ Struct* pobj ); +#define _Post_valid_ _SAL2_Source_(_Post_valid_, (), _Post_valid_impl_) +#define _Post_invalid_ _SAL2_Source_(_Post_invalid_, (), _Deref_post1_impl_(__notvalid_impl)) + +// e.g. void free( _Post_ptr_invalid_ void* pv ); +#define _Post_ptr_invalid_ _SAL2_Source_(_Post_ptr_invalid_, (), _Post1_impl_(__notvalid_impl)) + +// e.g. void ThrowExceptionIfNull( _Post_notnull_ const void* pv ); +#define _Post_notnull_ _SAL2_Source_(_Post_notnull_, (), _Post1_impl_(__notnull_impl)) + +// e.g. HRESULT GetObject(_Outptr_ _On_failure_(_At_(*p, _Post_null_)) T **p); +#define _Post_null_ _SAL2_Source_(_Post_null_, (), _Post1_impl_(__null_impl)) + +#define _Post_maybenull_ _SAL2_Source_(_Post_maybenull_, (), _Post1_impl_(__maybenull_impl)) + +#define _Prepost_z_ _SAL2_Source_(_Prepost_z_, (), _Pre_z_ _Post_z_) + + +// #pragma region Input Buffer SAL 1 compatibility macros + +/*========================================================================== + + This section contains definitions for macros defined for VS2010 and earlier. + Usage of these macros is still supported, but the SAL 2 macros defined above + are recommended instead. This comment block is retained to assist in + understanding SAL that still uses the older syntax. + + The macros are defined in 3 layers: + + _In_\_Out_ Layer: + ---------------- + This layer provides the highest abstraction and its macros should be used + in most cases. Its macros start with _In_, _Out_ or _Inout_. For the + typical case they provide the most concise annotations. + + _Pre_\_Post_ Layer: + ------------------ + The macros of this layer only should be used when there is no suitable macro + in the _In_\_Out_ layer. 
Its macros start with _Pre_, _Post_, _Ret_, + _Deref_pre_ _Deref_post_ and _Deref_ret_. This layer provides the most + flexibility for annotations. + + Implementation Abstraction Layer: + -------------------------------- + Macros from this layer should never be used directly. The layer only exists + to hide the implementation of the annotation macros. + + + Annotation Syntax: + |--------------|----------|----------------|-----------------------------| + | Usage | Nullness | ZeroTerminated | Extent | + |--------------|----------|----------------|-----------------------------| + | _In_ | <> | <> | <> | + | _Out_ | opt_ | z_ | [byte]cap_[c_|x_]( size ) | + | _Inout_ | | | [byte]count_[c_|x_]( size ) | + | _Deref_out_ | | | ptrdiff_cap_( ptr ) | + |--------------| | | ptrdiff_count_( ptr ) | + | _Ret_ | | | | + | _Deref_ret_ | | | | + |--------------| | | | + | _Pre_ | | | | + | _Post_ | | | | + | _Deref_pre_ | | | | + | _Deref_post_ | | | | + |--------------|----------|----------------|-----------------------------| + + Usage: + ----- + _In_, _Out_, _Inout_, _Pre_, _Post_, _Deref_pre_, _Deref_post_ are for + formal parameters. + _Ret_, _Deref_ret_ must be used for return values. + + Nullness: + -------- + If the pointer can be NULL the annotation contains _opt. If the macro + does not contain '_opt' the pointer may not be NULL. + + String Type: + ----------- + _z: NullTerminated string + for _In_ parameters the buffer must have the specified stringtype before the call + for _Out_ parameters the buffer must have the specified stringtype after the call + for _Inout_ parameters both conditions apply + + Extent Syntax: + |------|---------------|---------------| + | Unit | Writ\Readable | Argument Type | + |------|---------------|---------------| + | <> | cap_ | <> | + | byte | count_ | c_ | + | | | x_ | + |------|---------------|---------------| + + 'cap' (capacity) describes the writable size of the buffer and is typically used + with _Out_. The default unit is elements. 
Use 'bytecap' if the size is given in bytes + 'count' describes the readable size of the buffer and is typically used with _In_. + The default unit is elements. Use 'bytecount' if the size is given in bytes. + + Argument syntax for cap_, bytecap_, count_, bytecount_: + (|return)[+n] e.g. cch, return, cb+2 + + If the buffer size is a constant expression use the c_ postfix. + E.g. cap_c_(20), count_c_(MAX_PATH), bytecount_c_(16) + + If the buffer size is given by a limiting pointer use the ptrdiff_ versions + of the macros. + + If the buffer size is neither a parameter nor a constant expression use the x_ + postfix. e.g. bytecount_x_(num*size) x_ annotations accept any arbitrary string. + No analysis can be done for x_ annotations but they at least tell the tool that + the buffer has some sort of extent description. x_ annotations might be supported + by future compiler versions. + +============================================================================*/ + +// e.g. void SetCharRange( _In_count_(cch) const char* rgch, size_t cch ) +// valid buffer extent described by another parameter +#define _In_count_(size) _SAL1_1_Source_(_In_count_, (size), _Pre_count_(size) _Deref_pre_readonly_) +#define _In_opt_count_(size) _SAL1_1_Source_(_In_opt_count_, (size), _Pre_opt_count_(size) _Deref_pre_readonly_) +#define _In_bytecount_(size) _SAL1_1_Source_(_In_bytecount_, (size), _Pre_bytecount_(size) _Deref_pre_readonly_) +#define _In_opt_bytecount_(size) _SAL1_1_Source_(_In_opt_bytecount_, (size), _Pre_opt_bytecount_(size) _Deref_pre_readonly_) + +// valid buffer extent described by a constant extression +#define _In_count_c_(size) _SAL1_1_Source_(_In_count_c_, (size), _Pre_count_c_(size) _Deref_pre_readonly_) +#define _In_opt_count_c_(size) _SAL1_1_Source_(_In_opt_count_c_, (size), _Pre_opt_count_c_(size) _Deref_pre_readonly_) +#define _In_bytecount_c_(size) _SAL1_1_Source_(_In_bytecount_c_, (size), _Pre_bytecount_c_(size) _Deref_pre_readonly_) +#define 
_In_opt_bytecount_c_(size) _SAL1_1_Source_(_In_opt_bytecount_c_, (size), _Pre_opt_bytecount_c_(size) _Deref_pre_readonly_) + +// nullterminated 'input' buffers with given size + +// e.g. void SetCharRange( _In_count_(cch) const char* rgch, size_t cch ) +// nullterminated valid buffer extent described by another parameter +#define _In_z_count_(size) _SAL1_1_Source_(_In_z_count_, (size), _Pre_z_ _Pre_count_(size) _Deref_pre_readonly_) +#define _In_opt_z_count_(size) _SAL1_1_Source_(_In_opt_z_count_, (size), _Pre_opt_z_ _Pre_opt_count_(size) _Deref_pre_readonly_) +#define _In_z_bytecount_(size) _SAL1_1_Source_(_In_z_bytecount_, (size), _Pre_z_ _Pre_bytecount_(size) _Deref_pre_readonly_) +#define _In_opt_z_bytecount_(size) _SAL1_1_Source_(_In_opt_z_bytecount_, (size), _Pre_opt_z_ _Pre_opt_bytecount_(size) _Deref_pre_readonly_) + +// nullterminated valid buffer extent described by a constant extression +#define _In_z_count_c_(size) _SAL1_1_Source_(_In_z_count_c_, (size), _Pre_z_ _Pre_count_c_(size) _Deref_pre_readonly_) +#define _In_opt_z_count_c_(size) _SAL1_1_Source_(_In_opt_z_count_c_, (size), _Pre_opt_z_ _Pre_opt_count_c_(size) _Deref_pre_readonly_) +#define _In_z_bytecount_c_(size) _SAL1_1_Source_(_In_z_bytecount_c_, (size), _Pre_z_ _Pre_bytecount_c_(size) _Deref_pre_readonly_) +#define _In_opt_z_bytecount_c_(size) _SAL1_1_Source_(_In_opt_z_bytecount_c_, (size), _Pre_opt_z_ _Pre_opt_bytecount_c_(size) _Deref_pre_readonly_) + +// buffer capacity is described by another pointer +// e.g. void Foo( _In_ptrdiff_count_(pchMax) const char* pch, const char* pchMax ) { while pch < pchMax ) pch++; } +#define _In_ptrdiff_count_(size) _SAL1_1_Source_(_In_ptrdiff_count_, (size), _Pre_ptrdiff_count_(size) _Deref_pre_readonly_) +#define _In_opt_ptrdiff_count_(size) _SAL1_1_Source_(_In_opt_ptrdiff_count_, (size), _Pre_opt_ptrdiff_count_(size) _Deref_pre_readonly_) + +// 'x' version for complex expressions that are not supported by the current compiler version +// e.g. 
void Set3ColMatrix( _In_count_x_(3*cRows) const Elem* matrix, int cRows ); +#define _In_count_x_(size) _SAL1_1_Source_(_In_count_x_, (size), _Pre_count_x_(size) _Deref_pre_readonly_) +#define _In_opt_count_x_(size) _SAL1_1_Source_(_In_opt_count_x_, (size), _Pre_opt_count_x_(size) _Deref_pre_readonly_) +#define _In_bytecount_x_(size) _SAL1_1_Source_(_In_bytecount_x_, (size), _Pre_bytecount_x_(size) _Deref_pre_readonly_) +#define _In_opt_bytecount_x_(size) _SAL1_1_Source_(_In_opt_bytecount_x_, (size), _Pre_opt_bytecount_x_(size) _Deref_pre_readonly_) + + +// 'out' with buffer size +// e.g. void GetIndices( _Out_cap_(cIndices) int* rgIndices, size_t cIndices ); +// buffer capacity is described by another parameter +#define _Out_cap_(size) _SAL1_1_Source_(_Out_cap_, (size), _Pre_cap_(size) _Post_valid_impl_) +#define _Out_opt_cap_(size) _SAL1_1_Source_(_Out_opt_cap_, (size), _Pre_opt_cap_(size) _Post_valid_impl_) +#define _Out_bytecap_(size) _SAL1_1_Source_(_Out_bytecap_, (size), _Pre_bytecap_(size) _Post_valid_impl_) +#define _Out_opt_bytecap_(size) _SAL1_1_Source_(_Out_opt_bytecap_, (size), _Pre_opt_bytecap_(size) _Post_valid_impl_) + +// buffer capacity is described by a constant expression +#define _Out_cap_c_(size) _SAL1_1_Source_(_Out_cap_c_, (size), _Pre_cap_c_(size) _Post_valid_impl_) +#define _Out_opt_cap_c_(size) _SAL1_1_Source_(_Out_opt_cap_c_, (size), _Pre_opt_cap_c_(size) _Post_valid_impl_) +#define _Out_bytecap_c_(size) _SAL1_1_Source_(_Out_bytecap_c_, (size), _Pre_bytecap_c_(size) _Post_valid_impl_) +#define _Out_opt_bytecap_c_(size) _SAL1_1_Source_(_Out_opt_bytecap_c_, (size), _Pre_opt_bytecap_c_(size) _Post_valid_impl_) + +// buffer capacity is described by another parameter multiplied by a constant expression +#define _Out_cap_m_(mult,size) _SAL1_1_Source_(_Out_cap_m_, (mult,size), _Pre_cap_m_(mult,size) _Post_valid_impl_) +#define _Out_opt_cap_m_(mult,size) _SAL1_1_Source_(_Out_opt_cap_m_, (mult,size), _Pre_opt_cap_m_(mult,size) _Post_valid_impl_) 
+#define _Out_z_cap_m_(mult,size) _SAL1_1_Source_(_Out_z_cap_m_, (mult,size), _Pre_cap_m_(mult,size) _Post_valid_impl_ _Post_z_) +#define _Out_opt_z_cap_m_(mult,size) _SAL1_1_Source_(_Out_opt_z_cap_m_, (mult,size), _Pre_opt_cap_m_(mult,size) _Post_valid_impl_ _Post_z_) + +// buffer capacity is described by another pointer +// e.g. void Foo( _Out_ptrdiff_cap_(pchMax) char* pch, const char* pchMax ) { while pch < pchMax ) pch++; } +#define _Out_ptrdiff_cap_(size) _SAL1_1_Source_(_Out_ptrdiff_cap_, (size), _Pre_ptrdiff_cap_(size) _Post_valid_impl_) +#define _Out_opt_ptrdiff_cap_(size) _SAL1_1_Source_(_Out_opt_ptrdiff_cap_, (size), _Pre_opt_ptrdiff_cap_(size) _Post_valid_impl_) + +// buffer capacity is described by a complex expression +#define _Out_cap_x_(size) _SAL1_1_Source_(_Out_cap_x_, (size), _Pre_cap_x_(size) _Post_valid_impl_) +#define _Out_opt_cap_x_(size) _SAL1_1_Source_(_Out_opt_cap_x_, (size), _Pre_opt_cap_x_(size) _Post_valid_impl_) +#define _Out_bytecap_x_(size) _SAL1_1_Source_(_Out_bytecap_x_, (size), _Pre_bytecap_x_(size) _Post_valid_impl_) +#define _Out_opt_bytecap_x_(size) _SAL1_1_Source_(_Out_opt_bytecap_x_, (size), _Pre_opt_bytecap_x_(size) _Post_valid_impl_) + +// a zero terminated string is filled into a buffer of given capacity +// e.g. 
void CopyStr( _In_z_ const char* szFrom, _Out_z_cap_(cchTo) char* szTo, size_t cchTo ); +// buffer capacity is described by another parameter +#define _Out_z_cap_(size) _SAL1_1_Source_(_Out_z_cap_, (size), _Pre_cap_(size) _Post_valid_impl_ _Post_z_) +#define _Out_opt_z_cap_(size) _SAL1_1_Source_(_Out_opt_z_cap_, (size), _Pre_opt_cap_(size) _Post_valid_impl_ _Post_z_) +#define _Out_z_bytecap_(size) _SAL1_1_Source_(_Out_z_bytecap_, (size), _Pre_bytecap_(size) _Post_valid_impl_ _Post_z_) +#define _Out_opt_z_bytecap_(size) _SAL1_1_Source_(_Out_opt_z_bytecap_, (size), _Pre_opt_bytecap_(size) _Post_valid_impl_ _Post_z_) + +// buffer capacity is described by a constant expression +#define _Out_z_cap_c_(size) _SAL1_1_Source_(_Out_z_cap_c_, (size), _Pre_cap_c_(size) _Post_valid_impl_ _Post_z_) +#define _Out_opt_z_cap_c_(size) _SAL1_1_Source_(_Out_opt_z_cap_c_, (size), _Pre_opt_cap_c_(size) _Post_valid_impl_ _Post_z_) +#define _Out_z_bytecap_c_(size) _SAL1_1_Source_(_Out_z_bytecap_c_, (size), _Pre_bytecap_c_(size) _Post_valid_impl_ _Post_z_) +#define _Out_opt_z_bytecap_c_(size) _SAL1_1_Source_(_Out_opt_z_bytecap_c_, (size), _Pre_opt_bytecap_c_(size) _Post_valid_impl_ _Post_z_) + +// buffer capacity is described by a complex expression +#define _Out_z_cap_x_(size) _SAL1_1_Source_(_Out_z_cap_x_, (size), _Pre_cap_x_(size) _Post_valid_impl_ _Post_z_) +#define _Out_opt_z_cap_x_(size) _SAL1_1_Source_(_Out_opt_z_cap_x_, (size), _Pre_opt_cap_x_(size) _Post_valid_impl_ _Post_z_) +#define _Out_z_bytecap_x_(size) _SAL1_1_Source_(_Out_z_bytecap_x_, (size), _Pre_bytecap_x_(size) _Post_valid_impl_ _Post_z_) +#define _Out_opt_z_bytecap_x_(size) _SAL1_1_Source_(_Out_opt_z_bytecap_x_, (size), _Pre_opt_bytecap_x_(size) _Post_valid_impl_ _Post_z_) + +// a zero terminated string is filled into a buffer of given capacity +// e.g. 
size_t CopyCharRange( _In_count_(cchFrom) const char* rgchFrom, size_t cchFrom, _Out_cap_post_count_(cchTo,return)) char* rgchTo, size_t cchTo ); +#define _Out_cap_post_count_(cap,count) _SAL1_1_Source_(_Out_cap_post_count_, (cap,count), _Pre_cap_(cap) _Post_valid_impl_ _Post_count_(count)) +#define _Out_opt_cap_post_count_(cap,count) _SAL1_1_Source_(_Out_opt_cap_post_count_, (cap,count), _Pre_opt_cap_(cap) _Post_valid_impl_ _Post_count_(count)) +#define _Out_bytecap_post_bytecount_(cap,count) _SAL1_1_Source_(_Out_bytecap_post_bytecount_, (cap,count), _Pre_bytecap_(cap) _Post_valid_impl_ _Post_bytecount_(count)) +#define _Out_opt_bytecap_post_bytecount_(cap,count) _SAL1_1_Source_(_Out_opt_bytecap_post_bytecount_, (cap,count), _Pre_opt_bytecap_(cap) _Post_valid_impl_ _Post_bytecount_(count)) + +// a zero terminated string is filled into a buffer of given capacity +// e.g. size_t CopyStr( _In_z_ const char* szFrom, _Out_z_cap_post_count_(cchTo,return+1) char* szTo, size_t cchTo ); +#define _Out_z_cap_post_count_(cap,count) _SAL1_1_Source_(_Out_z_cap_post_count_, (cap,count), _Pre_cap_(cap) _Post_valid_impl_ _Post_z_count_(count)) +#define _Out_opt_z_cap_post_count_(cap,count) _SAL1_1_Source_(_Out_opt_z_cap_post_count_, (cap,count), _Pre_opt_cap_(cap) _Post_valid_impl_ _Post_z_count_(count)) +#define _Out_z_bytecap_post_bytecount_(cap,count) _SAL1_1_Source_(_Out_z_bytecap_post_bytecount_, (cap,count), _Pre_bytecap_(cap) _Post_valid_impl_ _Post_z_bytecount_(count)) +#define _Out_opt_z_bytecap_post_bytecount_(cap,count) _SAL1_1_Source_(_Out_opt_z_bytecap_post_bytecount_, (cap,count), _Pre_opt_bytecap_(cap) _Post_valid_impl_ _Post_z_bytecount_(count)) + +// only use with dereferenced arguments e.g. 
'*pcch' +#define _Out_capcount_(capcount) _SAL1_1_Source_(_Out_capcount_, (capcount), _Pre_cap_(capcount) _Post_valid_impl_ _Post_count_(capcount)) +#define _Out_opt_capcount_(capcount) _SAL1_1_Source_(_Out_opt_capcount_, (capcount), _Pre_opt_cap_(capcount) _Post_valid_impl_ _Post_count_(capcount)) +#define _Out_bytecapcount_(capcount) _SAL1_1_Source_(_Out_bytecapcount_, (capcount), _Pre_bytecap_(capcount) _Post_valid_impl_ _Post_bytecount_(capcount)) +#define _Out_opt_bytecapcount_(capcount) _SAL1_1_Source_(_Out_opt_bytecapcount_, (capcount), _Pre_opt_bytecap_(capcount) _Post_valid_impl_ _Post_bytecount_(capcount)) + +#define _Out_capcount_x_(capcount) _SAL1_1_Source_(_Out_capcount_x_, (capcount), _Pre_cap_x_(capcount) _Post_valid_impl_ _Post_count_x_(capcount)) +#define _Out_opt_capcount_x_(capcount) _SAL1_1_Source_(_Out_opt_capcount_x_, (capcount), _Pre_opt_cap_x_(capcount) _Post_valid_impl_ _Post_count_x_(capcount)) +#define _Out_bytecapcount_x_(capcount) _SAL1_1_Source_(_Out_bytecapcount_x_, (capcount), _Pre_bytecap_x_(capcount) _Post_valid_impl_ _Post_bytecount_x_(capcount)) +#define _Out_opt_bytecapcount_x_(capcount) _SAL1_1_Source_(_Out_opt_bytecapcount_x_, (capcount), _Pre_opt_bytecap_x_(capcount) _Post_valid_impl_ _Post_bytecount_x_(capcount)) + +// e.g. 
GetString( _Out_z_capcount_(*pLen+1) char* sz, size_t* pLen ); +#define _Out_z_capcount_(capcount) _SAL1_1_Source_(_Out_z_capcount_, (capcount), _Pre_cap_(capcount) _Post_valid_impl_ _Post_z_count_(capcount)) +#define _Out_opt_z_capcount_(capcount) _SAL1_1_Source_(_Out_opt_z_capcount_, (capcount), _Pre_opt_cap_(capcount) _Post_valid_impl_ _Post_z_count_(capcount)) +#define _Out_z_bytecapcount_(capcount) _SAL1_1_Source_(_Out_z_bytecapcount_, (capcount), _Pre_bytecap_(capcount) _Post_valid_impl_ _Post_z_bytecount_(capcount)) +#define _Out_opt_z_bytecapcount_(capcount) _SAL1_1_Source_(_Out_opt_z_bytecapcount_, (capcount), _Pre_opt_bytecap_(capcount) _Post_valid_impl_ _Post_z_bytecount_(capcount)) + + +// 'inout' buffers with initialized elements before and after the call +// e.g. void ModifyIndices( _Inout_count_(cIndices) int* rgIndices, size_t cIndices ); +#define _Inout_count_(size) _SAL1_1_Source_(_Inout_count_, (size), _Prepost_count_(size)) +#define _Inout_opt_count_(size) _SAL1_1_Source_(_Inout_opt_count_, (size), _Prepost_opt_count_(size)) +#define _Inout_bytecount_(size) _SAL1_1_Source_(_Inout_bytecount_, (size), _Prepost_bytecount_(size)) +#define _Inout_opt_bytecount_(size) _SAL1_1_Source_(_Inout_opt_bytecount_, (size), _Prepost_opt_bytecount_(size)) + +#define _Inout_count_c_(size) _SAL1_1_Source_(_Inout_count_c_, (size), _Prepost_count_c_(size)) +#define _Inout_opt_count_c_(size) _SAL1_1_Source_(_Inout_opt_count_c_, (size), _Prepost_opt_count_c_(size)) +#define _Inout_bytecount_c_(size) _SAL1_1_Source_(_Inout_bytecount_c_, (size), _Prepost_bytecount_c_(size)) +#define _Inout_opt_bytecount_c_(size) _SAL1_1_Source_(_Inout_opt_bytecount_c_, (size), _Prepost_opt_bytecount_c_(size)) + +// nullterminated 'inout' buffers with initialized elements before and after the call +// e.g. 
void ModifyIndices( _Inout_count_(cIndices) int* rgIndices, size_t cIndices ); +#define _Inout_z_count_(size) _SAL1_1_Source_(_Inout_z_count_, (size), _Prepost_z_ _Prepost_count_(size)) +#define _Inout_opt_z_count_(size) _SAL1_1_Source_(_Inout_opt_z_count_, (size), _Prepost_z_ _Prepost_opt_count_(size)) +#define _Inout_z_bytecount_(size) _SAL1_1_Source_(_Inout_z_bytecount_, (size), _Prepost_z_ _Prepost_bytecount_(size)) +#define _Inout_opt_z_bytecount_(size) _SAL1_1_Source_(_Inout_opt_z_bytecount_, (size), _Prepost_z_ _Prepost_opt_bytecount_(size)) + +#define _Inout_z_count_c_(size) _SAL1_1_Source_(_Inout_z_count_c_, (size), _Prepost_z_ _Prepost_count_c_(size)) +#define _Inout_opt_z_count_c_(size) _SAL1_1_Source_(_Inout_opt_z_count_c_, (size), _Prepost_z_ _Prepost_opt_count_c_(size)) +#define _Inout_z_bytecount_c_(size) _SAL1_1_Source_(_Inout_z_bytecount_c_, (size), _Prepost_z_ _Prepost_bytecount_c_(size)) +#define _Inout_opt_z_bytecount_c_(size) _SAL1_1_Source_(_Inout_opt_z_bytecount_c_, (size), _Prepost_z_ _Prepost_opt_bytecount_c_(size)) + +#define _Inout_ptrdiff_count_(size) _SAL1_1_Source_(_Inout_ptrdiff_count_, (size), _Pre_ptrdiff_count_(size)) +#define _Inout_opt_ptrdiff_count_(size) _SAL1_1_Source_(_Inout_opt_ptrdiff_count_, (size), _Pre_opt_ptrdiff_count_(size)) + +#define _Inout_count_x_(size) _SAL1_1_Source_(_Inout_count_x_, (size), _Prepost_count_x_(size)) +#define _Inout_opt_count_x_(size) _SAL1_1_Source_(_Inout_opt_count_x_, (size), _Prepost_opt_count_x_(size)) +#define _Inout_bytecount_x_(size) _SAL1_1_Source_(_Inout_bytecount_x_, (size), _Prepost_bytecount_x_(size)) +#define _Inout_opt_bytecount_x_(size) _SAL1_1_Source_(_Inout_opt_bytecount_x_, (size), _Prepost_opt_bytecount_x_(size)) + +// e.g. 
void AppendToLPSTR( _In_ LPCSTR szFrom, _Inout_cap_(cchTo) LPSTR* szTo, size_t cchTo ); +#define _Inout_cap_(size) _SAL1_1_Source_(_Inout_cap_, (size), _Pre_valid_cap_(size) _Post_valid_) +#define _Inout_opt_cap_(size) _SAL1_1_Source_(_Inout_opt_cap_, (size), _Pre_opt_valid_cap_(size) _Post_valid_) +#define _Inout_bytecap_(size) _SAL1_1_Source_(_Inout_bytecap_, (size), _Pre_valid_bytecap_(size) _Post_valid_) +#define _Inout_opt_bytecap_(size) _SAL1_1_Source_(_Inout_opt_bytecap_, (size), _Pre_opt_valid_bytecap_(size) _Post_valid_) + +#define _Inout_cap_c_(size) _SAL1_1_Source_(_Inout_cap_c_, (size), _Pre_valid_cap_c_(size) _Post_valid_) +#define _Inout_opt_cap_c_(size) _SAL1_1_Source_(_Inout_opt_cap_c_, (size), _Pre_opt_valid_cap_c_(size) _Post_valid_) +#define _Inout_bytecap_c_(size) _SAL1_1_Source_(_Inout_bytecap_c_, (size), _Pre_valid_bytecap_c_(size) _Post_valid_) +#define _Inout_opt_bytecap_c_(size) _SAL1_1_Source_(_Inout_opt_bytecap_c_, (size), _Pre_opt_valid_bytecap_c_(size) _Post_valid_) + +#define _Inout_cap_x_(size) _SAL1_1_Source_(_Inout_cap_x_, (size), _Pre_valid_cap_x_(size) _Post_valid_) +#define _Inout_opt_cap_x_(size) _SAL1_1_Source_(_Inout_opt_cap_x_, (size), _Pre_opt_valid_cap_x_(size) _Post_valid_) +#define _Inout_bytecap_x_(size) _SAL1_1_Source_(_Inout_bytecap_x_, (size), _Pre_valid_bytecap_x_(size) _Post_valid_) +#define _Inout_opt_bytecap_x_(size) _SAL1_1_Source_(_Inout_opt_bytecap_x_, (size), _Pre_opt_valid_bytecap_x_(size) _Post_valid_) + +// inout string buffers with writable size +// e.g. 
void AppendStr( _In_z_ const char* szFrom, _Inout_z_cap_(cchTo) char* szTo, size_t cchTo ); +#define _Inout_z_cap_(size) _SAL1_1_Source_(_Inout_z_cap_, (size), _Pre_z_cap_(size) _Post_z_) +#define _Inout_opt_z_cap_(size) _SAL1_1_Source_(_Inout_opt_z_cap_, (size), _Pre_opt_z_cap_(size) _Post_z_) +#define _Inout_z_bytecap_(size) _SAL1_1_Source_(_Inout_z_bytecap_, (size), _Pre_z_bytecap_(size) _Post_z_) +#define _Inout_opt_z_bytecap_(size) _SAL1_1_Source_(_Inout_opt_z_bytecap_, (size), _Pre_opt_z_bytecap_(size) _Post_z_) + +#define _Inout_z_cap_c_(size) _SAL1_1_Source_(_Inout_z_cap_c_, (size), _Pre_z_cap_c_(size) _Post_z_) +#define _Inout_opt_z_cap_c_(size) _SAL1_1_Source_(_Inout_opt_z_cap_c_, (size), _Pre_opt_z_cap_c_(size) _Post_z_) +#define _Inout_z_bytecap_c_(size) _SAL1_1_Source_(_Inout_z_bytecap_c_, (size), _Pre_z_bytecap_c_(size) _Post_z_) +#define _Inout_opt_z_bytecap_c_(size) _SAL1_1_Source_(_Inout_opt_z_bytecap_c_, (size), _Pre_opt_z_bytecap_c_(size) _Post_z_) + +#define _Inout_z_cap_x_(size) _SAL1_1_Source_(_Inout_z_cap_x_, (size), _Pre_z_cap_x_(size) _Post_z_) +#define _Inout_opt_z_cap_x_(size) _SAL1_1_Source_(_Inout_opt_z_cap_x_, (size), _Pre_opt_z_cap_x_(size) _Post_z_) +#define _Inout_z_bytecap_x_(size) _SAL1_1_Source_(_Inout_z_bytecap_x_, (size), _Pre_z_bytecap_x_(size) _Post_z_) +#define _Inout_opt_z_bytecap_x_(size) _SAL1_1_Source_(_Inout_opt_z_bytecap_x_, (size), _Pre_opt_z_bytecap_x_(size) _Post_z_) + + +// returning pointers to valid objects +#define _Ret_ _SAL1_1_Source_(_Ret_, (), _Ret_valid_) +#define _Ret_opt_ _SAL1_1_Source_(_Ret_opt_, (), _Ret_opt_valid_) + +// annotations to express 'boundedness' of integral value parameter +#define _In_bound_ _SAL1_1_Source_(_In_bound_, (), _In_bound_impl_) +#define _Out_bound_ _SAL1_1_Source_(_Out_bound_, (), _Out_bound_impl_) +#define _Ret_bound_ _SAL1_1_Source_(_Ret_bound_, (), _Ret_bound_impl_) +#define _Deref_in_bound_ _SAL1_1_Source_(_Deref_in_bound_, (), _Deref_in_bound_impl_) +#define 
_Deref_out_bound_ _SAL1_1_Source_(_Deref_out_bound_, (), _Deref_out_bound_impl_) +#define _Deref_inout_bound_ _SAL1_1_Source_(_Deref_inout_bound_, (), _Deref_in_bound_ _Deref_out_bound_) +#define _Deref_ret_bound_ _SAL1_1_Source_(_Deref_ret_bound_, (), _Deref_ret_bound_impl_) + +// e.g. HRESULT HrCreatePoint( _Deref_out_opt_ POINT** ppPT ); +#define _Deref_out_ _SAL1_1_Source_(_Deref_out_, (), _Out_ _Deref_post_valid_) +#define _Deref_out_opt_ _SAL1_1_Source_(_Deref_out_opt_, (), _Out_ _Deref_post_opt_valid_) +#define _Deref_opt_out_ _SAL1_1_Source_(_Deref_opt_out_, (), _Out_opt_ _Deref_post_valid_) +#define _Deref_opt_out_opt_ _SAL1_1_Source_(_Deref_opt_out_opt_, (), _Out_opt_ _Deref_post_opt_valid_) + +// e.g. void CloneString( _In_z_ const WCHAR* wzFrom, _Deref_out_z_ WCHAR** pWzTo ); +#define _Deref_out_z_ _SAL1_1_Source_(_Deref_out_z_, (), _Out_ _Deref_post_z_) +#define _Deref_out_opt_z_ _SAL1_1_Source_(_Deref_out_opt_z_, (), _Out_ _Deref_post_opt_z_) +#define _Deref_opt_out_z_ _SAL1_1_Source_(_Deref_opt_out_z_, (), _Out_opt_ _Deref_post_z_) +#define _Deref_opt_out_opt_z_ _SAL1_1_Source_(_Deref_opt_out_opt_z_, (), _Out_opt_ _Deref_post_opt_z_) + +// +// _Deref_pre_ --- +// +// describing conditions for array elements of dereferenced pointer parameters that must be met before the call + +// e.g. void SaveStringArray( _In_count_(cStrings) _Deref_pre_z_ const WCHAR* const rgpwch[] ); +#define _Deref_pre_z_ _SAL1_1_Source_(_Deref_pre_z_, (), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__zterm_impl) _Pre_valid_impl_) +#define _Deref_pre_opt_z_ _SAL1_1_Source_(_Deref_pre_opt_z_, (), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__zterm_impl) _Pre_valid_impl_) + +// e.g. 
void FillInArrayOfStr32( _In_count_(cStrings) _Deref_pre_cap_c_(32) _Deref_post_z_ WCHAR* const rgpwch[] ); +// buffer capacity is described by another parameter +#define _Deref_pre_cap_(size) _SAL1_1_Source_(_Deref_pre_cap_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__cap_impl(size))) +#define _Deref_pre_opt_cap_(size) _SAL1_1_Source_(_Deref_pre_opt_cap_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__cap_impl(size))) +#define _Deref_pre_bytecap_(size) _SAL1_1_Source_(_Deref_pre_bytecap_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__bytecap_impl(size))) +#define _Deref_pre_opt_bytecap_(size) _SAL1_1_Source_(_Deref_pre_opt_bytecap_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__bytecap_impl(size))) + +// buffer capacity is described by a constant expression +#define _Deref_pre_cap_c_(size) _SAL1_1_Source_(_Deref_pre_cap_c_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__cap_c_impl(size))) +#define _Deref_pre_opt_cap_c_(size) _SAL1_1_Source_(_Deref_pre_opt_cap_c_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__cap_c_impl(size))) +#define _Deref_pre_bytecap_c_(size) _SAL1_1_Source_(_Deref_pre_bytecap_c_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__bytecap_c_impl(size))) +#define _Deref_pre_opt_bytecap_c_(size) _SAL1_1_Source_(_Deref_pre_opt_bytecap_c_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__bytecap_c_impl(size))) + +// buffer capacity is described by a complex condition +#define _Deref_pre_cap_x_(size) _SAL1_1_Source_(_Deref_pre_cap_x_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__cap_x_impl(size))) +#define _Deref_pre_opt_cap_x_(size) _SAL1_1_Source_(_Deref_pre_opt_cap_x_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__cap_x_impl(size))) +#define _Deref_pre_bytecap_x_(size) _SAL1_1_Source_(_Deref_pre_bytecap_x_, (size), 
_Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__bytecap_x_impl(size))) +#define _Deref_pre_opt_bytecap_x_(size) _SAL1_1_Source_(_Deref_pre_opt_bytecap_x_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__bytecap_x_impl(size))) + +// convenience macros for nullterminated buffers with given capacity +#define _Deref_pre_z_cap_(size) _SAL1_1_Source_(_Deref_pre_z_cap_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__cap_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_z_cap_(size) _SAL1_1_Source_(_Deref_pre_opt_z_cap_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__cap_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_z_bytecap_(size) _SAL1_1_Source_(_Deref_pre_z_bytecap_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__bytecap_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_z_bytecap_(size) _SAL1_1_Source_(_Deref_pre_opt_z_bytecap_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__bytecap_impl(size)) _Pre_valid_impl_) + +#define _Deref_pre_z_cap_c_(size) _SAL1_1_Source_(_Deref_pre_z_cap_c_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__cap_c_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_z_cap_c_(size) _SAL1_1_Source_(_Deref_pre_opt_z_cap_c_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__cap_c_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_z_bytecap_c_(size) _SAL1_1_Source_(_Deref_pre_z_bytecap_c_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__bytecap_c_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_z_bytecap_c_(size) _SAL1_1_Source_(_Deref_pre_opt_z_bytecap_c_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__bytecap_c_impl(size)) _Pre_valid_impl_) + +#define _Deref_pre_z_cap_x_(size) _SAL1_1_Source_(_Deref_pre_z_cap_x_, 
(size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__cap_x_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_z_cap_x_(size) _SAL1_1_Source_(_Deref_pre_opt_z_cap_x_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__cap_x_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_z_bytecap_x_(size) _SAL1_1_Source_(_Deref_pre_z_bytecap_x_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__bytecap_x_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_z_bytecap_x_(size) _SAL1_1_Source_(_Deref_pre_opt_z_bytecap_x_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre2_impl_(__zterm_impl,__bytecap_x_impl(size)) _Pre_valid_impl_) + +// known capacity and valid but unknown readable extent +#define _Deref_pre_valid_cap_(size) _SAL1_1_Source_(_Deref_pre_valid_cap_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__cap_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_valid_cap_(size) _SAL1_1_Source_(_Deref_pre_opt_valid_cap_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__cap_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_valid_bytecap_(size) _SAL1_1_Source_(_Deref_pre_valid_bytecap_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__bytecap_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_valid_bytecap_(size) _SAL1_1_Source_(_Deref_pre_opt_valid_bytecap_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__bytecap_impl(size)) _Pre_valid_impl_) + +#define _Deref_pre_valid_cap_c_(size) _SAL1_1_Source_(_Deref_pre_valid_cap_c_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__cap_c_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_valid_cap_c_(size) _SAL1_1_Source_(_Deref_pre_opt_valid_cap_c_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__cap_c_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_valid_bytecap_c_(size) 
_SAL1_1_Source_(_Deref_pre_valid_bytecap_c_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__bytecap_c_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_valid_bytecap_c_(size) _SAL1_1_Source_(_Deref_pre_opt_valid_bytecap_c_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__bytecap_c_impl(size)) _Pre_valid_impl_) + +#define _Deref_pre_valid_cap_x_(size) _SAL1_1_Source_(_Deref_pre_valid_cap_x_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__cap_x_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_valid_cap_x_(size) _SAL1_1_Source_(_Deref_pre_opt_valid_cap_x_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__cap_x_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_valid_bytecap_x_(size) _SAL1_1_Source_(_Deref_pre_valid_bytecap_x_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__bytecap_x_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_valid_bytecap_x_(size) _SAL1_1_Source_(_Deref_pre_opt_valid_bytecap_x_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__bytecap_x_impl(size)) _Pre_valid_impl_) + +// e.g. 
void SaveMatrix( _In_count_(n) _Deref_pre_count_(n) const Elem** matrix, size_t n ); +// valid buffer extent is described by another parameter +#define _Deref_pre_count_(size) _SAL1_1_Source_(_Deref_pre_count_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__count_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_count_(size) _SAL1_1_Source_(_Deref_pre_opt_count_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__count_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_bytecount_(size) _SAL1_1_Source_(_Deref_pre_bytecount_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__bytecount_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_bytecount_(size) _SAL1_1_Source_(_Deref_pre_opt_bytecount_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__bytecount_impl(size)) _Pre_valid_impl_) + +// valid buffer extent is described by a constant expression +#define _Deref_pre_count_c_(size) _SAL1_1_Source_(_Deref_pre_count_c_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__count_c_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_count_c_(size) _SAL1_1_Source_(_Deref_pre_opt_count_c_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__count_c_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_bytecount_c_(size) _SAL1_1_Source_(_Deref_pre_bytecount_c_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__bytecount_c_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_bytecount_c_(size) _SAL1_1_Source_(_Deref_pre_opt_bytecount_c_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__bytecount_c_impl(size)) _Pre_valid_impl_) + +// valid buffer extent is described by a complex expression +#define _Deref_pre_count_x_(size) _SAL1_1_Source_(_Deref_pre_count_x_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__count_x_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_count_x_(size) 
_SAL1_1_Source_(_Deref_pre_opt_count_x_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__count_x_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_bytecount_x_(size) _SAL1_1_Source_(_Deref_pre_bytecount_x_, (size), _Deref_pre1_impl_(__notnull_impl_notref) _Deref_pre1_impl_(__bytecount_x_impl(size)) _Pre_valid_impl_) +#define _Deref_pre_opt_bytecount_x_(size) _SAL1_1_Source_(_Deref_pre_opt_bytecount_x_, (size), _Deref_pre1_impl_(__maybenull_impl_notref) _Deref_pre1_impl_(__bytecount_x_impl(size)) _Pre_valid_impl_) + +// e.g. void PrintStringArray( _In_count_(cElems) _Deref_pre_valid_ LPCSTR rgStr[], size_t cElems ); +#define _Deref_pre_valid_ _SAL1_1_Source_(_Deref_pre_valid_, (), _Deref_pre1_impl_(__notnull_impl_notref) _Pre_valid_impl_) +#define _Deref_pre_opt_valid_ _SAL1_1_Source_(_Deref_pre_opt_valid_, (), _Deref_pre1_impl_(__maybenull_impl_notref) _Pre_valid_impl_) +#define _Deref_pre_invalid_ _SAL1_1_Source_(_Deref_pre_invalid_, (), _Deref_pre1_impl_(__notvalid_impl)) + +#define _Deref_pre_notnull_ _SAL1_1_Source_(_Deref_pre_notnull_, (), _Deref_pre1_impl_(__notnull_impl_notref)) +#define _Deref_pre_maybenull_ _SAL1_1_Source_(_Deref_pre_maybenull_, (), _Deref_pre1_impl_(__maybenull_impl_notref)) +#define _Deref_pre_null_ _SAL1_1_Source_(_Deref_pre_null_, (), _Deref_pre1_impl_(__null_impl_notref)) + +// restrict access rights +#define _Deref_pre_readonly_ _SAL1_1_Source_(_Deref_pre_readonly_, (), _Deref_pre1_impl_(__readaccess_impl_notref)) +#define _Deref_pre_writeonly_ _SAL1_1_Source_(_Deref_pre_writeonly_, (), _Deref_pre1_impl_(__writeaccess_impl_notref)) + +// +// _Deref_post_ --- +// +// describing conditions for array elements or dereferenced pointer parameters that hold after the call + +// e.g. 
void CloneString( _In_z_ const Wchar_t* wzIn _Out_ _Deref_post_z_ WCHAR** pWzOut ); +#define _Deref_post_z_ _SAL1_1_Source_(_Deref_post_z_, (), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__zterm_impl) _Post_valid_impl_) +#define _Deref_post_opt_z_ _SAL1_1_Source_(_Deref_post_opt_z_, (), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__zterm_impl) _Post_valid_impl_) + +// e.g. HRESULT HrAllocateMemory( size_t cb, _Out_ _Deref_post_bytecap_(cb) void** ppv ); +// buffer capacity is described by another parameter +#define _Deref_post_cap_(size) _SAL1_1_Source_(_Deref_post_cap_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__cap_impl(size))) +#define _Deref_post_opt_cap_(size) _SAL1_1_Source_(_Deref_post_opt_cap_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__cap_impl(size))) +#define _Deref_post_bytecap_(size) _SAL1_1_Source_(_Deref_post_bytecap_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__bytecap_impl(size))) +#define _Deref_post_opt_bytecap_(size) _SAL1_1_Source_(_Deref_post_opt_bytecap_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__bytecap_impl(size))) + +// buffer capacity is described by a constant expression +#define _Deref_post_cap_c_(size) _SAL1_1_Source_(_Deref_post_cap_c_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__cap_c_impl(size))) +#define _Deref_post_opt_cap_c_(size) _SAL1_1_Source_(_Deref_post_opt_cap_c_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__cap_c_impl(size))) +#define _Deref_post_bytecap_c_(size) _SAL1_1_Source_(_Deref_post_bytecap_c_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__bytecap_c_impl(size))) +#define _Deref_post_opt_bytecap_c_(size) _SAL1_1_Source_(_Deref_post_opt_bytecap_c_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__bytecap_c_impl(size))) + +// buffer capacity is described by a 
complex expression +#define _Deref_post_cap_x_(size) _SAL1_1_Source_(_Deref_post_cap_x_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__cap_x_impl(size))) +#define _Deref_post_opt_cap_x_(size) _SAL1_1_Source_(_Deref_post_opt_cap_x_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__cap_x_impl(size))) +#define _Deref_post_bytecap_x_(size) _SAL1_1_Source_(_Deref_post_bytecap_x_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__bytecap_x_impl(size))) +#define _Deref_post_opt_bytecap_x_(size) _SAL1_1_Source_(_Deref_post_opt_bytecap_x_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__bytecap_x_impl(size))) + +// convenience macros for nullterminated buffers with given capacity +#define _Deref_post_z_cap_(size) _SAL1_1_Source_(_Deref_post_z_cap_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post2_impl_(__zterm_impl,__cap_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_z_cap_(size) _SAL1_1_Source_(_Deref_post_opt_z_cap_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post2_impl_(__zterm_impl,__cap_impl(size)) _Post_valid_impl_) +#define _Deref_post_z_bytecap_(size) _SAL1_1_Source_(_Deref_post_z_bytecap_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post2_impl_(__zterm_impl,__bytecap_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_z_bytecap_(size) _SAL1_1_Source_(_Deref_post_opt_z_bytecap_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post2_impl_(__zterm_impl,__bytecap_impl(size)) _Post_valid_impl_) + +#define _Deref_post_z_cap_c_(size) _SAL1_1_Source_(_Deref_post_z_cap_c_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post2_impl_(__zterm_impl,__cap_c_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_z_cap_c_(size) _SAL1_1_Source_(_Deref_post_opt_z_cap_c_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post2_impl_(__zterm_impl,__cap_c_impl(size)) _Post_valid_impl_) +#define 
_Deref_post_z_bytecap_c_(size) _SAL1_1_Source_(_Deref_post_z_bytecap_c_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post2_impl_(__zterm_impl,__bytecap_c_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_z_bytecap_c_(size) _SAL1_1_Source_(_Deref_post_opt_z_bytecap_c_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post2_impl_(__zterm_impl,__bytecap_c_impl(size)) _Post_valid_impl_) + +#define _Deref_post_z_cap_x_(size) _SAL1_1_Source_(_Deref_post_z_cap_x_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post2_impl_(__zterm_impl,__cap_x_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_z_cap_x_(size) _SAL1_1_Source_(_Deref_post_opt_z_cap_x_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post2_impl_(__zterm_impl,__cap_x_impl(size)) _Post_valid_impl_) +#define _Deref_post_z_bytecap_x_(size) _SAL1_1_Source_(_Deref_post_z_bytecap_x_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post2_impl_(__zterm_impl,__bytecap_x_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_z_bytecap_x_(size) _SAL1_1_Source_(_Deref_post_opt_z_bytecap_x_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post2_impl_(__zterm_impl,__bytecap_x_impl(size)) _Post_valid_impl_) + +// known capacity and valid but unknown readable extent +#define _Deref_post_valid_cap_(size) _SAL1_1_Source_(_Deref_post_valid_cap_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__cap_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_valid_cap_(size) _SAL1_1_Source_(_Deref_post_opt_valid_cap_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__cap_impl(size)) _Post_valid_impl_) +#define _Deref_post_valid_bytecap_(size) _SAL1_1_Source_(_Deref_post_valid_bytecap_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__bytecap_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_valid_bytecap_(size) _SAL1_1_Source_(_Deref_post_opt_valid_bytecap_, (size), 
_Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__bytecap_impl(size)) _Post_valid_impl_) + +#define _Deref_post_valid_cap_c_(size) _SAL1_1_Source_(_Deref_post_valid_cap_c_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__cap_c_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_valid_cap_c_(size) _SAL1_1_Source_(_Deref_post_opt_valid_cap_c_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__cap_c_impl(size)) _Post_valid_impl_) +#define _Deref_post_valid_bytecap_c_(size) _SAL1_1_Source_(_Deref_post_valid_bytecap_c_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__bytecap_c_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_valid_bytecap_c_(size) _SAL1_1_Source_(_Deref_post_opt_valid_bytecap_c_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__bytecap_c_impl(size)) _Post_valid_impl_) + +#define _Deref_post_valid_cap_x_(size) _SAL1_1_Source_(_Deref_post_valid_cap_x_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__cap_x_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_valid_cap_x_(size) _SAL1_1_Source_(_Deref_post_opt_valid_cap_x_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__cap_x_impl(size)) _Post_valid_impl_) +#define _Deref_post_valid_bytecap_x_(size) _SAL1_1_Source_(_Deref_post_valid_bytecap_x_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__bytecap_x_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_valid_bytecap_x_(size) _SAL1_1_Source_(_Deref_post_opt_valid_bytecap_x_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__bytecap_x_impl(size)) _Post_valid_impl_) + +// e.g. 
HRESULT HrAllocateZeroInitializedMemory( size_t cb, _Out_ _Deref_post_bytecount_(cb) void** ppv ); +// valid buffer extent is described by another parameter +#define _Deref_post_count_(size) _SAL1_1_Source_(_Deref_post_count_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__count_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_count_(size) _SAL1_1_Source_(_Deref_post_opt_count_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__count_impl(size)) _Post_valid_impl_) +#define _Deref_post_bytecount_(size) _SAL1_1_Source_(_Deref_post_bytecount_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__bytecount_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_bytecount_(size) _SAL1_1_Source_(_Deref_post_opt_bytecount_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__bytecount_impl(size)) _Post_valid_impl_) + +// buffer capacity is described by a constant expression +#define _Deref_post_count_c_(size) _SAL1_1_Source_(_Deref_post_count_c_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__count_c_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_count_c_(size) _SAL1_1_Source_(_Deref_post_opt_count_c_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__count_c_impl(size)) _Post_valid_impl_) +#define _Deref_post_bytecount_c_(size) _SAL1_1_Source_(_Deref_post_bytecount_c_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__bytecount_c_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_bytecount_c_(size) _SAL1_1_Source_(_Deref_post_opt_bytecount_c_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__bytecount_c_impl(size)) _Post_valid_impl_) + +// buffer capacity is described by a complex expression +#define _Deref_post_count_x_(size) _SAL1_1_Source_(_Deref_post_count_x_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__count_x_impl(size)) _Post_valid_impl_) 
+#define _Deref_post_opt_count_x_(size) _SAL1_1_Source_(_Deref_post_opt_count_x_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__count_x_impl(size)) _Post_valid_impl_) +#define _Deref_post_bytecount_x_(size) _SAL1_1_Source_(_Deref_post_bytecount_x_, (size), _Deref_post1_impl_(__notnull_impl_notref) _Deref_post1_impl_(__bytecount_x_impl(size)) _Post_valid_impl_) +#define _Deref_post_opt_bytecount_x_(size) _SAL1_1_Source_(_Deref_post_opt_bytecount_x_, (size), _Deref_post1_impl_(__maybenull_impl_notref) _Deref_post1_impl_(__bytecount_x_impl(size)) _Post_valid_impl_) + +// e.g. void GetStrings( _Out_count_(cElems) _Deref_post_valid_ LPSTR const rgStr[], size_t cElems ); +#define _Deref_post_valid_ _SAL1_1_Source_(_Deref_post_valid_, (), _Deref_post1_impl_(__notnull_impl_notref) _Post_valid_impl_) +#define _Deref_post_opt_valid_ _SAL1_1_Source_(_Deref_post_opt_valid_, (), _Deref_post1_impl_(__maybenull_impl_notref) _Post_valid_impl_) + +#define _Deref_post_notnull_ _SAL1_1_Source_(_Deref_post_notnull_, (), _Deref_post1_impl_(__notnull_impl_notref)) +#define _Deref_post_maybenull_ _SAL1_1_Source_(_Deref_post_maybenull_, (), _Deref_post1_impl_(__maybenull_impl_notref)) +#define _Deref_post_null_ _SAL1_1_Source_(_Deref_post_null_, (), _Deref_post1_impl_(__null_impl_notref)) + +// +// _Deref_ret_ --- +// + +#define _Deref_ret_z_ _SAL1_1_Source_(_Deref_ret_z_, (), _Deref_ret1_impl_(__notnull_impl_notref) _Deref_ret1_impl_(__zterm_impl)) +#define _Deref_ret_opt_z_ _SAL1_1_Source_(_Deref_ret_opt_z_, (), _Deref_ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__zterm_impl)) + +// +// special _Deref_ --- +// +#define _Deref2_pre_readonly_ _SAL1_1_Source_(_Deref2_pre_readonly_, (), _Deref2_pre1_impl_(__readaccess_impl_notref)) + +// +// _Ret_ --- +// + +// e.g. 
_Ret_opt_valid_ LPSTR void* CloneSTR( _Pre_valid_ LPSTR src ); +#define _Ret_opt_valid_ _SAL1_1_Source_(_Ret_opt_valid_, (), _Ret1_impl_(__maybenull_impl_notref) _Ret_valid_impl_) +#define _Ret_opt_z_ _SAL1_1_Source_(_Ret_opt_z_, (), _Ret2_impl_(__maybenull_impl,__zterm_impl) _Ret_valid_impl_) + +// e.g. _Ret_opt_bytecap_(cb) void* AllocateMemory( size_t cb ); +// Buffer capacity is described by another parameter +#define _Ret_cap_(size) _SAL1_1_Source_(_Ret_cap_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__cap_impl(size))) +#define _Ret_opt_cap_(size) _SAL1_1_Source_(_Ret_opt_cap_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__cap_impl(size))) +#define _Ret_bytecap_(size) _SAL1_1_Source_(_Ret_bytecap_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__bytecap_impl(size))) +#define _Ret_opt_bytecap_(size) _SAL1_1_Source_(_Ret_opt_bytecap_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__bytecap_impl(size))) + +// Buffer capacity is described by a constant expression +#define _Ret_cap_c_(size) _SAL1_1_Source_(_Ret_cap_c_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__cap_c_impl(size))) +#define _Ret_opt_cap_c_(size) _SAL1_1_Source_(_Ret_opt_cap_c_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__cap_c_impl(size))) +#define _Ret_bytecap_c_(size) _SAL1_1_Source_(_Ret_bytecap_c_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__bytecap_c_impl(size))) +#define _Ret_opt_bytecap_c_(size) _SAL1_1_Source_(_Ret_opt_bytecap_c_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__bytecap_c_impl(size))) + +// Buffer capacity is described by a complex condition +#define _Ret_cap_x_(size) _SAL1_1_Source_(_Ret_cap_x_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__cap_x_impl(size))) +#define _Ret_opt_cap_x_(size) _SAL1_1_Source_(_Ret_opt_cap_x_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__cap_x_impl(size))) +#define _Ret_bytecap_x_(size) _SAL1_1_Source_(_Ret_bytecap_x_, 
(size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__bytecap_x_impl(size))) +#define _Ret_opt_bytecap_x_(size) _SAL1_1_Source_(_Ret_opt_bytecap_x_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__bytecap_x_impl(size))) + +// return value is nullterminated and capacity is given by another parameter +#define _Ret_z_cap_(size) _SAL1_1_Source_(_Ret_z_cap_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret2_impl_(__zterm_impl,__cap_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_z_cap_(size) _SAL1_1_Source_(_Ret_opt_z_cap_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret2_impl_(__zterm_impl,__cap_impl(size)) _Ret_valid_impl_) +#define _Ret_z_bytecap_(size) _SAL1_1_Source_(_Ret_z_bytecap_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret2_impl_(__zterm_impl,__bytecap_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_z_bytecap_(size) _SAL1_1_Source_(_Ret_opt_z_bytecap_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret2_impl_(__zterm_impl,__bytecap_impl(size)) _Ret_valid_impl_) + +// e.g. 
_Ret_opt_bytecount_(cb) void* AllocateZeroInitializedMemory( size_t cb ); +// Valid Buffer extent is described by another parameter +#define _Ret_count_(size) _SAL1_1_Source_(_Ret_count_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__count_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_count_(size) _SAL1_1_Source_(_Ret_opt_count_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__count_impl(size)) _Ret_valid_impl_) +#define _Ret_bytecount_(size) _SAL1_1_Source_(_Ret_bytecount_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__bytecount_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_bytecount_(size) _SAL1_1_Source_(_Ret_opt_bytecount_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__bytecount_impl(size)) _Ret_valid_impl_) + +// Valid Buffer extent is described by a constant expression +#define _Ret_count_c_(size) _SAL1_1_Source_(_Ret_count_c_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__count_c_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_count_c_(size) _SAL1_1_Source_(_Ret_opt_count_c_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__count_c_impl(size)) _Ret_valid_impl_) +#define _Ret_bytecount_c_(size) _SAL1_1_Source_(_Ret_bytecount_c_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__bytecount_c_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_bytecount_c_(size) _SAL1_1_Source_(_Ret_opt_bytecount_c_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__bytecount_c_impl(size)) _Ret_valid_impl_) + +// Valid Buffer extent is described by a complex expression +#define _Ret_count_x_(size) _SAL1_1_Source_(_Ret_count_x_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret1_impl_(__count_x_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_count_x_(size) _SAL1_1_Source_(_Ret_opt_count_x_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__count_x_impl(size)) _Ret_valid_impl_) +#define _Ret_bytecount_x_(size) _SAL1_1_Source_(_Ret_bytecount_x_, (size), _Ret1_impl_(__notnull_impl_notref) 
_Ret1_impl_(__bytecount_x_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_bytecount_x_(size) _SAL1_1_Source_(_Ret_opt_bytecount_x_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret1_impl_(__bytecount_x_impl(size)) _Ret_valid_impl_) + +// return value is nullterminated and length is given by another parameter +#define _Ret_z_count_(size) _SAL1_1_Source_(_Ret_z_count_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret2_impl_(__zterm_impl,__count_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_z_count_(size) _SAL1_1_Source_(_Ret_opt_z_count_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret2_impl_(__zterm_impl,__count_impl(size)) _Ret_valid_impl_) +#define _Ret_z_bytecount_(size) _SAL1_1_Source_(_Ret_z_bytecount_, (size), _Ret1_impl_(__notnull_impl_notref) _Ret2_impl_(__zterm_impl,__bytecount_impl(size)) _Ret_valid_impl_) +#define _Ret_opt_z_bytecount_(size) _SAL1_1_Source_(_Ret_opt_z_bytecount_, (size), _Ret1_impl_(__maybenull_impl_notref) _Ret2_impl_(__zterm_impl,__bytecount_impl(size)) _Ret_valid_impl_) + + +// _Pre_ annotations --- +#define _Pre_opt_z_ _SAL1_1_Source_(_Pre_opt_z_, (), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__zterm_impl) _Pre_valid_impl_) + +// restrict access rights +#define _Pre_readonly_ _SAL1_1_Source_(_Pre_readonly_, (), _Pre1_impl_(__readaccess_impl_notref)) +#define _Pre_writeonly_ _SAL1_1_Source_(_Pre_writeonly_, (), _Pre1_impl_(__writeaccess_impl_notref)) + +// e.g. 
void FreeMemory( _Pre_bytecap_(cb) _Post_ptr_invalid_ void* pv, size_t cb ); +// buffer capacity described by another parameter +#define _Pre_cap_(size) _SAL1_1_Source_(_Pre_cap_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_impl(size))) +#define _Pre_opt_cap_(size) _SAL1_1_Source_(_Pre_opt_cap_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__cap_impl(size))) +#define _Pre_bytecap_(size) _SAL1_1_Source_(_Pre_bytecap_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__bytecap_impl(size))) +#define _Pre_opt_bytecap_(size) _SAL1_1_Source_(_Pre_opt_bytecap_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__bytecap_impl(size))) + +// buffer capacity described by a constant expression +#define _Pre_cap_c_(size) _SAL1_1_Source_(_Pre_cap_c_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_c_impl(size))) +#define _Pre_opt_cap_c_(size) _SAL1_1_Source_(_Pre_opt_cap_c_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__cap_c_impl(size))) +#define _Pre_bytecap_c_(size) _SAL1_1_Source_(_Pre_bytecap_c_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__bytecap_c_impl(size))) +#define _Pre_opt_bytecap_c_(size) _SAL1_1_Source_(_Pre_opt_bytecap_c_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__bytecap_c_impl(size))) +#define _Pre_cap_c_one_ _SAL1_1_Source_(_Pre_cap_c_one_, (), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_c_one_notref_impl)) +#define _Pre_opt_cap_c_one_ _SAL1_1_Source_(_Pre_opt_cap_c_one_, (), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__cap_c_one_notref_impl)) + +// buffer capacity is described by another parameter multiplied by a constant expression +#define _Pre_cap_m_(mult,size) _SAL1_1_Source_(_Pre_cap_m_, (mult,size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__mult_impl(mult,size))) +#define _Pre_opt_cap_m_(mult,size) _SAL1_1_Source_(_Pre_opt_cap_m_, (mult,size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__mult_impl(mult,size))) + +// buffer 
capacity described by size of other buffer, only used by dangerous legacy APIs +// e.g. int strcpy(_Pre_cap_for_(src) char* dst, const char* src); +#define _Pre_cap_for_(param) _SAL1_1_Source_(_Pre_cap_for_, (param), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_for_impl(param))) +#define _Pre_opt_cap_for_(param) _SAL1_1_Source_(_Pre_opt_cap_for_, (param), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__cap_for_impl(param))) + +// buffer capacity described by a complex condition +#define _Pre_cap_x_(size) _SAL1_1_Source_(_Pre_cap_x_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_x_impl(size))) +#define _Pre_opt_cap_x_(size) _SAL1_1_Source_(_Pre_opt_cap_x_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__cap_x_impl(size))) +#define _Pre_bytecap_x_(size) _SAL1_1_Source_(_Pre_bytecap_x_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__bytecap_x_impl(size))) +#define _Pre_opt_bytecap_x_(size) _SAL1_1_Source_(_Pre_opt_bytecap_x_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__bytecap_x_impl(size))) + +// buffer capacity described by the difference to another pointer parameter +#define _Pre_ptrdiff_cap_(ptr) _SAL1_1_Source_(_Pre_ptrdiff_cap_, (ptr), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_x_impl(__ptrdiff(ptr)))) +#define _Pre_opt_ptrdiff_cap_(ptr) _SAL1_1_Source_(_Pre_opt_ptrdiff_cap_, (ptr), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__cap_x_impl(__ptrdiff(ptr)))) + +// e.g. 
void AppendStr( _Pre_z_ const char* szFrom, _Pre_z_cap_(cchTo) _Post_z_ char* szTo, size_t cchTo ); +#define _Pre_z_cap_(size) _SAL1_1_Source_(_Pre_z_cap_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre2_impl_(__zterm_impl,__cap_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_z_cap_(size) _SAL1_1_Source_(_Pre_opt_z_cap_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre2_impl_(__zterm_impl,__cap_impl(size)) _Pre_valid_impl_) +#define _Pre_z_bytecap_(size) _SAL1_1_Source_(_Pre_z_bytecap_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre2_impl_(__zterm_impl,__bytecap_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_z_bytecap_(size) _SAL1_1_Source_(_Pre_opt_z_bytecap_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre2_impl_(__zterm_impl,__bytecap_impl(size)) _Pre_valid_impl_) + +#define _Pre_z_cap_c_(size) _SAL1_1_Source_(_Pre_z_cap_c_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre2_impl_(__zterm_impl,__cap_c_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_z_cap_c_(size) _SAL1_1_Source_(_Pre_opt_z_cap_c_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre2_impl_(__zterm_impl,__cap_c_impl(size)) _Pre_valid_impl_) +#define _Pre_z_bytecap_c_(size) _SAL1_1_Source_(_Pre_z_bytecap_c_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre2_impl_(__zterm_impl,__bytecap_c_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_z_bytecap_c_(size) _SAL1_1_Source_(_Pre_opt_z_bytecap_c_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre2_impl_(__zterm_impl,__bytecap_c_impl(size)) _Pre_valid_impl_) + +#define _Pre_z_cap_x_(size) _SAL1_1_Source_(_Pre_z_cap_x_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre2_impl_(__zterm_impl,__cap_x_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_z_cap_x_(size) _SAL1_1_Source_(_Pre_opt_z_cap_x_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre2_impl_(__zterm_impl,__cap_x_impl(size)) _Pre_valid_impl_) +#define _Pre_z_bytecap_x_(size) _SAL1_1_Source_(_Pre_z_bytecap_x_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre2_impl_(__zterm_impl,__bytecap_x_impl(size)) 
_Pre_valid_impl_) +#define _Pre_opt_z_bytecap_x_(size) _SAL1_1_Source_(_Pre_opt_z_bytecap_x_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre2_impl_(__zterm_impl,__bytecap_x_impl(size)) _Pre_valid_impl_) + +// known capacity and valid but unknown readable extent +#define _Pre_valid_cap_(size) _SAL1_1_Source_(_Pre_valid_cap_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_valid_cap_(size) _SAL1_1_Source_(_Pre_opt_valid_cap_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__cap_impl(size)) _Pre_valid_impl_) +#define _Pre_valid_bytecap_(size) _SAL1_1_Source_(_Pre_valid_bytecap_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__bytecap_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_valid_bytecap_(size) _SAL1_1_Source_(_Pre_opt_valid_bytecap_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__bytecap_impl(size)) _Pre_valid_impl_) + +#define _Pre_valid_cap_c_(size) _SAL1_1_Source_(_Pre_valid_cap_c_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_c_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_valid_cap_c_(size) _SAL1_1_Source_(_Pre_opt_valid_cap_c_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__cap_c_impl(size)) _Pre_valid_impl_) +#define _Pre_valid_bytecap_c_(size) _SAL1_1_Source_(_Pre_valid_bytecap_c_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__bytecap_c_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_valid_bytecap_c_(size) _SAL1_1_Source_(_Pre_opt_valid_bytecap_c_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__bytecap_c_impl(size)) _Pre_valid_impl_) + +#define _Pre_valid_cap_x_(size) _SAL1_1_Source_(_Pre_valid_cap_x_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_x_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_valid_cap_x_(size) _SAL1_1_Source_(_Pre_opt_valid_cap_x_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__cap_x_impl(size)) _Pre_valid_impl_) +#define _Pre_valid_bytecap_x_(size) 
_SAL1_1_Source_(_Pre_valid_bytecap_x_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__bytecap_x_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_valid_bytecap_x_(size) _SAL1_1_Source_(_Pre_opt_valid_bytecap_x_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__bytecap_x_impl(size)) _Pre_valid_impl_) + +// e.g. void AppendCharRange( _Pre_count_(cchFrom) const char* rgFrom, size_t cchFrom, _Out_z_cap_(cchTo) char* szTo, size_t cchTo ); +// Valid buffer extent described by another parameter +#define _Pre_count_(size) _SAL1_1_Source_(_Pre_count_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__count_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_count_(size) _SAL1_1_Source_(_Pre_opt_count_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__count_impl(size)) _Pre_valid_impl_) +#define _Pre_bytecount_(size) _SAL1_1_Source_(_Pre_bytecount_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__bytecount_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_bytecount_(size) _SAL1_1_Source_(_Pre_opt_bytecount_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__bytecount_impl(size)) _Pre_valid_impl_) + +// Valid buffer extent described by a constant expression +#define _Pre_count_c_(size) _SAL1_1_Source_(_Pre_count_c_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__count_c_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_count_c_(size) _SAL1_1_Source_(_Pre_opt_count_c_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__count_c_impl(size)) _Pre_valid_impl_) +#define _Pre_bytecount_c_(size) _SAL1_1_Source_(_Pre_bytecount_c_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__bytecount_c_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_bytecount_c_(size) _SAL1_1_Source_(_Pre_opt_bytecount_c_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__bytecount_c_impl(size)) _Pre_valid_impl_) + +// Valid buffer extent described by a complex expression +#define _Pre_count_x_(size) _SAL1_1_Source_(_Pre_count_x_, 
(size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__count_x_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_count_x_(size) _SAL1_1_Source_(_Pre_opt_count_x_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__count_x_impl(size)) _Pre_valid_impl_) +#define _Pre_bytecount_x_(size) _SAL1_1_Source_(_Pre_bytecount_x_, (size), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__bytecount_x_impl(size)) _Pre_valid_impl_) +#define _Pre_opt_bytecount_x_(size) _SAL1_1_Source_(_Pre_opt_bytecount_x_, (size), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__bytecount_x_impl(size)) _Pre_valid_impl_) + +// Valid buffer extent described by the difference to another pointer parameter +#define _Pre_ptrdiff_count_(ptr) _SAL1_1_Source_(_Pre_ptrdiff_count_, (ptr), _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__count_x_impl(__ptrdiff(ptr))) _Pre_valid_impl_) +#define _Pre_opt_ptrdiff_count_(ptr) _SAL1_1_Source_(_Pre_opt_ptrdiff_count_, (ptr), _Pre1_impl_(__maybenull_impl_notref) _Pre1_impl_(__count_x_impl(__ptrdiff(ptr))) _Pre_valid_impl_) + + +// char * strncpy(_Out_cap_(_Count) _Post_maybez_ char * _Dest, _In_z_ const char * _Source, _In_ size_t _Count) +// buffer maybe zero-terminated after the call +#define _Post_maybez_ _SAL1_1_Source_(_Post_maybez_, (), _Post1_impl_(__maybezterm_impl)) + +// e.g. SIZE_T HeapSize( _In_ HANDLE hHeap, DWORD dwFlags, _Pre_notnull_ _Post_bytecap_(return) LPCVOID lpMem ); +#define _Post_cap_(size) _SAL1_1_Source_(_Post_cap_, (size), _Post1_impl_(__cap_impl(size))) +#define _Post_bytecap_(size) _SAL1_1_Source_(_Post_bytecap_, (size), _Post1_impl_(__bytecap_impl(size))) + +// e.g. 
int strlen( _In_z_ _Post_count_(return+1) const char* sz ); +#define _Post_count_(size) _SAL1_1_Source_(_Post_count_, (size), _Post1_impl_(__count_impl(size)) _Post_valid_impl_) +#define _Post_bytecount_(size) _SAL1_1_Source_(_Post_bytecount_, (size), _Post1_impl_(__bytecount_impl(size)) _Post_valid_impl_) +#define _Post_count_c_(size) _SAL1_1_Source_(_Post_count_c_, (size), _Post1_impl_(__count_c_impl(size)) _Post_valid_impl_) +#define _Post_bytecount_c_(size) _SAL1_1_Source_(_Post_bytecount_c_, (size), _Post1_impl_(__bytecount_c_impl(size)) _Post_valid_impl_) +#define _Post_count_x_(size) _SAL1_1_Source_(_Post_count_x_, (size), _Post1_impl_(__count_x_impl(size)) _Post_valid_impl_) +#define _Post_bytecount_x_(size) _SAL1_1_Source_(_Post_bytecount_x_, (size), _Post1_impl_(__bytecount_x_impl(size)) _Post_valid_impl_) + +// e.g. size_t CopyStr( _In_z_ const char* szFrom, _Pre_cap_(cch) _Post_z_count_(return+1) char* szFrom, size_t cchFrom ); +#define _Post_z_count_(size) _SAL1_1_Source_(_Post_z_count_, (size), _Post2_impl_(__zterm_impl,__count_impl(size)) _Post_valid_impl_) +#define _Post_z_bytecount_(size) _SAL1_1_Source_(_Post_z_bytecount_, (size), _Post2_impl_(__zterm_impl,__bytecount_impl(size)) _Post_valid_impl_) +#define _Post_z_count_c_(size) _SAL1_1_Source_(_Post_z_count_c_, (size), _Post2_impl_(__zterm_impl,__count_c_impl(size)) _Post_valid_impl_) +#define _Post_z_bytecount_c_(size) _SAL1_1_Source_(_Post_z_bytecount_c_, (size), _Post2_impl_(__zterm_impl,__bytecount_c_impl(size)) _Post_valid_impl_) +#define _Post_z_count_x_(size) _SAL1_1_Source_(_Post_z_count_x_, (size), _Post2_impl_(__zterm_impl,__count_x_impl(size)) _Post_valid_impl_) +#define _Post_z_bytecount_x_(size) _SAL1_1_Source_(_Post_z_bytecount_x_, (size), _Post2_impl_(__zterm_impl,__bytecount_x_impl(size)) _Post_valid_impl_) + +// +// _Prepost_ --- +// +// describing conditions that hold before and after the function call + +#define _Prepost_opt_z_ _SAL1_1_Source_(_Prepost_opt_z_, (), _Pre_opt_z_ 
_Post_z_) + +#define _Prepost_count_(size) _SAL1_1_Source_(_Prepost_count_, (size), _Pre_count_(size) _Post_count_(size)) +#define _Prepost_opt_count_(size) _SAL1_1_Source_(_Prepost_opt_count_, (size), _Pre_opt_count_(size) _Post_count_(size)) +#define _Prepost_bytecount_(size) _SAL1_1_Source_(_Prepost_bytecount_, (size), _Pre_bytecount_(size) _Post_bytecount_(size)) +#define _Prepost_opt_bytecount_(size) _SAL1_1_Source_(_Prepost_opt_bytecount_, (size), _Pre_opt_bytecount_(size) _Post_bytecount_(size)) +#define _Prepost_count_c_(size) _SAL1_1_Source_(_Prepost_count_c_, (size), _Pre_count_c_(size) _Post_count_c_(size)) +#define _Prepost_opt_count_c_(size) _SAL1_1_Source_(_Prepost_opt_count_c_, (size), _Pre_opt_count_c_(size) _Post_count_c_(size)) +#define _Prepost_bytecount_c_(size) _SAL1_1_Source_(_Prepost_bytecount_c_, (size), _Pre_bytecount_c_(size) _Post_bytecount_c_(size)) +#define _Prepost_opt_bytecount_c_(size) _SAL1_1_Source_(_Prepost_opt_bytecount_c_, (size), _Pre_opt_bytecount_c_(size) _Post_bytecount_c_(size)) +#define _Prepost_count_x_(size) _SAL1_1_Source_(_Prepost_count_x_, (size), _Pre_count_x_(size) _Post_count_x_(size)) +#define _Prepost_opt_count_x_(size) _SAL1_1_Source_(_Prepost_opt_count_x_, (size), _Pre_opt_count_x_(size) _Post_count_x_(size)) +#define _Prepost_bytecount_x_(size) _SAL1_1_Source_(_Prepost_bytecount_x_, (size), _Pre_bytecount_x_(size) _Post_bytecount_x_(size)) +#define _Prepost_opt_bytecount_x_(size) _SAL1_1_Source_(_Prepost_opt_bytecount_x_, (size), _Pre_opt_bytecount_x_(size) _Post_bytecount_x_(size)) + +#define _Prepost_valid_ _SAL1_1_Source_(_Prepost_valid_, (), _Pre_valid_ _Post_valid_) +#define _Prepost_opt_valid_ _SAL1_1_Source_(_Prepost_opt_valid_, (), _Pre_opt_valid_ _Post_valid_) + +// +// _Deref_ --- +// +// short version for _Deref_pre_ _Deref_post_ +// describing conditions for array elements or dereferenced pointer parameters that hold before and after the call + +#define _Deref_prepost_z_ 
_SAL1_1_Source_(_Deref_prepost_z_, (), _Deref_pre_z_ _Deref_post_z_) +#define _Deref_prepost_opt_z_ _SAL1_1_Source_(_Deref_prepost_opt_z_, (), _Deref_pre_opt_z_ _Deref_post_opt_z_) + +#define _Deref_prepost_cap_(size) _SAL1_1_Source_(_Deref_prepost_cap_, (size), _Deref_pre_cap_(size) _Deref_post_cap_(size)) +#define _Deref_prepost_opt_cap_(size) _SAL1_1_Source_(_Deref_prepost_opt_cap_, (size), _Deref_pre_opt_cap_(size) _Deref_post_opt_cap_(size)) +#define _Deref_prepost_bytecap_(size) _SAL1_1_Source_(_Deref_prepost_bytecap_, (size), _Deref_pre_bytecap_(size) _Deref_post_bytecap_(size)) +#define _Deref_prepost_opt_bytecap_(size) _SAL1_1_Source_(_Deref_prepost_opt_bytecap_, (size), _Deref_pre_opt_bytecap_(size) _Deref_post_opt_bytecap_(size)) + +#define _Deref_prepost_cap_x_(size) _SAL1_1_Source_(_Deref_prepost_cap_x_, (size), _Deref_pre_cap_x_(size) _Deref_post_cap_x_(size)) +#define _Deref_prepost_opt_cap_x_(size) _SAL1_1_Source_(_Deref_prepost_opt_cap_x_, (size), _Deref_pre_opt_cap_x_(size) _Deref_post_opt_cap_x_(size)) +#define _Deref_prepost_bytecap_x_(size) _SAL1_1_Source_(_Deref_prepost_bytecap_x_, (size), _Deref_pre_bytecap_x_(size) _Deref_post_bytecap_x_(size)) +#define _Deref_prepost_opt_bytecap_x_(size) _SAL1_1_Source_(_Deref_prepost_opt_bytecap_x_, (size), _Deref_pre_opt_bytecap_x_(size) _Deref_post_opt_bytecap_x_(size)) + +#define _Deref_prepost_z_cap_(size) _SAL1_1_Source_(_Deref_prepost_z_cap_, (size), _Deref_pre_z_cap_(size) _Deref_post_z_cap_(size)) +#define _Deref_prepost_opt_z_cap_(size) _SAL1_1_Source_(_Deref_prepost_opt_z_cap_, (size), _Deref_pre_opt_z_cap_(size) _Deref_post_opt_z_cap_(size)) +#define _Deref_prepost_z_bytecap_(size) _SAL1_1_Source_(_Deref_prepost_z_bytecap_, (size), _Deref_pre_z_bytecap_(size) _Deref_post_z_bytecap_(size)) +#define _Deref_prepost_opt_z_bytecap_(size) _SAL1_1_Source_(_Deref_prepost_opt_z_bytecap_, (size), _Deref_pre_opt_z_bytecap_(size) _Deref_post_opt_z_bytecap_(size)) + +#define _Deref_prepost_valid_cap_(size) 
_SAL1_1_Source_(_Deref_prepost_valid_cap_, (size), _Deref_pre_valid_cap_(size) _Deref_post_valid_cap_(size)) +#define _Deref_prepost_opt_valid_cap_(size) _SAL1_1_Source_(_Deref_prepost_opt_valid_cap_, (size), _Deref_pre_opt_valid_cap_(size) _Deref_post_opt_valid_cap_(size)) +#define _Deref_prepost_valid_bytecap_(size) _SAL1_1_Source_(_Deref_prepost_valid_bytecap_, (size), _Deref_pre_valid_bytecap_(size) _Deref_post_valid_bytecap_(size)) +#define _Deref_prepost_opt_valid_bytecap_(size) _SAL1_1_Source_(_Deref_prepost_opt_valid_bytecap_, (size), _Deref_pre_opt_valid_bytecap_(size) _Deref_post_opt_valid_bytecap_(size)) + +#define _Deref_prepost_valid_cap_x_(size) _SAL1_1_Source_(_Deref_prepost_valid_cap_x_, (size), _Deref_pre_valid_cap_x_(size) _Deref_post_valid_cap_x_(size)) +#define _Deref_prepost_opt_valid_cap_x_(size) _SAL1_1_Source_(_Deref_prepost_opt_valid_cap_x_, (size), _Deref_pre_opt_valid_cap_x_(size) _Deref_post_opt_valid_cap_x_(size)) +#define _Deref_prepost_valid_bytecap_x_(size) _SAL1_1_Source_(_Deref_prepost_valid_bytecap_x_, (size), _Deref_pre_valid_bytecap_x_(size) _Deref_post_valid_bytecap_x_(size)) +#define _Deref_prepost_opt_valid_bytecap_x_(size) _SAL1_1_Source_(_Deref_prepost_opt_valid_bytecap_x_, (size), _Deref_pre_opt_valid_bytecap_x_(size) _Deref_post_opt_valid_bytecap_x_(size)) + +#define _Deref_prepost_count_(size) _SAL1_1_Source_(_Deref_prepost_count_, (size), _Deref_pre_count_(size) _Deref_post_count_(size)) +#define _Deref_prepost_opt_count_(size) _SAL1_1_Source_(_Deref_prepost_opt_count_, (size), _Deref_pre_opt_count_(size) _Deref_post_opt_count_(size)) +#define _Deref_prepost_bytecount_(size) _SAL1_1_Source_(_Deref_prepost_bytecount_, (size), _Deref_pre_bytecount_(size) _Deref_post_bytecount_(size)) +#define _Deref_prepost_opt_bytecount_(size) _SAL1_1_Source_(_Deref_prepost_opt_bytecount_, (size), _Deref_pre_opt_bytecount_(size) _Deref_post_opt_bytecount_(size)) + +#define _Deref_prepost_count_x_(size) 
_SAL1_1_Source_(_Deref_prepost_count_x_, (size), _Deref_pre_count_x_(size) _Deref_post_count_x_(size)) +#define _Deref_prepost_opt_count_x_(size) _SAL1_1_Source_(_Deref_prepost_opt_count_x_, (size), _Deref_pre_opt_count_x_(size) _Deref_post_opt_count_x_(size)) +#define _Deref_prepost_bytecount_x_(size) _SAL1_1_Source_(_Deref_prepost_bytecount_x_, (size), _Deref_pre_bytecount_x_(size) _Deref_post_bytecount_x_(size)) +#define _Deref_prepost_opt_bytecount_x_(size) _SAL1_1_Source_(_Deref_prepost_opt_bytecount_x_, (size), _Deref_pre_opt_bytecount_x_(size) _Deref_post_opt_bytecount_x_(size)) + +#define _Deref_prepost_valid_ _SAL1_1_Source_(_Deref_prepost_valid_, (), _Deref_pre_valid_ _Deref_post_valid_) +#define _Deref_prepost_opt_valid_ _SAL1_1_Source_(_Deref_prepost_opt_valid_, (), _Deref_pre_opt_valid_ _Deref_post_opt_valid_) + +// +// _Deref_ +// +// used with references to arrays + +#define _Deref_out_z_cap_c_(size) _SAL1_1_Source_(_Deref_out_z_cap_c_, (size), _Deref_pre_cap_c_(size) _Deref_post_z_) +#define _Deref_inout_z_cap_c_(size) _SAL1_1_Source_(_Deref_inout_z_cap_c_, (size), _Deref_pre_z_cap_c_(size) _Deref_post_z_) +#define _Deref_out_z_bytecap_c_(size) _SAL1_1_Source_(_Deref_out_z_bytecap_c_, (size), _Deref_pre_bytecap_c_(size) _Deref_post_z_) +#define _Deref_inout_z_bytecap_c_(size) _SAL1_1_Source_(_Deref_inout_z_bytecap_c_, (size), _Deref_pre_z_bytecap_c_(size) _Deref_post_z_) +#define _Deref_inout_z_ _SAL1_1_Source_(_Deref_inout_z_, (), _Deref_prepost_z_) + +// #pragma endregion Input Buffer SAL 1 compatibility macros + + +//============================================================================ +// Implementation Layer: +//============================================================================ + + +// Naming conventions: +// A symbol the begins with _SA_ is for the machinery of creating any +// annotations; many of those come from sourceannotations.h in the case +// of attributes. 
+ +// A symbol that ends with _impl is the very lowest level macro. It is +// not required to be a legal standalone annotation, and in the case +// of attribute annotations, usually is not. (In the case of some declspec +// annotations, it might be, but it should not be assumed so.) Those +// symols will be used in the _PreN..., _PostN... and _RetN... annotations +// to build up more complete annotations. + +// A symbol ending in _impl_ is reserved to the implementation as well, +// but it does form a complete annotation; usually they are used to build +// up even higher level annotations. + + +#if _USE_ATTRIBUTES_FOR_SAL || _USE_DECLSPECS_FOR_SAL // [ +// Sharable "_impl" macros: these can be shared between the various annotation +// forms but are part of the implementation of the macros. These are collected +// here to assure that only necessary differences in the annotations +// exist. + +#define _Always_impl_(annos) _Group_(annos _SAL_nop_impl_) _On_failure_impl_(annos _SAL_nop_impl_) +#define _Bound_impl_ _SA_annotes0(SAL_bound) +#define _Field_range_impl_(min,max) _Range_impl_(min,max) +#define _Literal_impl_ _SA_annotes1(SAL_constant, __yes) +#define _Maybenull_impl_ _SA_annotes1(SAL_null, __maybe) +#define _Maybevalid_impl_ _SA_annotes1(SAL_valid, __maybe) +#define _Must_inspect_impl_ _Post_impl_ _SA_annotes0(SAL_mustInspect) +#define _Notliteral_impl_ _SA_annotes1(SAL_constant, __no) +#define _Notnull_impl_ _SA_annotes1(SAL_null, __no) +#define _Notvalid_impl_ _SA_annotes1(SAL_valid, __no) +#define _NullNull_terminated_impl_ _Group_(_SA_annotes1(SAL_nullTerminated, __yes) _SA_annotes1(SAL_readableTo,inexpressibleCount("NullNull terminated string"))) +#define _Null_impl_ _SA_annotes1(SAL_null, __yes) +#define _Null_terminated_impl_ _SA_annotes1(SAL_nullTerminated, __yes) +#define _Out_impl_ _Pre1_impl_(__notnull_impl_notref) _Pre1_impl_(__cap_c_one_notref_impl) _Post_valid_impl_ +#define _Out_opt_impl_ _Pre1_impl_(__maybenull_impl_notref) 
_Pre1_impl_(__cap_c_one_notref_impl) _Post_valid_impl_ +#define _Points_to_data_impl_ _At_(*_Curr_, _SA_annotes1(SAL_mayBePointer, __no)) +#define _Post_satisfies_impl_(cond) _Post_impl_ _Satisfies_impl_(cond) +#define _Post_valid_impl_ _Post1_impl_(__valid_impl) +#define _Pre_satisfies_impl_(cond) _Pre_impl_ _Satisfies_impl_(cond) +#define _Pre_valid_impl_ _Pre1_impl_(__valid_impl) +#define _Range_impl_(min,max) _SA_annotes2(SAL_range, min, max) +#define _Readable_bytes_impl_(size) _SA_annotes1(SAL_readableTo, byteCount(size)) +#define _Readable_elements_impl_(size) _SA_annotes1(SAL_readableTo, elementCount(size)) +#define _Ret_valid_impl_ _Ret1_impl_(__valid_impl) +#define _Satisfies_impl_(cond) _SA_annotes1(SAL_satisfies, cond) +#define _Valid_impl_ _SA_annotes1(SAL_valid, __yes) +#define _Writable_bytes_impl_(size) _SA_annotes1(SAL_writableTo, byteCount(size)) +#define _Writable_elements_impl_(size) _SA_annotes1(SAL_writableTo, elementCount(size)) + +#define _In_range_impl_(min,max) _Pre_impl_ _Range_impl_(min,max) +#define _Out_range_impl_(min,max) _Post_impl_ _Range_impl_(min,max) +#define _Ret_range_impl_(min,max) _Post_impl_ _Range_impl_(min,max) +#define _Deref_in_range_impl_(min,max) _Deref_pre_impl_ _Range_impl_(min,max) +#define _Deref_out_range_impl_(min,max) _Deref_post_impl_ _Range_impl_(min,max) +#define _Deref_ret_range_impl_(min,max) _Deref_post_impl_ _Range_impl_(min,max) + +#define _Deref_pre_impl_ _Pre_impl_ _Notref_impl_ _Deref_impl_ +#define _Deref_post_impl_ _Post_impl_ _Notref_impl_ _Deref_impl_ + +// The following are for the implementation machinery, and are not +// suitable for annotating general code. +// We're tying to phase this out, someday. The parser quotes the param. +#define __AuToQuOtE _SA_annotes0(SAL_AuToQuOtE) + +// Normally the parser does some simple type checking of annotation params, +// defer that check to the plugin. 
+#define __deferTypecheck _SA_annotes0(SAL_deferTypecheck) + +#define _SA_SPECSTRIZE( x ) #x +#define _SAL_nop_impl_ /* nothing */ +#define __nop_impl(x) x +#endif + + +#if _USE_ATTRIBUTES_FOR_SAL // [ + +// Using attributes for sal + +#include "codeanalysis\sourceannotations.h" + + +#define _SA_annotes0(n) [SAL_annotes(Name=#n)] +#define _SA_annotes1(n,pp1) [SAL_annotes(Name=#n, p1=_SA_SPECSTRIZE(pp1))] +#define _SA_annotes2(n,pp1,pp2) [SAL_annotes(Name=#n, p1=_SA_SPECSTRIZE(pp1), p2=_SA_SPECSTRIZE(pp2))] +#define _SA_annotes3(n,pp1,pp2,pp3) [SAL_annotes(Name=#n, p1=_SA_SPECSTRIZE(pp1), p2=_SA_SPECSTRIZE(pp2), p3=_SA_SPECSTRIZE(pp3))] + +#define _Pre_impl_ [SAL_pre] +#define _Post_impl_ [SAL_post] +#define _Deref_impl_ [SAL_deref] +#define _Notref_impl_ [SAL_notref] + + +// Declare a function to be an annotation or primop (respectively). +// Done this way so that they don't appear in the regular compiler's +// namespace. +#define __ANNOTATION(fun) _SA_annotes0(SAL_annotation) void __SA_##fun; +#define __PRIMOP(type, fun) _SA_annotes0(SAL_primop) type __SA_##fun; +#define __QUALIFIER(fun) _SA_annotes0(SAL_qualifier) void __SA_##fun; + +// Benign declspec needed here for WindowsPREfast +#define __In_impl_ [SA_Pre(Valid=SA_Yes)] [SA_Pre(Deref=1, Notref=1, Access=SA_Read)] __declspec("SAL_pre SAL_valid") + +#elif _USE_DECLSPECS_FOR_SAL // ][ + +// Using declspecs for sal + +#define _SA_annotes0(n) __declspec(#n) +#define _SA_annotes1(n,pp1) __declspec(#n "(" _SA_SPECSTRIZE(pp1) ")" ) +#define _SA_annotes2(n,pp1,pp2) __declspec(#n "(" _SA_SPECSTRIZE(pp1) "," _SA_SPECSTRIZE(pp2) ")") +#define _SA_annotes3(n,pp1,pp2,pp3) __declspec(#n "(" _SA_SPECSTRIZE(pp1) "," _SA_SPECSTRIZE(pp2) "," _SA_SPECSTRIZE(pp3) ")") + +#define _Pre_impl_ _SA_annotes0(SAL_pre) +#define _Post_impl_ _SA_annotes0(SAL_post) +#define _Deref_impl_ _SA_annotes0(SAL_deref) +#define _Notref_impl_ _SA_annotes0(SAL_notref) + +// Declare a function to be an annotation or primop (respectively). 
+// Done this way so that they don't appear in the regular compiler's +// namespace. +#define __ANNOTATION(fun) _SA_annotes0(SAL_annotation) void __SA_##fun + +#define __PRIMOP(type, fun) _SA_annotes0(SAL_primop) type __SA_##fun + +#define __QUALIFIER(fun) _SA_annotes0(SAL_qualifier) void __SA_##fun; + +#define __In_impl_ _Pre_impl_ _SA_annotes0(SAL_valid) _Pre_impl_ _Deref_impl_ _Notref_impl_ _SA_annotes0(SAL_readonly) + +#else // ][ + +// Using "nothing" for sal + +#define _SA_annotes0(n) +#define _SA_annotes1(n,pp1) +#define _SA_annotes2(n,pp1,pp2) +#define _SA_annotes3(n,pp1,pp2,pp3) + +#define __ANNOTATION(fun) +#define __PRIMOP(type, fun) +#define __QUALIFIER(type, fun) + +#endif // ] + +#if _USE_ATTRIBUTES_FOR_SAL || _USE_DECLSPECS_FOR_SAL // [ + +// Declare annotations that need to be declared. +__ANNOTATION(SAL_useHeader(void)); +__ANNOTATION(SAL_bound(void)); +__ANNOTATION(SAL_allocator(void)); //??? resolve with PFD +__ANNOTATION(SAL_file_parser(__AuToQuOtE __In_impl_ char *, __In_impl_ char *)); +__ANNOTATION(SAL_source_code_content(__In_impl_ char *)); +__ANNOTATION(SAL_analysisHint(__AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_untrusted_data_source(__AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_untrusted_data_source_this(__AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_validated(__AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_validated_this(__AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_encoded(void)); +__ANNOTATION(SAL_adt(__AuToQuOtE __In_impl_ char *, __AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_add_adt_property(__AuToQuOtE __In_impl_ char *, __AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_remove_adt_property(__AuToQuOtE __In_impl_ char *, __AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_transfer_adt_property_from(__AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_post_type(__AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_volatile(void)); +__ANNOTATION(SAL_nonvolatile(void)); +__ANNOTATION(SAL_entrypoint(__AuToQuOtE 
__In_impl_ char *, __AuToQuOtE __In_impl_ char *)); +__ANNOTATION(SAL_blocksOn(__In_impl_ void*)); +__ANNOTATION(SAL_mustInspect(void)); + +// Only appears in model files, but needs to be declared. +__ANNOTATION(SAL_TypeName(__AuToQuOtE __In_impl_ char *)); + +// To be declared well-known soon. +__ANNOTATION(SAL_interlocked(void);) + +#pragma warning (suppress: 28227 28241) +__ANNOTATION(SAL_name(__In_impl_ char *, __In_impl_ char *, __In_impl_ char *);) + +__PRIMOP(char *, _Macro_value_(__In_impl_ char *)); +__PRIMOP(int, _Macro_defined_(__In_impl_ char *)); +__PRIMOP(char *, _Strstr_(__In_impl_ char *, __In_impl_ char *)); + +#endif // ] + +#if _USE_ATTRIBUTES_FOR_SAL // [ + +#define _Check_return_impl_ [SA_Post(MustCheck=SA_Yes)] + +#define _Success_impl_(expr) [SA_Success(Condition=#expr)] +#define _On_failure_impl_(annos) [SAL_context(p1="SAL_failed")] _Group_(_Post_impl_ _Group_(annos _SAL_nop_impl_)) + +#define _Printf_format_string_impl_ [SA_FormatString(Style="printf")] +#define _Scanf_format_string_impl_ [SA_FormatString(Style="scanf")] +#define _Scanf_s_format_string_impl_ [SA_FormatString(Style="scanf_s")] + +#define _In_bound_impl_ [SA_PreBound(Deref=0)] +#define _Out_bound_impl_ [SA_PostBound(Deref=0)] +#define _Ret_bound_impl_ [SA_PostBound(Deref=0)] +#define _Deref_in_bound_impl_ [SA_PreBound(Deref=1)] +#define _Deref_out_bound_impl_ [SA_PostBound(Deref=1)] +#define _Deref_ret_bound_impl_ [SA_PostBound(Deref=1)] + +#define __valid_impl Valid=SA_Yes +#define __maybevalid_impl Valid=SA_Maybe +#define __notvalid_impl Valid=SA_No + +#define __null_impl Null=SA_Yes +#define __maybenull_impl Null=SA_Maybe +#define __notnull_impl Null=SA_No + +#define __null_impl_notref Null=SA_Yes,Notref=1 +#define __maybenull_impl_notref Null=SA_Maybe,Notref=1 +#define __notnull_impl_notref Null=SA_No,Notref=1 + +#define __zterm_impl NullTerminated=SA_Yes +#define __maybezterm_impl NullTerminated=SA_Maybe +#define __maybzterm_impl NullTerminated=SA_Maybe +#define 
__notzterm_impl NullTerminated=SA_No + +#define __readaccess_impl Access=SA_Read +#define __writeaccess_impl Access=SA_Write +#define __allaccess_impl Access=SA_ReadWrite + +#define __readaccess_impl_notref Access=SA_Read,Notref=1 +#define __writeaccess_impl_notref Access=SA_Write,Notref=1 +#define __allaccess_impl_notref Access=SA_ReadWrite,Notref=1 + +#if _MSC_VER >= 1610 /*IFSTRIP=IGN*/ // [ + +// For SAL2, we need to expect general expressions. + +#define __cap_impl(size) WritableElements="\n"#size +#define __bytecap_impl(size) WritableBytes="\n"#size +#define __bytecount_impl(size) ValidBytes="\n"#size +#define __count_impl(size) ValidElements="\n"#size + +#else // ][ + +#define __cap_impl(size) WritableElements=#size +#define __bytecap_impl(size) WritableBytes=#size +#define __bytecount_impl(size) ValidBytes=#size +#define __count_impl(size) ValidElements=#size + +#endif // ] + +#define __cap_c_impl(size) WritableElementsConst=size +#define __cap_c_one_notref_impl WritableElementsConst=1,Notref=1 +#define __cap_for_impl(param) WritableElementsLength=#param +#define __cap_x_impl(size) WritableElements="\n@"#size + +#define __bytecap_c_impl(size) WritableBytesConst=size +#define __bytecap_x_impl(size) WritableBytes="\n@"#size + +#define __mult_impl(mult,size) __cap_impl((mult)*(size)) + +#define __count_c_impl(size) ValidElementsConst=size +#define __count_x_impl(size) ValidElements="\n@"#size + +#define __bytecount_c_impl(size) ValidBytesConst=size +#define __bytecount_x_impl(size) ValidBytes="\n@"#size + + +#define _At_impl_(target, annos) [SAL_at(p1=#target)] _Group_(annos) +#define _At_buffer_impl_(target, iter, bound, annos) [SAL_at_buffer(p1=#target, p2=#iter, p3=#bound)] _Group_(annos) +#define _When_impl_(expr, annos) [SAL_when(p1=#expr)] _Group_(annos) + +#define _Group_impl_(annos) [SAL_begin] annos [SAL_end] +#define _GrouP_impl_(annos) [SAL_BEGIN] annos [SAL_END] + +#define _Use_decl_anno_impl_ _SA_annotes0(SAL_useHeader) // this is a special case! 
+ +#define _Pre1_impl_(p1) [SA_Pre(p1)] +#define _Pre2_impl_(p1,p2) [SA_Pre(p1,p2)] +#define _Pre3_impl_(p1,p2,p3) [SA_Pre(p1,p2,p3)] + +#define _Post1_impl_(p1) [SA_Post(p1)] +#define _Post2_impl_(p1,p2) [SA_Post(p1,p2)] +#define _Post3_impl_(p1,p2,p3) [SA_Post(p1,p2,p3)] + +#define _Ret1_impl_(p1) [SA_Post(p1)] +#define _Ret2_impl_(p1,p2) [SA_Post(p1,p2)] +#define _Ret3_impl_(p1,p2,p3) [SA_Post(p1,p2,p3)] + +#define _Deref_pre1_impl_(p1) [SA_Pre(Deref=1,p1)] +#define _Deref_pre2_impl_(p1,p2) [SA_Pre(Deref=1,p1,p2)] +#define _Deref_pre3_impl_(p1,p2,p3) [SA_Pre(Deref=1,p1,p2,p3)] + + +#define _Deref_post1_impl_(p1) [SA_Post(Deref=1,p1)] +#define _Deref_post2_impl_(p1,p2) [SA_Post(Deref=1,p1,p2)] +#define _Deref_post3_impl_(p1,p2,p3) [SA_Post(Deref=1,p1,p2,p3)] + +#define _Deref_ret1_impl_(p1) [SA_Post(Deref=1,p1)] +#define _Deref_ret2_impl_(p1,p2) [SA_Post(Deref=1,p1,p2)] +#define _Deref_ret3_impl_(p1,p2,p3) [SA_Post(Deref=1,p1,p2,p3)] + +#define _Deref2_pre1_impl_(p1) [SA_Pre(Deref=2,Notref=1,p1)] +#define _Deref2_post1_impl_(p1) [SA_Post(Deref=2,Notref=1,p1)] +#define _Deref2_ret1_impl_(p1) [SA_Post(Deref=2,Notref=1,p1)] + +// Obsolete -- may be needed for transition to attributes. 
+#define __inner_typefix(ctype) [SAL_typefix(p1=_SA_SPECSTRIZE(ctype))] +#define __inner_exceptthat [SAL_except] + + +#elif _USE_DECLSPECS_FOR_SAL // ][ + +#define _Check_return_impl_ __post _SA_annotes0(SAL_checkReturn) + +#define _Success_impl_(expr) _SA_annotes1(SAL_success, expr) +#define _On_failure_impl_(annos) _SA_annotes1(SAL_context, SAL_failed) _Group_(_Post_impl_ _Group_(_SAL_nop_impl_ annos)) + +#define _Printf_format_string_impl_ _SA_annotes1(SAL_IsFormatString, "printf") +#define _Scanf_format_string_impl_ _SA_annotes1(SAL_IsFormatString, "scanf") +#define _Scanf_s_format_string_impl_ _SA_annotes1(SAL_IsFormatString, "scanf_s") + +#define _In_bound_impl_ _Pre_impl_ _Bound_impl_ +#define _Out_bound_impl_ _Post_impl_ _Bound_impl_ +#define _Ret_bound_impl_ _Post_impl_ _Bound_impl_ +#define _Deref_in_bound_impl_ _Deref_pre_impl_ _Bound_impl_ +#define _Deref_out_bound_impl_ _Deref_post_impl_ _Bound_impl_ +#define _Deref_ret_bound_impl_ _Deref_post_impl_ _Bound_impl_ + + +#define __null_impl _SA_annotes0(SAL_null) // _SA_annotes1(SAL_null, __yes) +#define __notnull_impl _SA_annotes0(SAL_notnull) // _SA_annotes1(SAL_null, __no) +#define __maybenull_impl _SA_annotes0(SAL_maybenull) // _SA_annotes1(SAL_null, __maybe) + +#define __valid_impl _SA_annotes0(SAL_valid) // _SA_annotes1(SAL_valid, __yes) +#define __notvalid_impl _SA_annotes0(SAL_notvalid) // _SA_annotes1(SAL_valid, __no) +#define __maybevalid_impl _SA_annotes0(SAL_maybevalid) // _SA_annotes1(SAL_valid, __maybe) + +#define __null_impl_notref _Notref_ _Null_impl_ +#define __maybenull_impl_notref _Notref_ _Maybenull_impl_ +#define __notnull_impl_notref _Notref_ _Notnull_impl_ + +#define __zterm_impl _SA_annotes1(SAL_nullTerminated, __yes) +#define __maybezterm_impl _SA_annotes1(SAL_nullTerminated, __maybe) +#define __maybzterm_impl _SA_annotes1(SAL_nullTerminated, __maybe) +#define __notzterm_impl _SA_annotes1(SAL_nullTerminated, __no) + +#define __readaccess_impl _SA_annotes1(SAL_access, 0x1) +#define 
__writeaccess_impl _SA_annotes1(SAL_access, 0x2) +#define __allaccess_impl _SA_annotes1(SAL_access, 0x3) + +#define __readaccess_impl_notref _Notref_ _SA_annotes1(SAL_access, 0x1) +#define __writeaccess_impl_notref _Notref_ _SA_annotes1(SAL_access, 0x2) +#define __allaccess_impl_notref _Notref_ _SA_annotes1(SAL_access, 0x3) + +#define __cap_impl(size) _SA_annotes1(SAL_writableTo,elementCount(size)) +#define __cap_c_impl(size) _SA_annotes1(SAL_writableTo,elementCount(size)) +#define __cap_c_one_notref_impl _Notref_ _SA_annotes1(SAL_writableTo,elementCount(1)) +#define __cap_for_impl(param) _SA_annotes1(SAL_writableTo,inexpressibleCount(sizeof(param))) +#define __cap_x_impl(size) _SA_annotes1(SAL_writableTo,inexpressibleCount(#size)) + +#define __bytecap_impl(size) _SA_annotes1(SAL_writableTo,byteCount(size)) +#define __bytecap_c_impl(size) _SA_annotes1(SAL_writableTo,byteCount(size)) +#define __bytecap_x_impl(size) _SA_annotes1(SAL_writableTo,inexpressibleCount(#size)) + +#define __mult_impl(mult,size) _SA_annotes1(SAL_writableTo,(mult)*(size)) + +#define __count_impl(size) _SA_annotes1(SAL_readableTo,elementCount(size)) +#define __count_c_impl(size) _SA_annotes1(SAL_readableTo,elementCount(size)) +#define __count_x_impl(size) _SA_annotes1(SAL_readableTo,inexpressibleCount(#size)) + +#define __bytecount_impl(size) _SA_annotes1(SAL_readableTo,byteCount(size)) +#define __bytecount_c_impl(size) _SA_annotes1(SAL_readableTo,byteCount(size)) +#define __bytecount_x_impl(size) _SA_annotes1(SAL_readableTo,inexpressibleCount(#size)) + +#define _At_impl_(target, annos) _SA_annotes0(SAL_at(target)) _Group_(annos) +#define _At_buffer_impl_(target, iter, bound, annos) _SA_annotes3(SAL_at_buffer, target, iter, bound) _Group_(annos) +#define _Group_impl_(annos) _SA_annotes0(SAL_begin) annos _SA_annotes0(SAL_end) +#define _GrouP_impl_(annos) _SA_annotes0(SAL_BEGIN) annos _SA_annotes0(SAL_END) +#define _When_impl_(expr, annos) _SA_annotes0(SAL_when(expr)) _Group_(annos) + +#define 
_Use_decl_anno_impl_ __declspec("SAL_useHeader()") // this is a special case! + +#define _Pre1_impl_(p1) _Pre_impl_ p1 +#define _Pre2_impl_(p1,p2) _Pre_impl_ p1 _Pre_impl_ p2 +#define _Pre3_impl_(p1,p2,p3) _Pre_impl_ p1 _Pre_impl_ p2 _Pre_impl_ p3 + +#define _Post1_impl_(p1) _Post_impl_ p1 +#define _Post2_impl_(p1,p2) _Post_impl_ p1 _Post_impl_ p2 +#define _Post3_impl_(p1,p2,p3) _Post_impl_ p1 _Post_impl_ p2 _Post_impl_ p3 + +#define _Ret1_impl_(p1) _Post_impl_ p1 +#define _Ret2_impl_(p1,p2) _Post_impl_ p1 _Post_impl_ p2 +#define _Ret3_impl_(p1,p2,p3) _Post_impl_ p1 _Post_impl_ p2 _Post_impl_ p3 + +#define _Deref_pre1_impl_(p1) _Deref_pre_impl_ p1 +#define _Deref_pre2_impl_(p1,p2) _Deref_pre_impl_ p1 _Deref_pre_impl_ p2 +#define _Deref_pre3_impl_(p1,p2,p3) _Deref_pre_impl_ p1 _Deref_pre_impl_ p2 _Deref_pre_impl_ p3 + +#define _Deref_post1_impl_(p1) _Deref_post_impl_ p1 +#define _Deref_post2_impl_(p1,p2) _Deref_post_impl_ p1 _Deref_post_impl_ p2 +#define _Deref_post3_impl_(p1,p2,p3) _Deref_post_impl_ p1 _Deref_post_impl_ p2 _Deref_post_impl_ p3 + +#define _Deref_ret1_impl_(p1) _Deref_post_impl_ p1 +#define _Deref_ret2_impl_(p1,p2) _Deref_post_impl_ p1 _Deref_post_impl_ p2 +#define _Deref_ret3_impl_(p1,p2,p3) _Deref_post_impl_ p1 _Deref_post_impl_ p2 _Deref_post_impl_ p3 + +#define _Deref2_pre1_impl_(p1) _Deref_pre_impl_ _Notref_impl_ _Deref_impl_ p1 +#define _Deref2_post1_impl_(p1) _Deref_post_impl_ _Notref_impl_ _Deref_impl_ p1 +#define _Deref2_ret1_impl_(p1) _Deref_post_impl_ _Notref_impl_ _Deref_impl_ p1 + +#define __inner_typefix(ctype) _SA_annotes1(SAL_typefix, ctype) +#define __inner_exceptthat _SA_annotes0(SAL_except) + +#elif defined(_MSC_EXTENSIONS) && !defined( MIDL_PASS ) && !defined(__midl) && !defined(RC_INVOKED) && defined(_PFT_VER) && _MSC_VER >= 1400 /*IFSTRIP=IGN*/ // ][ + +// minimum attribute expansion for foreground build + +#pragma push_macro( "SA" ) +#pragma push_macro( "REPEATABLE" ) + +#ifdef __cplusplus // [ +#define SA( id ) id +#define 
REPEATABLE [repeatable] +#else // !__cplusplus // ][ +#define SA( id ) SA_##id +#define REPEATABLE +#endif // !__cplusplus // ] + +REPEATABLE +[source_annotation_attribute( SA( Parameter ) )] +struct __P_impl +{ +#ifdef __cplusplus // [ + __P_impl(); +#endif // ] + int __d_; +}; +typedef struct __P_impl __P_impl; + +REPEATABLE +[source_annotation_attribute( SA( ReturnValue ) )] +struct __R_impl +{ +#ifdef __cplusplus // [ + __R_impl(); +#endif // ] + int __d_; +}; +typedef struct __R_impl __R_impl; + +[source_annotation_attribute( SA( Method ) )] +struct __M_ +{ +#ifdef __cplusplus // [ + __M_(); +#endif // ] + int __d_; +}; +typedef struct __M_ __M_; + +[source_annotation_attribute( SA( All ) )] +struct __A_ +{ +#ifdef __cplusplus // [ + __A_(); +#endif // ] + int __d_; +}; +typedef struct __A_ __A_; + +[source_annotation_attribute( SA( Field ) )] +struct __F_ +{ +#ifdef __cplusplus // [ + __F_(); +#endif // ] + int __d_; +}; +typedef struct __F_ __F_; + +#pragma pop_macro( "REPEATABLE" ) +#pragma pop_macro( "SA" ) + + +#define _SAL_nop_impl_ + +#define _At_impl_(target, annos) [__A_(__d_=0)] +#define _At_buffer_impl_(target, iter, bound, annos) [__A_(__d_=0)] +#define _When_impl_(expr, annos) annos +#define _Group_impl_(annos) annos +#define _GrouP_impl_(annos) annos +#define _Use_decl_anno_impl_ [__M_(__d_=0)] + +#define _Points_to_data_impl_ [__P_impl(__d_=0)] +#define _Literal_impl_ [__P_impl(__d_=0)] +#define _Notliteral_impl_ [__P_impl(__d_=0)] + +#define _Pre_valid_impl_ [__P_impl(__d_=0)] +#define _Post_valid_impl_ [__P_impl(__d_=0)] +#define _Ret_valid_impl_ [__R_impl(__d_=0)] + +#define _Check_return_impl_ [__R_impl(__d_=0)] +#define _Must_inspect_impl_ [__R_impl(__d_=0)] + +#define _Success_impl_(expr) [__M_(__d_=0)] +#define _On_failure_impl_(expr) [__M_(__d_=0)] +#define _Always_impl_(expr) [__M_(__d_=0)] + +#define _Printf_format_string_impl_ [__P_impl(__d_=0)] +#define _Scanf_format_string_impl_ [__P_impl(__d_=0)] +#define 
_Scanf_s_format_string_impl_ [__P_impl(__d_=0)] + +#define _Raises_SEH_exception_impl_ [__M_(__d_=0)] +#define _Maybe_raises_SEH_exception_impl_ [__M_(__d_=0)] + +#define _In_bound_impl_ [__P_impl(__d_=0)] +#define _Out_bound_impl_ [__P_impl(__d_=0)] +#define _Ret_bound_impl_ [__R_impl(__d_=0)] +#define _Deref_in_bound_impl_ [__P_impl(__d_=0)] +#define _Deref_out_bound_impl_ [__P_impl(__d_=0)] +#define _Deref_ret_bound_impl_ [__R_impl(__d_=0)] + +#define _Range_impl_(min,max) [__P_impl(__d_=0)] +#define _In_range_impl_(min,max) [__P_impl(__d_=0)] +#define _Out_range_impl_(min,max) [__P_impl(__d_=0)] +#define _Ret_range_impl_(min,max) [__R_impl(__d_=0)] +#define _Deref_in_range_impl_(min,max) [__P_impl(__d_=0)] +#define _Deref_out_range_impl_(min,max) [__P_impl(__d_=0)] +#define _Deref_ret_range_impl_(min,max) [__R_impl(__d_=0)] + +#define _Field_range_impl_(min,max) [__F_(__d_=0)] + +#define _Pre_satisfies_impl_(cond) [__A_(__d_=0)] +#define _Post_satisfies_impl_(cond) [__A_(__d_=0)] +#define _Satisfies_impl_(cond) [__A_(__d_=0)] + +#define _Null_impl_ [__A_(__d_=0)] +#define _Notnull_impl_ [__A_(__d_=0)] +#define _Maybenull_impl_ [__A_(__d_=0)] + +#define _Valid_impl_ [__A_(__d_=0)] +#define _Notvalid_impl_ [__A_(__d_=0)] +#define _Maybevalid_impl_ [__A_(__d_=0)] + +#define _Readable_bytes_impl_(size) [__A_(__d_=0)] +#define _Readable_elements_impl_(size) [__A_(__d_=0)] +#define _Writable_bytes_impl_(size) [__A_(__d_=0)] +#define _Writable_elements_impl_(size) [__A_(__d_=0)] + +#define _Null_terminated_impl_ [__A_(__d_=0)] +#define _NullNull_terminated_impl_ [__A_(__d_=0)] + +#define _Pre_impl_ [__P_impl(__d_=0)] +#define _Pre1_impl_(p1) [__P_impl(__d_=0)] +#define _Pre2_impl_(p1,p2) [__P_impl(__d_=0)] +#define _Pre3_impl_(p1,p2,p3) [__P_impl(__d_=0)] + +#define _Post_impl_ [__P_impl(__d_=0)] +#define _Post1_impl_(p1) [__P_impl(__d_=0)] +#define _Post2_impl_(p1,p2) [__P_impl(__d_=0)] +#define _Post3_impl_(p1,p2,p3) [__P_impl(__d_=0)] + +#define _Ret1_impl_(p1) 
[__R_impl(__d_=0)] +#define _Ret2_impl_(p1,p2) [__R_impl(__d_=0)] +#define _Ret3_impl_(p1,p2,p3) [__R_impl(__d_=0)] + +#define _Deref_pre1_impl_(p1) [__P_impl(__d_=0)] +#define _Deref_pre2_impl_(p1,p2) [__P_impl(__d_=0)] +#define _Deref_pre3_impl_(p1,p2,p3) [__P_impl(__d_=0)] + +#define _Deref_post1_impl_(p1) [__P_impl(__d_=0)] +#define _Deref_post2_impl_(p1,p2) [__P_impl(__d_=0)] +#define _Deref_post3_impl_(p1,p2,p3) [__P_impl(__d_=0)] + +#define _Deref_ret1_impl_(p1) [__R_impl(__d_=0)] +#define _Deref_ret2_impl_(p1,p2) [__R_impl(__d_=0)] +#define _Deref_ret3_impl_(p1,p2,p3) [__R_impl(__d_=0)] + +#define _Deref2_pre1_impl_(p1) //[__P_impl(__d_=0)] +#define _Deref2_post1_impl_(p1) //[__P_impl(__d_=0)] +#define _Deref2_ret1_impl_(p1) //[__P_impl(__d_=0)] + +#else // ][ + + +#define _SAL_nop_impl_ X + +#define _At_impl_(target, annos) +#define _When_impl_(expr, annos) +#define _Group_impl_(annos) +#define _GrouP_impl_(annos) +#define _At_buffer_impl_(target, iter, bound, annos) +#define _Use_decl_anno_impl_ +#define _Points_to_data_impl_ +#define _Literal_impl_ +#define _Notliteral_impl_ +#define _Notref_impl_ + +#define _Pre_valid_impl_ +#define _Post_valid_impl_ +#define _Ret_valid_impl_ + +#define _Check_return_impl_ +#define _Must_inspect_impl_ + +#define _Success_impl_(expr) +#define _On_failure_impl_(annos) +#define _Always_impl_(annos) + +#define _Printf_format_string_impl_ +#define _Scanf_format_string_impl_ +#define _Scanf_s_format_string_impl_ + +#define _In_bound_impl_ +#define _Out_bound_impl_ +#define _Ret_bound_impl_ +#define _Deref_in_bound_impl_ +#define _Deref_out_bound_impl_ +#define _Deref_ret_bound_impl_ + +#define _Range_impl_(min,max) +#define _In_range_impl_(min,max) +#define _Out_range_impl_(min,max) +#define _Ret_range_impl_(min,max) +#define _Deref_in_range_impl_(min,max) +#define _Deref_out_range_impl_(min,max) +#define _Deref_ret_range_impl_(min,max) + +#define _Satisfies_impl_(expr) +#define _Pre_satisfies_impl_(expr) +#define 
_Post_satisfies_impl_(expr) + +#define _Null_impl_ +#define _Notnull_impl_ +#define _Maybenull_impl_ + +#define _Valid_impl_ +#define _Notvalid_impl_ +#define _Maybevalid_impl_ + +#define _Field_range_impl_(min,max) + +#define _Pre_impl_ +#define _Pre1_impl_(p1) +#define _Pre2_impl_(p1,p2) +#define _Pre3_impl_(p1,p2,p3) + +#define _Post_impl_ +#define _Post1_impl_(p1) +#define _Post2_impl_(p1,p2) +#define _Post3_impl_(p1,p2,p3) + +#define _Ret1_impl_(p1) +#define _Ret2_impl_(p1,p2) +#define _Ret3_impl_(p1,p2,p3) + +#define _Deref_pre1_impl_(p1) +#define _Deref_pre2_impl_(p1,p2) +#define _Deref_pre3_impl_(p1,p2,p3) + +#define _Deref_post1_impl_(p1) +#define _Deref_post2_impl_(p1,p2) +#define _Deref_post3_impl_(p1,p2,p3) + +#define _Deref_ret1_impl_(p1) +#define _Deref_ret2_impl_(p1,p2) +#define _Deref_ret3_impl_(p1,p2,p3) + +#define _Deref2_pre1_impl_(p1) +#define _Deref2_post1_impl_(p1) +#define _Deref2_ret1_impl_(p1) + +#define _Readable_bytes_impl_(size) +#define _Readable_elements_impl_(size) +#define _Writable_bytes_impl_(size) +#define _Writable_elements_impl_(size) + +#define _Null_terminated_impl_ +#define _NullNull_terminated_impl_ + +// Obsolete -- may be needed for transition to attributes. +#define __inner_typefix(ctype) +#define __inner_exceptthat + +#endif // ] + +// This section contains the deprecated annotations + +/* + ------------------------------------------------------------------------------- + Introduction + + sal.h provides a set of annotations to describe how a function uses its + parameters - the assumptions it makes about them, and the guarantees it makes + upon finishing. + + Annotations may be placed before either a function parameter's type or its return + type, and describe the function's behavior regarding the parameter or return value. + There are two classes of annotations: buffer annotations and advanced annotations. 
+ Buffer annotations describe how functions use their pointer parameters, and + advanced annotations either describe complex/unusual buffer behavior, or provide + additional information about a parameter that is not otherwise expressible. + + ------------------------------------------------------------------------------- + Buffer Annotations + + The most important annotations in sal.h provide a consistent way to annotate + buffer parameters or return values for a function. Each of these annotations describes + a single buffer (which could be a string, a fixed-length or variable-length array, + or just a pointer) that the function interacts with: where it is, how large it is, + how much is initialized, and what the function does with it. + + The appropriate macro for a given buffer can be constructed using the table below. + Just pick the appropriate values from each category, and combine them together + with a leading underscore. Some combinations of values do not make sense as buffer + annotations. Only meaningful annotations can be added to your code; for a list of + these, see the buffer annotation definitions section. + + Only a single buffer annotation should be used for each parameter. + + |------------|------------|---------|--------|----------|----------|---------------| + | Level | Usage | Size | Output | NullTerm | Optional | Parameters | + |------------|------------|---------|--------|----------|----------|---------------| + | <> | <> | <> | <> | _z | <> | <> | + | _deref | _in | _ecount | _full | _nz | _opt | (size) | + | _deref_opt | _out | _bcount | _part | | | (size,length) | + | | _inout | | | | | | + | | | | | | | | + |------------|------------|---------|--------|----------|----------|---------------| + + Level: Describes the buffer pointer's level of indirection from the parameter or + return value 'p'. + + <> : p is the buffer pointer. + _deref : *p is the buffer pointer. p must not be NULL. + _deref_opt : *p may be the buffer pointer. 
p may be NULL, in which case the rest of + the annotation is ignored. + + Usage: Describes how the function uses the buffer. + + <> : The buffer is not accessed. If used on the return value or with _deref, the + function will provide the buffer, and it will be uninitialized at exit. + Otherwise, the caller must provide the buffer. This should only be used + for alloc and free functions. + _in : The function will only read from the buffer. The caller must provide the + buffer and initialize it. Cannot be used with _deref. + _out : The function will only write to the buffer. If used on the return value or + with _deref, the function will provide the buffer and initialize it. + Otherwise, the caller must provide the buffer, and the function will + initialize it. + _inout : The function may freely read from and write to the buffer. The caller must + provide the buffer and initialize it. If used with _deref, the buffer may + be reallocated by the function. + + Size: Describes the total size of the buffer. This may be less than the space actually + allocated for the buffer, in which case it describes the accessible amount. + + <> : No buffer size is given. If the type specifies the buffer size (such as + with LPSTR and LPWSTR), that amount is used. Otherwise, the buffer is one + element long. Must be used with _in, _out, or _inout. + _ecount : The buffer size is an explicit element count. + _bcount : The buffer size is an explicit byte count. + + Output: Describes how much of the buffer will be initialized by the function. For + _inout buffers, this also describes how much is initialized at entry. Omit this + category for _in buffers; they must be fully initialized by the caller. + + <> : The type specifies how much is initialized. For instance, a function initializing + an LPWSTR must NULL-terminate the string. + _full : The function initializes the entire buffer. + _part : The function initializes part of the buffer, and explicitly indicates how much. 
+ + NullTerm: States if the presence of a '\0' marks the end of valid elements in the buffer. + _z : A '\0' indicates the end of the buffer + _nz : The buffer may not be null terminated and a '\0' does not indicate the end of the + buffer. + Optional: Describes if the buffer itself is optional. + + <> : The pointer to the buffer must not be NULL. + _opt : The pointer to the buffer might be NULL. It will be checked before being dereferenced. + + Parameters: Gives explicit counts for the size and length of the buffer. + + <> : There is no explicit count. Use when neither _ecount nor _bcount is used. + (size) : Only the buffer's total size is given. Use with _ecount or _bcount but not _part. + (size,length) : The buffer's total size and initialized length are given. Use with _ecount_part + and _bcount_part. + + ------------------------------------------------------------------------------- + Buffer Annotation Examples + + LWSTDAPI_(BOOL) StrToIntExA( + __in LPCSTR pszString, + DWORD dwFlags, + __out int *piRet -- A pointer whose dereference will be filled in. + ); + + void MyPaintingFunction( + __in HWND hwndControl, -- An initialized read-only parameter. + __in_opt HDC hdcOptional, -- An initialized read-only parameter that might be NULL. + __inout IPropertyStore *ppsStore -- An initialized parameter that may be freely used + -- and modified. + ); + + LWSTDAPI_(BOOL) PathCompactPathExA( + __out_ecount(cchMax) LPSTR pszOut, -- A string buffer with cch elements that will + -- be NULL terminated on exit. + __in LPCSTR pszSrc, + UINT cchMax, + DWORD dwFlags + ); + + HRESULT SHLocalAllocBytes( + size_t cb, + __deref_bcount(cb) T **ppv -- A pointer whose dereference will be set to an + -- uninitialized buffer with cb bytes. + ); + + __inout_bcount_full(cb) : A buffer with cb elements that is fully initialized at + entry and exit, and may be written to by this function. 
+ + __out_ecount_part(count, *countOut) : A buffer with count elements that will be + partially initialized by this function. The function indicates how much it + initialized by setting *countOut. + + ------------------------------------------------------------------------------- + Advanced Annotations + + Advanced annotations describe behavior that is not expressible with the regular + buffer macros. These may be used either to annotate buffer parameters that involve + complex or conditional behavior, or to enrich existing annotations with additional + information. + + __success(expr) f : + indicates whether function f succeeded or not. If is true at exit, + all the function's guarantees (as given by other annotations) must hold. If + is false at exit, the caller should not expect any of the function's guarantees + to hold. If not used, the function must always satisfy its guarantees. Added + automatically to functions that indicate success in standard ways, such as by + returning an HRESULT. + + __nullterminated p : + Pointer p is a buffer that may be read or written up to and including the first + NULL character or pointer. May be used on typedefs, which marks valid (properly + initialized) instances of that type as being NULL-terminated. + + __nullnullterminated p : + Pointer p is a buffer that may be read or written up to and including the first + sequence of two NULL characters or pointers. May be used on typedefs, which marks + valid instances of that type as being double-NULL terminated. + + __reserved v : + Value v must be 0/NULL, reserved for future use. + + __checkReturn v : + Return value v must not be ignored by callers of this function. + + __typefix(ctype) v : + Value v should be treated as an instance of ctype, rather than its declared type. + + __override f : + Specify C#-style 'override' behaviour for overriding virtual methods. + + __callback f : + Function f can be used as a function pointer. 
+ + __format_string p : + Pointer p is a string that contains % markers in the style of printf. + + __blocksOn(resource) f : + Function f blocks on the resource 'resource'. + + FALLTHROUGH : + Annotates switch statement labels where fall-through is desired, to distinguish + from forgotten break statements. + + ------------------------------------------------------------------------------- + Advanced Annotation Examples + + __success(return != FALSE) LWSTDAPI_(BOOL) + PathCanonicalizeA(__out_ecount(MAX_PATH) LPSTR pszBuf, LPCSTR pszPath) : + pszBuf is only guaranteed to be NULL-terminated when TRUE is returned. + + typedef __nullterminated WCHAR* LPWSTR : Initialized LPWSTRs are NULL-terminated strings. + + __out_ecount(cch) __typefix(LPWSTR) void *psz : psz is a buffer parameter which will be + a NULL-terminated WCHAR string at exit, and which initially contains cch WCHARs. + + ------------------------------------------------------------------------------- +*/ + +#define __specstrings + +#ifdef __cplusplus // [ +#ifndef __nothrow // [ +# define __nothrow NOTHROW_DECL +#endif // ] +extern "C" { +#else // ][ +#ifndef __nothrow // [ +# define __nothrow +#endif // ] +#endif /* #ifdef __cplusplus */ // ] + + +/* + ------------------------------------------------------------------------------- + Helper Macro Definitions + + These express behavior common to many of the high-level annotations. + DO NOT USE THESE IN YOUR CODE. + ------------------------------------------------------------------------------- +*/ + +/* + The helper annotations are only understood by the compiler version used by + various defect detection tools. When the regular compiler is running, they + are defined into nothing, and do not affect the compiled code. +*/ + +#if !defined(__midl) && defined(_PREFAST_) // [ + + /* + In the primitive "SAL_*" annotations "SAL" stands for Standard + Annotation Language. 
These "SAL_*" annotations are the + primitives the compiler understands and high-level MACROs + will decompose into these primitives. + */ + + #define _SA_SPECSTRIZE( x ) #x + + /* + __notnull p + __maybenull p + + Annotates a pointer p. States that pointer p is never null or maybe null. + */ + + #define __notnull _Notnull_impl_ + #define __maybenull _Maybenull_impl_ + + /* + __readonly l + __notreadonly l + __maybereadonly l + + Annotates a location l. States that location l is not modified after + this point. If the annotation is placed on the precondition state of + a function, the restriction only applies until the postcondition state + of the function. __maybereadonly states that the annotated location + may be modified, whereas __notreadonly states that a location must be + modified. + */ + + #define __readonly _Pre1_impl_(__readaccess_impl) + #define __notreadonly _Pre1_impl_(__allaccess_impl) + #define __maybereadonly _Pre1_impl_(__readaccess_impl) + + /* + __valid v + __notvalid v + __maybevalid v + + Annotates any value v. States that the value satisfies all properties of + valid values of its type. For example, for a string buffer, valid means + that the buffer pointer is either NULL or points to a NULL-terminated string. + */ + + #define __valid _Valid_impl_ + #define __notvalid _Notvalid_impl_ + #define __maybevalid _Maybevalid_impl_ + + /* + __readableTo(extent) p + + Annotates a buffer pointer p. If the buffer can be read, extent describes + how much of the buffer is readable. For a reader of the buffer, this is + an explicit permission to read up to that amount, rather than a restriction to + read only up to it. + */ + + #define __readableTo(extent) _SA_annotes1(SAL_readableTo, extent) + + /* + + __elem_readableTo(size) + + Annotates a buffer pointer p as being readable to size elements. 
+ */ + + #define __elem_readableTo(size) _SA_annotes1(SAL_readableTo, elementCount( size )) + + /* + __byte_readableTo(size) + + Annotates a buffer pointer p as being readable to size bytes. + */ + #define __byte_readableTo(size) _SA_annotes1(SAL_readableTo, byteCount(size)) + + /* + __writableTo(extent) p + + Annotates a buffer pointer p. If the buffer can be modified, extent + describes how much of the buffer is writable (usually the allocation + size). For a writer of the buffer, this is an explicit permission to + write up to that amount, rather than a restriction to write only up to it. + */ + #define __writableTo(size) _SA_annotes1(SAL_writableTo, size) + + /* + __elem_writableTo(size) + + Annotates a buffer pointer p as being writable to size elements. + */ + #define __elem_writableTo(size) _SA_annotes1(SAL_writableTo, elementCount( size )) + + /* + __byte_writableTo(size) + + Annotates a buffer pointer p as being writable to size bytes. + */ + #define __byte_writableTo(size) _SA_annotes1(SAL_writableTo, byteCount( size)) + + /* + __deref p + + Annotates a pointer p. The next annotation applies one dereference down + in the type. If readableTo(p, size) then the next annotation applies to + all elements *(p+i) for which i satisfies the size. If p is a pointer + to a struct, the next annotation applies to all fields of the struct. 
+ */ + #define __deref _Deref_impl_ + + /* + __pre __next_annotation + + The next annotation applies in the precondition state + */ + #define __pre _Pre_impl_ + + /* + __post __next_annotation + + The next annotation applies in the postcondition state + */ + #define __post _Post_impl_ + + /* + __precond() + + When is true, the next annotation applies in the precondition state + (currently not enabled) + */ + #define __precond(expr) __pre + + /* + __postcond() + + When is true, the next annotation applies in the postcondition state + (currently not enabled) + */ + #define __postcond(expr) __post + + /* + __exceptthat + + Given a set of annotations Q containing __exceptthat maybeP, the effect of + the except clause is to erase any P or notP annotations (explicit or + implied) within Q at the same level of dereferencing that the except + clause appears, and to replace it with maybeP. + + Example 1: __valid __pre_except_maybenull on a pointer p means that the + pointer may be null, and is otherwise valid, thus overriding + the implicit notnull annotation implied by __valid on + pointers. + + Example 2: __valid __deref __pre_except_maybenull on an int **p means + that p is not null (implied by valid), but the elements + pointed to by p could be null, and are otherwise valid. + */ + #define __exceptthat __inner_exceptthat + + /* + _refparam + + Added to all out parameter macros to indicate that they are all reference + parameters. + */ + #define __refparam _Notref_ __deref __notreadonly + + /* + __inner_* + + Helper macros that directly correspond to certain high-level annotations. + + */ + + /* + Macros to classify the entrypoints and indicate their category. + + Pre-defined control point categories include: RPC, LPC, DeviceDriver, UserToKernel, ISAPI, COM. + + */ + #define __inner_control_entrypoint(category) _SA_annotes2(SAL_entrypoint, controlEntry, category) + + + /* + Pre-defined data entry point categories include: Registry, File, Network. 
+ */ + #define __inner_data_entrypoint(category) _SA_annotes2(SAL_entrypoint, dataEntry, category) + + #define __inner_override _SA_annotes0(__override) + #define __inner_callback _SA_annotes0(__callback) + #define __inner_blocksOn(resource) _SA_annotes1(SAL_blocksOn, resource) + + #define __post_except_maybenull __post __inner_exceptthat _Maybenull_impl_ + #define __pre_except_maybenull __pre __inner_exceptthat _Maybenull_impl_ + + #define __post_deref_except_maybenull __post __deref __inner_exceptthat _Maybenull_impl_ + #define __pre_deref_except_maybenull __pre __deref __inner_exceptthat _Maybenull_impl_ + + #define __inexpressible_readableTo(size) _Readable_elements_impl_(_Inexpressible_(size)) + #define __inexpressible_writableTo(size) _Writable_elements_impl_(_Inexpressible_(size)) + + +#else // ][ + #define __notnull + #define __deref + #define __maybenull + #define __readonly + #define __notreadonly + #define __maybereadonly + #define __valid + #define __notvalid + #define __maybevalid + #define __readableTo(extent) + #define __elem_readableTo(size) + #define __byte_readableTo(size) + #define __writableTo(size) + #define __elem_writableTo(size) + #define __byte_writableTo(size) + #define __pre + #define __post + #define __precond(expr) + #define __postcond(expr) + #define __exceptthat + #define __inner_override + #define __inner_callback + #define __inner_blocksOn(resource) + #define __refparam + #define __inner_control_entrypoint(category) + #define __inner_data_entrypoint(category) + + #define __post_except_maybenull + #define __pre_except_maybenull + #define __post_deref_except_maybenull + #define __pre_deref_except_maybenull + + #define __inexpressible_readableTo(size) + #define __inexpressible_writableTo(size) + +#endif /* #if !defined(__midl) && defined(_PREFAST_) */ // ] + +/* +------------------------------------------------------------------------------- +Buffer Annotation Definitions + +Any of these may be used to directly annotate functions, but 
only one should +be used for each parameter. To determine which annotation to use for a given +buffer, use the table in the buffer annotations section. +------------------------------------------------------------------------------- +*/ + +#define __ecount(size) _SAL1_Source_(__ecount, (size), __notnull __elem_writableTo(size)) +#define __bcount(size) _SAL1_Source_(__bcount, (size), __notnull __byte_writableTo(size)) +#define __in_ecount(size) _SAL1_Source_(__in_ecount, (size), _In_reads_(size)) +#define __in_bcount(size) _SAL1_Source_(__in_bcount, (size), _In_reads_bytes_(size)) +#define __in_z _SAL1_Source_(__in_z, (), _In_z_) +#define __in_ecount_z(size) _SAL1_Source_(__in_ecount_z, (size), _In_reads_z_(size)) +#define __in_bcount_z(size) _SAL1_Source_(__in_bcount_z, (size), __in_bcount(size) __pre __nullterminated) +#define __in_nz _SAL1_Source_(__in_nz, (), __in) +#define __in_ecount_nz(size) _SAL1_Source_(__in_ecount_nz, (size), __in_ecount(size)) +#define __in_bcount_nz(size) _SAL1_Source_(__in_bcount_nz, (size), __in_bcount(size)) +#define __out_ecount(size) _SAL1_Source_(__out_ecount, (size), _Out_writes_(size)) +#define __out_bcount(size) _SAL1_Source_(__out_bcount, (size), _Out_writes_bytes_(size)) +#define __out_ecount_part(size,length) _SAL1_Source_(__out_ecount_part, (size,length), _Out_writes_to_(size,length)) +#define __out_bcount_part(size,length) _SAL1_Source_(__out_bcount_part, (size,length), _Out_writes_bytes_to_(size,length)) +#define __out_ecount_full(size) _SAL1_Source_(__out_ecount_full, (size), _Out_writes_all_(size)) +#define __out_bcount_full(size) _SAL1_Source_(__out_bcount_full, (size), _Out_writes_bytes_all_(size)) +#define __out_z _SAL1_Source_(__out_z, (), __post __valid __refparam __post __nullterminated) +#define __out_z_opt _SAL1_Source_(__out_z_opt, (), __post __valid __refparam __post __nullterminated __pre_except_maybenull) +#define __out_ecount_z(size) _SAL1_Source_(__out_ecount_z, (size), __ecount(size) __post __valid 
__refparam __post __nullterminated) +#define __out_bcount_z(size) _SAL1_Source_(__out_bcount_z, (size), __bcount(size) __post __valid __refparam __post __nullterminated) +#define __out_ecount_part_z(size,length) _SAL1_Source_(__out_ecount_part_z, (size,length), __out_ecount_part(size,length) __post __nullterminated) +#define __out_bcount_part_z(size,length) _SAL1_Source_(__out_bcount_part_z, (size,length), __out_bcount_part(size,length) __post __nullterminated) +#define __out_ecount_full_z(size) _SAL1_Source_(__out_ecount_full_z, (size), __out_ecount_full(size) __post __nullterminated) +#define __out_bcount_full_z(size) _SAL1_Source_(__out_bcount_full_z, (size), __out_bcount_full(size) __post __nullterminated) +#define __out_nz _SAL1_Source_(__out_nz, (), __post __valid __refparam) +#define __out_nz_opt _SAL1_Source_(__out_nz_opt, (), __post __valid __refparam __post_except_maybenull_) +#define __out_ecount_nz(size) _SAL1_Source_(__out_ecount_nz, (size), __ecount(size) __post __valid __refparam) +#define __out_bcount_nz(size) _SAL1_Source_(__out_bcount_nz, (size), __bcount(size) __post __valid __refparam) +#define __inout _SAL1_Source_(__inout, (), _Inout_) +#define __inout_ecount(size) _SAL1_Source_(__inout_ecount, (size), _Inout_updates_(size)) +#define __inout_bcount(size) _SAL1_Source_(__inout_bcount, (size), _Inout_updates_bytes_(size)) +#define __inout_ecount_part(size,length) _SAL1_Source_(__inout_ecount_part, (size,length), _Inout_updates_to_(size,length)) +#define __inout_bcount_part(size,length) _SAL1_Source_(__inout_bcount_part, (size,length), _Inout_updates_bytes_to_(size,length)) +#define __inout_ecount_full(size) _SAL1_Source_(__inout_ecount_full, (size), _Inout_updates_all_(size)) +#define __inout_bcount_full(size) _SAL1_Source_(__inout_bcount_full, (size), _Inout_updates_bytes_all_(size)) +#define __inout_z _SAL1_Source_(__inout_z, (), _Inout_z_) +#define __inout_ecount_z(size) _SAL1_Source_(__inout_ecount_z, (size), _Inout_updates_z_(size)) 
+#define __inout_bcount_z(size) _SAL1_Source_(__inout_bcount_z, (size), __inout_bcount(size) __pre __nullterminated __post __nullterminated) +#define __inout_nz _SAL1_Source_(__inout_nz, (), __inout) +#define __inout_ecount_nz(size) _SAL1_Source_(__inout_ecount_nz, (size), __inout_ecount(size)) +#define __inout_bcount_nz(size) _SAL1_Source_(__inout_bcount_nz, (size), __inout_bcount(size)) +#define __ecount_opt(size) _SAL1_Source_(__ecount_opt, (size), __ecount(size) __pre_except_maybenull) +#define __bcount_opt(size) _SAL1_Source_(__bcount_opt, (size), __bcount(size) __pre_except_maybenull) +#define __in_opt _SAL1_Source_(__in_opt, (), _In_opt_) +#define __in_ecount_opt(size) _SAL1_Source_(__in_ecount_opt, (size), _In_reads_opt_(size)) +#define __in_bcount_opt(size) _SAL1_Source_(__in_bcount_opt, (size), _In_reads_bytes_opt_(size)) +#define __in_z_opt _SAL1_Source_(__in_z_opt, (), _In_opt_z_) +#define __in_ecount_z_opt(size) _SAL1_Source_(__in_ecount_z_opt, (size), __in_ecount_opt(size) __pre __nullterminated) +#define __in_bcount_z_opt(size) _SAL1_Source_(__in_bcount_z_opt, (size), __in_bcount_opt(size) __pre __nullterminated) +#define __in_nz_opt _SAL1_Source_(__in_nz_opt, (), __in_opt) +#define __in_ecount_nz_opt(size) _SAL1_Source_(__in_ecount_nz_opt, (size), __in_ecount_opt(size)) +#define __in_bcount_nz_opt(size) _SAL1_Source_(__in_bcount_nz_opt, (size), __in_bcount_opt(size)) +#define __out_opt _SAL1_Source_(__out_opt, (), _Out_opt_) +#define __out_ecount_opt(size) _SAL1_Source_(__out_ecount_opt, (size), _Out_writes_opt_(size)) +#define __out_bcount_opt(size) _SAL1_Source_(__out_bcount_opt, (size), _Out_writes_bytes_opt_(size)) +#define __out_ecount_part_opt(size,length) _SAL1_Source_(__out_ecount_part_opt, (size,length), __out_ecount_part(size,length) __pre_except_maybenull) +#define __out_bcount_part_opt(size,length) _SAL1_Source_(__out_bcount_part_opt, (size,length), __out_bcount_part(size,length) __pre_except_maybenull) +#define 
__out_ecount_full_opt(size) _SAL1_Source_(__out_ecount_full_opt, (size), __out_ecount_full(size) __pre_except_maybenull) +#define __out_bcount_full_opt(size) _SAL1_Source_(__out_bcount_full_opt, (size), __out_bcount_full(size) __pre_except_maybenull) +#define __out_ecount_z_opt(size) _SAL1_Source_(__out_ecount_z_opt, (size), __out_ecount_opt(size) __post __nullterminated) +#define __out_bcount_z_opt(size) _SAL1_Source_(__out_bcount_z_opt, (size), __out_bcount_opt(size) __post __nullterminated) +#define __out_ecount_part_z_opt(size,length) _SAL1_Source_(__out_ecount_part_z_opt, (size,length), __out_ecount_part_opt(size,length) __post __nullterminated) +#define __out_bcount_part_z_opt(size,length) _SAL1_Source_(__out_bcount_part_z_opt, (size,length), __out_bcount_part_opt(size,length) __post __nullterminated) +#define __out_ecount_full_z_opt(size) _SAL1_Source_(__out_ecount_full_z_opt, (size), __out_ecount_full_opt(size) __post __nullterminated) +#define __out_bcount_full_z_opt(size) _SAL1_Source_(__out_bcount_full_z_opt, (size), __out_bcount_full_opt(size) __post __nullterminated) +#define __out_ecount_nz_opt(size) _SAL1_Source_(__out_ecount_nz_opt, (size), __out_ecount_opt(size) __post __nullterminated) +#define __out_bcount_nz_opt(size) _SAL1_Source_(__out_bcount_nz_opt, (size), __out_bcount_opt(size) __post __nullterminated) +#define __inout_opt _SAL1_Source_(__inout_opt, (), _Inout_opt_) +#define __inout_ecount_opt(size) _SAL1_Source_(__inout_ecount_opt, (size), __inout_ecount(size) __pre_except_maybenull) +#define __inout_bcount_opt(size) _SAL1_Source_(__inout_bcount_opt, (size), __inout_bcount(size) __pre_except_maybenull) +#define __inout_ecount_part_opt(size,length) _SAL1_Source_(__inout_ecount_part_opt, (size,length), __inout_ecount_part(size,length) __pre_except_maybenull) +#define __inout_bcount_part_opt(size,length) _SAL1_Source_(__inout_bcount_part_opt, (size,length), __inout_bcount_part(size,length) __pre_except_maybenull) +#define 
__inout_ecount_full_opt(size) _SAL1_Source_(__inout_ecount_full_opt, (size), __inout_ecount_full(size) __pre_except_maybenull) +#define __inout_bcount_full_opt(size) _SAL1_Source_(__inout_bcount_full_opt, (size), __inout_bcount_full(size) __pre_except_maybenull) +#define __inout_z_opt _SAL1_Source_(__inout_z_opt, (), __inout_opt __pre __nullterminated __post __nullterminated) +#define __inout_ecount_z_opt(size) _SAL1_Source_(__inout_ecount_z_opt, (size), __inout_ecount_opt(size) __pre __nullterminated __post __nullterminated) +#define __inout_ecount_z_opt(size) _SAL1_Source_(__inout_ecount_z_opt, (size), __inout_ecount_opt(size) __pre __nullterminated __post __nullterminated) +#define __inout_bcount_z_opt(size) _SAL1_Source_(__inout_bcount_z_opt, (size), __inout_bcount_opt(size)) +#define __inout_nz_opt _SAL1_Source_(__inout_nz_opt, (), __inout_opt) +#define __inout_ecount_nz_opt(size) _SAL1_Source_(__inout_ecount_nz_opt, (size), __inout_ecount_opt(size)) +#define __inout_bcount_nz_opt(size) _SAL1_Source_(__inout_bcount_nz_opt, (size), __inout_bcount_opt(size)) +#define __deref_ecount(size) _SAL1_Source_(__deref_ecount, (size), _Notref_ __ecount(1) __post _Notref_ __elem_readableTo(1) __post _Notref_ __deref _Notref_ __notnull __post __deref __elem_writableTo(size)) +#define __deref_bcount(size) _SAL1_Source_(__deref_bcount, (size), _Notref_ __ecount(1) __post _Notref_ __elem_readableTo(1) __post _Notref_ __deref _Notref_ __notnull __post __deref __byte_writableTo(size)) +#define __deref_out _SAL1_Source_(__deref_out, (), _Outptr_) +#define __deref_out_ecount(size) _SAL1_Source_(__deref_out_ecount, (size), _Outptr_result_buffer_(size)) +#define __deref_out_bcount(size) _SAL1_Source_(__deref_out_bcount, (size), _Outptr_result_bytebuffer_(size)) +#define __deref_out_ecount_part(size,length) _SAL1_Source_(__deref_out_ecount_part, (size,length), _Outptr_result_buffer_to_(size,length)) +#define __deref_out_bcount_part(size,length) _SAL1_Source_(__deref_out_bcount_part, 
(size,length), _Outptr_result_bytebuffer_to_(size,length)) +#define __deref_out_ecount_full(size) _SAL1_Source_(__deref_out_ecount_full, (size), __deref_out_ecount_part(size,size)) +#define __deref_out_bcount_full(size) _SAL1_Source_(__deref_out_bcount_full, (size), __deref_out_bcount_part(size,size)) +#define __deref_out_z _SAL1_Source_(__deref_out_z, (), _Outptr_result_z_) +#define __deref_out_ecount_z(size) _SAL1_Source_(__deref_out_ecount_z, (size), __deref_out_ecount(size) __post __deref __nullterminated) +#define __deref_out_bcount_z(size) _SAL1_Source_(__deref_out_bcount_z, (size), __deref_out_bcount(size) __post __deref __nullterminated) +#define __deref_out_nz _SAL1_Source_(__deref_out_nz, (), __deref_out) +#define __deref_out_ecount_nz(size) _SAL1_Source_(__deref_out_ecount_nz, (size), __deref_out_ecount(size)) +#define __deref_out_bcount_nz(size) _SAL1_Source_(__deref_out_bcount_nz, (size), __deref_out_ecount(size)) +#define __deref_inout _SAL1_Source_(__deref_inout, (), _Notref_ __notnull _Notref_ __elem_readableTo(1) __pre __deref __valid __post _Notref_ __deref __valid __refparam) +#define __deref_inout_z _SAL1_Source_(__deref_inout_z, (), __deref_inout __pre __deref __nullterminated __post _Notref_ __deref __nullterminated) +#define __deref_inout_ecount(size) _SAL1_Source_(__deref_inout_ecount, (size), __deref_inout __pre __deref __elem_writableTo(size) __post _Notref_ __deref __elem_writableTo(size)) +#define __deref_inout_bcount(size) _SAL1_Source_(__deref_inout_bcount, (size), __deref_inout __pre __deref __byte_writableTo(size) __post _Notref_ __deref __byte_writableTo(size)) +#define __deref_inout_ecount_part(size,length) _SAL1_Source_(__deref_inout_ecount_part, (size,length), __deref_inout_ecount(size) __pre __deref __elem_readableTo(length) __post __deref __elem_readableTo(length)) +#define __deref_inout_bcount_part(size,length) _SAL1_Source_(__deref_inout_bcount_part, (size,length), __deref_inout_bcount(size) __pre __deref 
__byte_readableTo(length) __post __deref __byte_readableTo(length)) +#define __deref_inout_ecount_full(size) _SAL1_Source_(__deref_inout_ecount_full, (size), __deref_inout_ecount_part(size,size)) +#define __deref_inout_bcount_full(size) _SAL1_Source_(__deref_inout_bcount_full, (size), __deref_inout_bcount_part(size,size)) +#define __deref_inout_ecount_z(size) _SAL1_Source_(__deref_inout_ecount_z, (size), __deref_inout_ecount(size) __pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_inout_bcount_z(size) _SAL1_Source_(__deref_inout_bcount_z, (size), __deref_inout_bcount(size) __pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_inout_nz _SAL1_Source_(__deref_inout_nz, (), __deref_inout) +#define __deref_inout_ecount_nz(size) _SAL1_Source_(__deref_inout_ecount_nz, (size), __deref_inout_ecount(size)) +#define __deref_inout_bcount_nz(size) _SAL1_Source_(__deref_inout_bcount_nz, (size), __deref_inout_ecount(size)) +#define __deref_ecount_opt(size) _SAL1_Source_(__deref_ecount_opt, (size), __deref_ecount(size) __post_deref_except_maybenull) +#define __deref_bcount_opt(size) _SAL1_Source_(__deref_bcount_opt, (size), __deref_bcount(size) __post_deref_except_maybenull) +#define __deref_out_opt _SAL1_Source_(__deref_out_opt, (), __deref_out __post_deref_except_maybenull) +#define __deref_out_ecount_opt(size) _SAL1_Source_(__deref_out_ecount_opt, (size), __deref_out_ecount(size) __post_deref_except_maybenull) +#define __deref_out_bcount_opt(size) _SAL1_Source_(__deref_out_bcount_opt, (size), __deref_out_bcount(size) __post_deref_except_maybenull) +#define __deref_out_ecount_part_opt(size,length) _SAL1_Source_(__deref_out_ecount_part_opt, (size,length), __deref_out_ecount_part(size,length) __post_deref_except_maybenull) +#define __deref_out_bcount_part_opt(size,length) _SAL1_Source_(__deref_out_bcount_part_opt, (size,length), __deref_out_bcount_part(size,length) __post_deref_except_maybenull) +#define 
__deref_out_ecount_full_opt(size) _SAL1_Source_(__deref_out_ecount_full_opt, (size), __deref_out_ecount_full(size) __post_deref_except_maybenull) +#define __deref_out_bcount_full_opt(size) _SAL1_Source_(__deref_out_bcount_full_opt, (size), __deref_out_bcount_full(size) __post_deref_except_maybenull) +#define __deref_out_z_opt _SAL1_Source_(__deref_out_z_opt, (), _Outptr_result_maybenull_z_) +#define __deref_out_ecount_z_opt(size) _SAL1_Source_(__deref_out_ecount_z_opt, (size), __deref_out_ecount_opt(size) __post __deref __nullterminated) +#define __deref_out_bcount_z_opt(size) _SAL1_Source_(__deref_out_bcount_z_opt, (size), __deref_out_bcount_opt(size) __post __deref __nullterminated) +#define __deref_out_nz_opt _SAL1_Source_(__deref_out_nz_opt, (), __deref_out_opt) +#define __deref_out_ecount_nz_opt(size) _SAL1_Source_(__deref_out_ecount_nz_opt, (size), __deref_out_ecount_opt(size)) +#define __deref_out_bcount_nz_opt(size) _SAL1_Source_(__deref_out_bcount_nz_opt, (size), __deref_out_bcount_opt(size)) +#define __deref_inout_opt _SAL1_Source_(__deref_inout_opt, (), __deref_inout __pre_deref_except_maybenull __post_deref_except_maybenull) +#define __deref_inout_ecount_opt(size) _SAL1_Source_(__deref_inout_ecount_opt, (size), __deref_inout_ecount(size) __pre_deref_except_maybenull __post_deref_except_maybenull) +#define __deref_inout_bcount_opt(size) _SAL1_Source_(__deref_inout_bcount_opt, (size), __deref_inout_bcount(size) __pre_deref_except_maybenull __post_deref_except_maybenull) +#define __deref_inout_ecount_part_opt(size,length) _SAL1_Source_(__deref_inout_ecount_part_opt, (size,length), __deref_inout_ecount_part(size,length) __pre_deref_except_maybenull __post_deref_except_maybenull) +#define __deref_inout_bcount_part_opt(size,length) _SAL1_Source_(__deref_inout_bcount_part_opt, (size,length), __deref_inout_bcount_part(size,length) __pre_deref_except_maybenull __post_deref_except_maybenull) +#define __deref_inout_ecount_full_opt(size) 
_SAL1_Source_(__deref_inout_ecount_full_opt, (size), __deref_inout_ecount_full(size) __pre_deref_except_maybenull __post_deref_except_maybenull) +#define __deref_inout_bcount_full_opt(size) _SAL1_Source_(__deref_inout_bcount_full_opt, (size), __deref_inout_bcount_full(size) __pre_deref_except_maybenull __post_deref_except_maybenull) +#define __deref_inout_z_opt _SAL1_Source_(__deref_inout_z_opt, (), __deref_inout_opt __pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_inout_ecount_z_opt(size) _SAL1_Source_(__deref_inout_ecount_z_opt, (size), __deref_inout_ecount_opt(size) __pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_inout_bcount_z_opt(size) _SAL1_Source_(__deref_inout_bcount_z_opt, (size), __deref_inout_bcount_opt(size) __pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_inout_nz_opt _SAL1_Source_(__deref_inout_nz_opt, (), __deref_inout_opt) +#define __deref_inout_ecount_nz_opt(size) _SAL1_Source_(__deref_inout_ecount_nz_opt, (size), __deref_inout_ecount_opt(size)) +#define __deref_inout_bcount_nz_opt(size) _SAL1_Source_(__deref_inout_bcount_nz_opt, (size), __deref_inout_bcount_opt(size)) +#define __deref_opt_ecount(size) _SAL1_Source_(__deref_opt_ecount, (size), __deref_ecount(size) __pre_except_maybenull) +#define __deref_opt_bcount(size) _SAL1_Source_(__deref_opt_bcount, (size), __deref_bcount(size) __pre_except_maybenull) +#define __deref_opt_out _SAL1_Source_(__deref_opt_out, (), _Outptr_opt_) +#define __deref_opt_out_z _SAL1_Source_(__deref_opt_out_z, (), _Outptr_opt_result_z_) +#define __deref_opt_out_ecount(size) _SAL1_Source_(__deref_opt_out_ecount, (size), __deref_out_ecount(size) __pre_except_maybenull) +#define __deref_opt_out_bcount(size) _SAL1_Source_(__deref_opt_out_bcount, (size), __deref_out_bcount(size) __pre_except_maybenull) +#define __deref_opt_out_ecount_part(size,length) _SAL1_Source_(__deref_opt_out_ecount_part, (size,length), 
__deref_out_ecount_part(size,length) __pre_except_maybenull) +#define __deref_opt_out_bcount_part(size,length) _SAL1_Source_(__deref_opt_out_bcount_part, (size,length), __deref_out_bcount_part(size,length) __pre_except_maybenull) +#define __deref_opt_out_ecount_full(size) _SAL1_Source_(__deref_opt_out_ecount_full, (size), __deref_out_ecount_full(size) __pre_except_maybenull) +#define __deref_opt_out_bcount_full(size) _SAL1_Source_(__deref_opt_out_bcount_full, (size), __deref_out_bcount_full(size) __pre_except_maybenull) +#define __deref_opt_inout _SAL1_Source_(__deref_opt_inout, (), _Inout_opt_) +#define __deref_opt_inout_ecount(size) _SAL1_Source_(__deref_opt_inout_ecount, (size), __deref_inout_ecount(size) __pre_except_maybenull) +#define __deref_opt_inout_bcount(size) _SAL1_Source_(__deref_opt_inout_bcount, (size), __deref_inout_bcount(size) __pre_except_maybenull) +#define __deref_opt_inout_ecount_part(size,length) _SAL1_Source_(__deref_opt_inout_ecount_part, (size,length), __deref_inout_ecount_part(size,length) __pre_except_maybenull) +#define __deref_opt_inout_bcount_part(size,length) _SAL1_Source_(__deref_opt_inout_bcount_part, (size,length), __deref_inout_bcount_part(size,length) __pre_except_maybenull) +#define __deref_opt_inout_ecount_full(size) _SAL1_Source_(__deref_opt_inout_ecount_full, (size), __deref_inout_ecount_full(size) __pre_except_maybenull) +#define __deref_opt_inout_bcount_full(size) _SAL1_Source_(__deref_opt_inout_bcount_full, (size), __deref_inout_bcount_full(size) __pre_except_maybenull) +#define __deref_opt_inout_z _SAL1_Source_(__deref_opt_inout_z, (), __deref_opt_inout __pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_opt_inout_ecount_z(size) _SAL1_Source_(__deref_opt_inout_ecount_z, (size), __deref_opt_inout_ecount(size) __pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_opt_inout_bcount_z(size) _SAL1_Source_(__deref_opt_inout_bcount_z, (size), __deref_opt_inout_bcount(size) 
__pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_opt_inout_nz _SAL1_Source_(__deref_opt_inout_nz, (), __deref_opt_inout) +#define __deref_opt_inout_ecount_nz(size) _SAL1_Source_(__deref_opt_inout_ecount_nz, (size), __deref_opt_inout_ecount(size)) +#define __deref_opt_inout_bcount_nz(size) _SAL1_Source_(__deref_opt_inout_bcount_nz, (size), __deref_opt_inout_bcount(size)) +#define __deref_opt_ecount_opt(size) _SAL1_Source_(__deref_opt_ecount_opt, (size), __deref_ecount_opt(size) __pre_except_maybenull) +#define __deref_opt_bcount_opt(size) _SAL1_Source_(__deref_opt_bcount_opt, (size), __deref_bcount_opt(size) __pre_except_maybenull) +#define __deref_opt_out_opt _SAL1_Source_(__deref_opt_out_opt, (), _Outptr_opt_result_maybenull_) +#define __deref_opt_out_ecount_opt(size) _SAL1_Source_(__deref_opt_out_ecount_opt, (size), __deref_out_ecount_opt(size) __pre_except_maybenull) +#define __deref_opt_out_bcount_opt(size) _SAL1_Source_(__deref_opt_out_bcount_opt, (size), __deref_out_bcount_opt(size) __pre_except_maybenull) +#define __deref_opt_out_ecount_part_opt(size,length) _SAL1_Source_(__deref_opt_out_ecount_part_opt, (size,length), __deref_out_ecount_part_opt(size,length) __pre_except_maybenull) +#define __deref_opt_out_bcount_part_opt(size,length) _SAL1_Source_(__deref_opt_out_bcount_part_opt, (size,length), __deref_out_bcount_part_opt(size,length) __pre_except_maybenull) +#define __deref_opt_out_ecount_full_opt(size) _SAL1_Source_(__deref_opt_out_ecount_full_opt, (size), __deref_out_ecount_full_opt(size) __pre_except_maybenull) +#define __deref_opt_out_bcount_full_opt(size) _SAL1_Source_(__deref_opt_out_bcount_full_opt, (size), __deref_out_bcount_full_opt(size) __pre_except_maybenull) +#define __deref_opt_out_z_opt _SAL1_Source_(__deref_opt_out_z_opt, (), __post __deref __valid __refparam __pre_except_maybenull __pre_deref_except_maybenull __post_deref_except_maybenull __post __deref __nullterminated) +#define 
__deref_opt_out_ecount_z_opt(size) _SAL1_Source_(__deref_opt_out_ecount_z_opt, (size), __deref_opt_out_ecount_opt(size) __post __deref __nullterminated) +#define __deref_opt_out_bcount_z_opt(size) _SAL1_Source_(__deref_opt_out_bcount_z_opt, (size), __deref_opt_out_bcount_opt(size) __post __deref __nullterminated) +#define __deref_opt_out_nz_opt _SAL1_Source_(__deref_opt_out_nz_opt, (), __deref_opt_out_opt) +#define __deref_opt_out_ecount_nz_opt(size) _SAL1_Source_(__deref_opt_out_ecount_nz_opt, (size), __deref_opt_out_ecount_opt(size)) +#define __deref_opt_out_bcount_nz_opt(size) _SAL1_Source_(__deref_opt_out_bcount_nz_opt, (size), __deref_opt_out_bcount_opt(size)) +#define __deref_opt_inout_opt _SAL1_Source_(__deref_opt_inout_opt, (), __deref_inout_opt __pre_except_maybenull) +#define __deref_opt_inout_ecount_opt(size) _SAL1_Source_(__deref_opt_inout_ecount_opt, (size), __deref_inout_ecount_opt(size) __pre_except_maybenull) +#define __deref_opt_inout_bcount_opt(size) _SAL1_Source_(__deref_opt_inout_bcount_opt, (size), __deref_inout_bcount_opt(size) __pre_except_maybenull) +#define __deref_opt_inout_ecount_part_opt(size,length) _SAL1_Source_(__deref_opt_inout_ecount_part_opt, (size,length), __deref_inout_ecount_part_opt(size,length) __pre_except_maybenull) +#define __deref_opt_inout_bcount_part_opt(size,length) _SAL1_Source_(__deref_opt_inout_bcount_part_opt, (size,length), __deref_inout_bcount_part_opt(size,length) __pre_except_maybenull) +#define __deref_opt_inout_ecount_full_opt(size) _SAL1_Source_(__deref_opt_inout_ecount_full_opt, (size), __deref_inout_ecount_full_opt(size) __pre_except_maybenull) +#define __deref_opt_inout_bcount_full_opt(size) _SAL1_Source_(__deref_opt_inout_bcount_full_opt, (size), __deref_inout_bcount_full_opt(size) __pre_except_maybenull) +#define __deref_opt_inout_z_opt _SAL1_Source_(__deref_opt_inout_z_opt, (), __deref_opt_inout_opt __pre __deref __nullterminated __post __deref __nullterminated) +#define 
__deref_opt_inout_ecount_z_opt(size) _SAL1_Source_(__deref_opt_inout_ecount_z_opt, (size), __deref_opt_inout_ecount_opt(size) __pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_opt_inout_bcount_z_opt(size) _SAL1_Source_(__deref_opt_inout_bcount_z_opt, (size), __deref_opt_inout_bcount_opt(size) __pre __deref __nullterminated __post __deref __nullterminated) +#define __deref_opt_inout_nz_opt _SAL1_Source_(__deref_opt_inout_nz_opt, (), __deref_opt_inout_opt) +#define __deref_opt_inout_ecount_nz_opt(size) _SAL1_Source_(__deref_opt_inout_ecount_nz_opt, (size), __deref_opt_inout_ecount_opt(size)) +#define __deref_opt_inout_bcount_nz_opt(size) _SAL1_Source_(__deref_opt_inout_bcount_nz_opt, (size), __deref_opt_inout_bcount_opt(size)) + +/* +------------------------------------------------------------------------------- +Advanced Annotation Definitions + +Any of these may be used to directly annotate functions, and may be used in +combination with each other or with regular buffer macros. For an explanation +of each annotation, see the advanced annotations section. 
+------------------------------------------------------------------------------- +*/ + +#define __success(expr) _Success_(expr) +#define __nullterminated _Null_terminated_ +#define __nullnullterminated +#define __clr_reserved _SAL1_Source_(__reserved, (), _Reserved_) +#define __checkReturn _SAL1_Source_(__checkReturn, (), _Check_return_) +#define __typefix(ctype) _SAL1_Source_(__typefix, (ctype), __inner_typefix(ctype)) +#define __override __inner_override +#define __callback __inner_callback +#define __format_string _Printf_format_string_ +#define __blocksOn(resource) __inner_blocksOn(resource) +#define __control_entrypoint(category) __inner_control_entrypoint(category) +#define __data_entrypoint(category) __inner_data_entrypoint(category) +#define __useHeader _Use_decl_anno_impl_ +#define __on_failure(annotes) _On_failure_impl_(annotes _SAL_nop_impl_) + +#ifndef __has_cpp_attribute +#define __has_cpp_attribute(x) (0) +#endif + +#ifndef __fallthrough // [ +#if __has_cpp_attribute(fallthrough) +#define __fallthrough [[fallthrough]] +#else +#define __fallthrough +#endif +#endif // ] + +#ifndef __analysis_assume // [ +#ifdef _PREFAST_ // [ +#define __analysis_assume(expr) __assume(expr) +#else // ][ +#define __analysis_assume(expr) +#endif // ] +#endif // ] + +#ifndef _Analysis_assume_ // [ +#ifdef _PREFAST_ // [ +#define _Analysis_assume_(expr) __assume(expr) +#else // ][ +#define _Analysis_assume_(expr) +#endif // ] +#endif // ] + +#define _Analysis_noreturn_ _SAL2_Source_(_Analysis_noreturn_, (), _SA_annotes0(SAL_terminates)) + +#ifdef _PREFAST_ // [ +__inline __nothrow +void __AnalysisAssumeNullterminated(_Post_ __nullterminated void *p); + +#define _Analysis_assume_nullterminated_(x) __AnalysisAssumeNullterminated(x) +#else // ][ +#define _Analysis_assume_nullterminated_(x) +#endif // ] + +// +// Set the analysis mode (global flags to analysis). +// They take effect at the point of declaration; use at global scope +// as a declaration. 
+// + +// Synthesize a unique symbol. +#define ___MKID(x, y) x ## y +#define __MKID(x, y) ___MKID(x, y) +#define __GENSYM(x) __MKID(x, __COUNTER__) + +__ANNOTATION(SAL_analysisMode(__AuToQuOtE __In_impl_ char *mode);) + +#define _Analysis_mode_impl_(mode) _SA_annotes1(SAL_analysisMode, #mode) + +#define _Analysis_mode_(mode) \ + typedef _Analysis_mode_impl_(mode) int \ + __GENSYM(__prefast_analysis_mode_flag); + +// The following are predefined: +// _Analysis_operator_new_throw_ (operator new throws) +// _Analysis_operator_new_null_ (operator new returns null) +// _Analysis_operator_new_never_fails_ (operator new never fails) +// + +// Function class annotations. +__ANNOTATION(SAL_functionClassNew(__In_impl_ char*);) +__PRIMOP(int, _In_function_class_(__In_impl_ char*);) +#define _In_function_class_(x) _In_function_class_(#x) + +#define _Function_class_(x) _SA_annotes1(SAL_functionClassNew, #x) + +/* + * interlocked operand used in interlocked instructions + */ +//#define _Interlocked_operand_ _Pre_ _SA_annotes0(SAL_interlocked) + +#define _Enum_is_bitflag_ _SA_annotes0(SAL_enumIsBitflag) +#define _Strict_type_match_ _SA_annotes0(SAL_strictType2) + +#define _Maybe_raises_SEH_exception_ _Pre_ _SA_annotes1(SAL_inTry,__yes) +#define _Raises_SEH_exception_ _Group_(_Maybe_raises_SEH_exception_ _Analysis_noreturn_) + +#ifdef __cplusplus // [ +} +#endif // ] diff --git a/include/test.h b/include/test.h new file mode 100644 index 0000000..96e34a7 --- /dev/null +++ b/include/test.h @@ -0,0 +1,13 @@ +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + + void load(); + void draw(); + void update(float lx, float ly, float ry); + +#ifdef __cplusplus +} +#endif diff --git a/main.lua b/main.lua new file mode 100644 index 0000000..0672ade --- /dev/null +++ b/main.lua @@ -0,0 +1,56 @@ +local ffi = require 'ffi' +local joysticks + +function init() + joysticks = love.joystick.getJoysticks() + + ffi.cdef[[ +void load(); +void draw(); +void update(float lx, float ly, float ry); 
+]] + test = ffi.load("./test.so") + test.load() + +end + +local update = function(dt) + for _, joystick in ipairs(joysticks) do + local lx = joystick:getGamepadAxis("leftx") + local ly = joystick:getGamepadAxis("lefty") + local ry = joystick:getGamepadAxis("righty") + test.update(lx, ly, ry) + end +end + +local draw = function() + test.draw() +end + +function love.run() + init() + + love.timer.step() + + return function() + love.event.pump() + for name, a,b,c,d,e,f,g,h in love.event.poll() do + if name == "quit" then + if c or not love.quit or not love.quit() then + return a or 0, b + end + end + if name == "keypressed" then + keypressed(a, b, c) + end + end + + local dt = love.timer.step() + update(dt) + + draw() + + love.graphics.present() + love.timer.sleep(0.001) + end +end diff --git a/minecraft/block_id_to_texture_id.data b/minecraft/block_id_to_texture_id.data new file mode 100644 index 0000000..94099df Binary files /dev/null and b/minecraft/block_id_to_texture_id.data differ diff --git a/minecraft/gen/blocks.py b/minecraft/gen/blocks.py new file mode 100644 index 0000000..6f3905b --- /dev/null +++ b/minecraft/gen/blocks.py @@ -0,0 +1,104 @@ +import struct + +def id_to_px(i): + x = i % 16 + y = i // 16 + return x * 16, y * 16 + +def px_to_id(px, py): + x = px // 16 + y = py // 16 + i = y * 16 + x + return i + +unk = 185 + +mapping = [ + (1, 1, "stone"), + (2, 0, "grass"), + (31, 0, "grass"), # fixme actually tallgrass + (3, 2, "dirt"), + (4, 16, "stonebrick"), + (5, 4, "wood"), + (6, 15, "sapling"), + (7, 17, "bedrock"), + (8, 205, "water"), # flowing + (9, 205, "water"), # still + (10, 237, "lava"), # flowing + (11, 237, "lava"), # still + (12, 18, "sand"), + (13, 19, "gravel"), + (14, 32, "oreGold"), + (15, 33, "oreIron"), + (16, 34, "oreCoal"), + (17, 20, "log"), + (18, 52, "leaves"), + (19, 48, "sponge"), + (20, 49, "glass"), + (35, 64, "cloth"), + (37, 13, "flower"), + (38, 12, "rose"), + (39, 29, "mushroom"), + (40, 28, "mushroom"), + (41, 39, 
"blockGold"), + (42, 38, "blockIron"), + (43, 5, "stoneSlab"), # double + (44, 5, "stoneSlab"), # single + (45, 7, "brick"), + (46, 8, "tnt"), + (47, 35, "bookshelf"), + (48, 36, "stoneMoss"), + (49, 37, "obsidian"), + (50, 80, "torch"), + (51, 31, "fire"), + (52, 65, "mobSpawner"), + (53, 4, "stairsWood"), + (54, 27, "chest"), + (55, 84, "redstoneDust"), + (56, 50, "oreDiamond"), + (57, 40, "blockDiamond"), + (58, 43, "workbench"), + (59, 88, "crops"), + (60, 87, "farmland"), + (61, 44, "furnace"), # off + (62, 61, "furnace"), # burning + (63, unk, "sign"), + (64, 81, "doorWood"), + (65, 83, "ladder"), + (66, 128, "rail"), + (67, 16, "stairsStone"), + (68, unk, "sign"), + (69, 96, "lever"), + (70, 6, "pressurePlate"), + (71, 82, "doorIron"), + (72, 6, "pressurePlate"), + (73, 51, "oreRedstone"), + (74, 51, "oreRedstone"), + (75, 115, "notGate"), + (76, 99, "notGate"), + (77, unk, "button"), + (78, 66, "snow"), + (79, 67, "ice"), + (80, 66, "snow"), + (81, 70, "cactus"), + (82, 72, "clay"), + (83, 73, "reeds"), + (84, 74, "jukebox"), + (85, 4, "fence"), + (86, 102, "pumpkin"), + (87, 103, "hellrock"), + (88, 104, "hellsand"), + (89, 105, "lightgem"), + (90, 14, "portal"), + (91, 102, "pumpkin"), +] + +lookup = { + k: v for k, v, _ in mapping +} + +with open("block_id_to_texture_id.data", "wb") as f: + for i in range(256): + value = lookup.get(i, unk) + f.write(struct.pack("I", mem[ix:ix+4]) + chunk_offset = (chunk_location >> 8) & 0xffffff + chunk_sector_count = chunk_location & 0xff + yield chunk_offset, chunk_sector_count + +def parse_locations(mem, offset): + locations = list(_parse_locations(mem, offset)) + return offset + 1024 * 4, locations + +def _parse_timestamps(mem, offset): + for i in range(1024): + ix = offset + i * 4 + timestamp, = struct.unpack(">I", mem[ix:ix+4]) + yield timestamp + +def parse_timestamps(mem, offset): + timestamps = list(_parse_timestamps(mem, offset)) + return offset + 1024 * 4, timestamps + +def print_locations(locations): + for y 
in range(32): + for x in range(32): + offset, count = locations[y * 32 + x] + print(str(offset).rjust(4), end=' ') + print() + +class CountZeroException(Exception): + pass + +def parse_payload(mem, location): + offset, count = location + if count == 0: + raise CountZeroException() + ix = offset * 4096 + payload = mem[ix:ix + count * 4096] + length, = struct.unpack(">I", payload[0:4]) + assert length <= count * 4096, (length, count) + compression_type = payload[4] + data = payload[5:5 + (length - 1)] + assert compression_type == 2, compression_type + uncompressed = zlib.decompress(data) + return memoryview(uncompressed) + +class TAG: + Byte = 0x01 + Short = 0x02 + Int = 0x03 + Long = 0x04 + Float = 0x05 + Double = 0x06 + ByteArray = 0x07 + String = 0x08 + List = 0x09 + Compound = 0x0a + +@dataclass +class Byte: + name: str + value: int + +@dataclass +class Short: + name: str + value: int + +@dataclass +class Int: + name: str + value: int + +@dataclass +class Long: + name: str + value: int + +@dataclass +class Float: + name: str + value: float + +@dataclass +class Double: + name: str + value: float + +@dataclass +class ByteArray: + name: str + value: bytes + +@dataclass +class String: + name: str + value: str + +@dataclass +class List: + name: str + items: list + +@dataclass +class Compound: + name: str + tags: list + +def indent(level): + return " " * level + +def parse_tag_inner(mem, offset, level, tag_type, name): + payload = mem[offset:] + if tag_type == TAG.Byte: + value, = struct.unpack(">b", payload[0:1]) + return offset + 1, Byte(name, value) + if tag_type == TAG.Short: + value, = struct.unpack(">h", payload[0:2]) + return offset + 2, Short(name, value) + elif tag_type == TAG.Int: + value, = struct.unpack(">i", payload[0:4]) + return offset + 4, Int(name, value) + elif tag_type == TAG.Long: + value, = struct.unpack(">q", payload[0:8]) + return offset + 8, Long(name, value) + elif tag_type == TAG.Float: + value, = struct.unpack(">f", payload[0:4]) + return 
offset + 4, Float(name, value) + elif tag_type == TAG.Double: + value, = struct.unpack(">d", payload[0:8]) + return offset + 8, Double(name, value) + elif tag_type == TAG.ByteArray: + size, = struct.unpack(">i", payload[0:4]) + value = bytes(payload[4:4+size]) + return offset + 4 + size, ByteArray(name, value) + elif tag_type == TAG.String: + size, = struct.unpack(">H", payload[0:2]) + value = bytes(payload[2:2+size]).decode('utf-8') + return offset + 2 + size, String(name, value) + elif tag_type == TAG.List: + list_content_tag_id, size = struct.unpack(">BI", payload[0:5]) + items = [] + offset = offset + 5 + for i in range(size): + payload = mem[offset:] + offset, item = parse_tag_inner(mem, offset, level, list_content_tag_id, None) + items.append(item) + return offset, List(name, items) + elif tag_type == TAG.Compound: + tags = [] + while payload[0] != 0: + offset, tag = parse_tag(mem, offset, level+1) + payload = mem[offset:] + tags.append(tag) + return offset + 1, Compound(name, tags) + else: + assert False, tag_type + +def parse_tag(mem, offset, level): + data = mem[offset:] + tag_type = data[0] + name_length, = struct.unpack(">H", data[1:3]) + name = bytes(data[3:3+name_length]) + #print(indent(level), tag_type, name_length, name) + offset = offset + 3 + name_length + return parse_tag_inner(mem, offset, level, tag_type, name) + +@dataclass +class Level: + blocks: bytes + data: bytes + sky_light: bytes + block_light: bytes + height_map: bytes + x_pos: int + z_pos: int + +def level_from_tag(tag): + assert type(tag) == Compound + assert tag.name == b'' + assert len(tag.tags) == 1 + level, = tag.tags + assert type(level) == Compound + assert level.name == b'Level' + + name_mapping = { + b'Blocks': 'blocks', + b'Data': 'data', + b'SkyLight': 'sky_light', + b'BlockLight': 'block_light', + b'HeightMap': 'height_map', + b'xPos': 'x_pos', + b'zPos': 'z_pos', + } + + args = {} + + for tag in level.tags: + if tag.name in name_mapping: + arg_name = name_mapping[tag.name] 
+ args[arg_name] = tag.value + + return Level(**args) + +def parse_location(mem, location): + uncompressed = parse_payload(mem, location) + offset, tag = parse_tag(uncompressed, 0, 0) + assert offset == len(uncompressed), (offset, len(uncompressed)) + level = level_from_tag(tag) + return level + +def xyz_from_block_index(block_index): + assert block_index >= 0 and block_index < (128 * 16 * 16) + x = int(block_index / (128 * 16)) + y = int(block_index % 128) + z = int(int(block_index / 128) % 16) + return x, y, z + +def block_index_from_xyz(x, y, z): + assert x >= 0 and x < 16 + assert y >= 0 and y < 128 + assert z >= 0 and z < 16 + return int(y + z * 128 + x * 128 * 16) + +def wrap_n(nc, chunk_c): + if nc < 0: + nc = 15 + chunk_c = chunk_c - 1 + if nc > 15: + nc = 0 + chunk_c = chunk_c + 1 + return nc, chunk_c + +def vec3_add(v1, v2): + return ( + v1[0] + v2[0], + v1[1] + v2[1], + v1[2] + v2[2], + ) + +def vec3_mul(v, s): + return ( + v[0] * s, + v[1] * s, + v[2] * s, + ) + +vertex_table = [ + ((-1.0, 1.0, -1.0), (0.0, 1.0, 0.0), (1.0, 0.0)), + ((1.0, 1.0, 1.0), (0.0, 1.0, 0.0), (0.0, 1.0)), + ((1.0, 1.0, -1.0), (0.0, 1.0, 0.0), (0.0, 0.0)), + ((1.0, 1.0, 1.0), (0.0, 0.0, 1.0), (1.0, 1.0)), + + ((-1.0, -1.0, 1.0), (0.0, 0.0, 1.0), (0.0, 0.0)), + ((1.0, -1.0, 1.0), (0.0, 0.0, 1.0), (1.0, 0.0)), + ((-1.0, 1.0, 1.0), (-1.0, 0.0, 0.0), (1.0, 1.0)), + ((-1.0, -1.0, -1.0), (-1.0, 0.0, 0.0), (0.0, 0.0)), + + ((-1.0, -1.0, 1.0), (-1.0, 0.0, 0.0), (1.0, 0.0)), + ((1.0, -1.0, -1.0), (0.0, -1.0, 0.0), (1.0, 0.0)), + ((-1.0, -1.0, 1.0), (0.0, -1.0, 0.0), (0.0, 1.0)), + ((-1.0, -1.0, -1.0), (0.0, -1.0, 0.0), (0.0, 0.0)), + + ((1.0, 1.0, -1.0), (1.0, 0.0, 0.0), (1.0, 1.0)), + ((1.0, -1.0, 1.0), (1.0, 0.0, 0.0), (0.0, 0.0)), + ((1.0, -1.0, -1.0), (1.0, 0.0, 0.0), (1.0, 0.0)), + ((-1.0, 1.0, -1.0), (0.0, 0.0, -1.0), (1.0, 1.0)), + ((1.0, -1.0, -1.0), (0.0, 0.0, -1.0), (0.0, 0.0)), + ((-1.0, -1.0, -1.0), (0.0, 0.0, -1.0), (1.0, 0.0)), + ((-1.0, 1.0, 1.0), (0.0, 1.0, 0.0), (1.0, 
1.0)), + ((-1.0, 1.0, 1.0), (0.0, 0.0, 1.0), (0.0, 1.0)), + ((-1.0, 1.0, -1.0), (-1.0, 0.0, 0.0), (0.0, 1.0)), + ((1.0, -1.0, 1.0), (0.0, -1.0, 0.0), (1.0, 1.0)), + ((1.0, 1.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0)), + ((1.0, 1.0, -1.0), (0.0, 0.0, -1.0), (0.0, 1.0)) +] + +faces_by_normal = { + (-1.0, 0.0, 0.0): [6, 7, 8, 6, 20, 7], + (0.0, -1.0, 0.0): [9, 10, 11, 9, 21, 10], + (0.0, 0.0, -1.0): [15, 16, 17, 15, 23, 16], + (0.0, 0.0, 1.0): [3, 4, 5, 3, 19, 4], + (0.0, 1.0, 0.0): [0, 1, 2, 0, 18, 1], + (1.0, 0.0, 0.0): [12, 13, 14, 12, 22, 13] +} + +vertex_buffer = {} + +def add_vertex(vertex): + if vertex in vertex_buffer: + return vertex_buffer[vertex] + else: + index = len(vertex_buffer) + vertex_buffer[vertex] = index + return index + +def emit_face(center_position, block_id, triangles): + for index in triangles: + position, normal, texture = vertex_table[index] + position = vec3_add(vec3_mul(position, 0.5), center_position) + vertex = (position, normal, texture, block_id) + new_index = add_vertex(vertex) + yield new_index + +def block_neighbors(level_table, chunk_x, chunk_z, block_index): + block_id = level_table[(chunk_x, chunk_z)].blocks[block_index] + if block_id == 0: + return + + def neighbor_exists(nx, ny, nz): + if ny > 127 or ny < 0: + return False + nx, n_chunk_x = wrap_n(nx, chunk_x) + nz, n_chunk_z = wrap_n(nz, chunk_z) + if nx > 15 or nx < 0: + return True + if nz > 15 or nz < 0: + return True + n_block_index = block_index_from_xyz(nx, ny, nz) + key = (n_chunk_x, n_chunk_z) + if key not in level_table: + return True + n_block_id = level_table[key].blocks[n_block_index] + return n_block_id != 0 + + x, y, z = xyz_from_block_index(block_index) + + center_position = vec3_add((x, y, z), (chunk_x * 16, 0, chunk_z * 16)) + + for normal, triangles in faces_by_normal.items(): + neighbor = vec3_add(normal, (x, y, z)) + if not neighbor_exists(*neighbor): + yield from emit_face(center_position, block_id, triangles) + + #yield chunk_x, chunk_z, block_index, block_id 
+ #break + +def devoxelize_region(level_table): + for chunk_x, chunk_z in level_table.keys(): + for block_index in range(128 * 16 * 16): + yield from block_neighbors(level_table, chunk_x, chunk_z, block_index) + +from collections import defaultdict +counts = defaultdict(int) + +def linearized_vertex_buffer(): + for vertex, i in sorted(vertex_buffer.items(), key=lambda kv: kv[1]): + yield vertex + +def main(mcr_path, data_path): + with open(mcr_path, "rb") as f: + buf = f.read() + mem = memoryview(buf) + + offset = 0 + offset, locations = parse_locations(mem, offset) + offset, timestamps = parse_timestamps(mem, offset) + assert offset == 0x2000 + + level_table = {} + for location in locations: + try: + level = parse_location(mem, location) + except CountZeroException: + continue + x, z = level.x_pos, level.z_pos + level_table[(x, z)] = level + #with open(f"blocks__{x:02x}_{z:02x}.data", "wb") as f: + # f.write(level.blocks) + + with open(data_path + ".idx", "wb") as f: + for index in devoxelize_region(level_table): + f.write(struct.pack(" +#include +#include +#include + +#ifndef GLAD_IMPL_UTIL_C_ +#define GLAD_IMPL_UTIL_C_ + +#ifdef _MSC_VER +#define GLAD_IMPL_UTIL_SSCANF sscanf_s +#else +#define GLAD_IMPL_UTIL_SSCANF sscanf +#endif + +#endif /* GLAD_IMPL_UTIL_C_ */ + +#ifdef __cplusplus +extern "C" { +#endif + + + +int GLAD_GL_VERSION_1_0 = 0; +int GLAD_GL_VERSION_1_1 = 0; +int GLAD_GL_VERSION_1_2 = 0; +int GLAD_GL_VERSION_1_3 = 0; +int GLAD_GL_VERSION_1_4 = 0; +int GLAD_GL_VERSION_1_5 = 0; +int GLAD_GL_VERSION_2_0 = 0; +int GLAD_GL_VERSION_2_1 = 0; +int GLAD_GL_VERSION_3_0 = 0; +int GLAD_GL_VERSION_3_1 = 0; +int GLAD_GL_VERSION_3_2 = 0; +int GLAD_GL_VERSION_3_3 = 0; +int GLAD_GL_VERSION_4_0 = 0; +int GLAD_GL_VERSION_4_1 = 0; +int GLAD_GL_VERSION_4_2 = 0; +int GLAD_GL_VERSION_4_3 = 0; + + + +PFNGLACTIVESHADERPROGRAMPROC glad_glActiveShaderProgram = NULL; +PFNGLACTIVETEXTUREPROC glad_glActiveTexture = NULL; +PFNGLATTACHSHADERPROC glad_glAttachShader = NULL; 
+PFNGLBEGINCONDITIONALRENDERPROC glad_glBeginConditionalRender = NULL; +PFNGLBEGINQUERYPROC glad_glBeginQuery = NULL; +PFNGLBEGINQUERYINDEXEDPROC glad_glBeginQueryIndexed = NULL; +PFNGLBEGINTRANSFORMFEEDBACKPROC glad_glBeginTransformFeedback = NULL; +PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation = NULL; +PFNGLBINDBUFFERPROC glad_glBindBuffer = NULL; +PFNGLBINDBUFFERBASEPROC glad_glBindBufferBase = NULL; +PFNGLBINDBUFFERRANGEPROC glad_glBindBufferRange = NULL; +PFNGLBINDFRAGDATALOCATIONPROC glad_glBindFragDataLocation = NULL; +PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_glBindFragDataLocationIndexed = NULL; +PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer = NULL; +PFNGLBINDIMAGETEXTUREPROC glad_glBindImageTexture = NULL; +PFNGLBINDPROGRAMPIPELINEPROC glad_glBindProgramPipeline = NULL; +PFNGLBINDRENDERBUFFERPROC glad_glBindRenderbuffer = NULL; +PFNGLBINDSAMPLERPROC glad_glBindSampler = NULL; +PFNGLBINDTEXTUREPROC glad_glBindTexture = NULL; +PFNGLBINDTRANSFORMFEEDBACKPROC glad_glBindTransformFeedback = NULL; +PFNGLBINDVERTEXARRAYPROC glad_glBindVertexArray = NULL; +PFNGLBINDVERTEXBUFFERPROC glad_glBindVertexBuffer = NULL; +PFNGLBLENDCOLORPROC glad_glBlendColor = NULL; +PFNGLBLENDEQUATIONPROC glad_glBlendEquation = NULL; +PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate = NULL; +PFNGLBLENDEQUATIONSEPARATEIPROC glad_glBlendEquationSeparatei = NULL; +PFNGLBLENDEQUATIONIPROC glad_glBlendEquationi = NULL; +PFNGLBLENDFUNCPROC glad_glBlendFunc = NULL; +PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate = NULL; +PFNGLBLENDFUNCSEPARATEIPROC glad_glBlendFuncSeparatei = NULL; +PFNGLBLENDFUNCIPROC glad_glBlendFunci = NULL; +PFNGLBLITFRAMEBUFFERPROC glad_glBlitFramebuffer = NULL; +PFNGLBUFFERDATAPROC glad_glBufferData = NULL; +PFNGLBUFFERSUBDATAPROC glad_glBufferSubData = NULL; +PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus = NULL; +PFNGLCLAMPCOLORPROC glad_glClampColor = NULL; +PFNGLCLEARPROC glad_glClear = NULL; +PFNGLCLEARBUFFERDATAPROC 
glad_glClearBufferData = NULL; +PFNGLCLEARBUFFERSUBDATAPROC glad_glClearBufferSubData = NULL; +PFNGLCLEARBUFFERFIPROC glad_glClearBufferfi = NULL; +PFNGLCLEARBUFFERFVPROC glad_glClearBufferfv = NULL; +PFNGLCLEARBUFFERIVPROC glad_glClearBufferiv = NULL; +PFNGLCLEARBUFFERUIVPROC glad_glClearBufferuiv = NULL; +PFNGLCLEARCOLORPROC glad_glClearColor = NULL; +PFNGLCLEARDEPTHPROC glad_glClearDepth = NULL; +PFNGLCLEARDEPTHFPROC glad_glClearDepthf = NULL; +PFNGLCLEARSTENCILPROC glad_glClearStencil = NULL; +PFNGLCLIENTWAITSYNCPROC glad_glClientWaitSync = NULL; +PFNGLCOLORMASKPROC glad_glColorMask = NULL; +PFNGLCOLORMASKIPROC glad_glColorMaski = NULL; +PFNGLCOMPILESHADERPROC glad_glCompileShader = NULL; +PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_glCompressedTexImage1D = NULL; +PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D = NULL; +PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_glCompressedTexImage3D = NULL; +PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_glCompressedTexSubImage1D = NULL; +PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D = NULL; +PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glad_glCompressedTexSubImage3D = NULL; +PFNGLCOPYBUFFERSUBDATAPROC glad_glCopyBufferSubData = NULL; +PFNGLCOPYIMAGESUBDATAPROC glad_glCopyImageSubData = NULL; +PFNGLCOPYTEXIMAGE1DPROC glad_glCopyTexImage1D = NULL; +PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D = NULL; +PFNGLCOPYTEXSUBIMAGE1DPROC glad_glCopyTexSubImage1D = NULL; +PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D = NULL; +PFNGLCOPYTEXSUBIMAGE3DPROC glad_glCopyTexSubImage3D = NULL; +PFNGLCREATEPROGRAMPROC glad_glCreateProgram = NULL; +PFNGLCREATESHADERPROC glad_glCreateShader = NULL; +PFNGLCREATESHADERPROGRAMVPROC glad_glCreateShaderProgramv = NULL; +PFNGLCULLFACEPROC glad_glCullFace = NULL; +PFNGLDEBUGMESSAGECALLBACKPROC glad_glDebugMessageCallback = NULL; +PFNGLDEBUGMESSAGECONTROLPROC glad_glDebugMessageControl = NULL; +PFNGLDEBUGMESSAGEINSERTPROC glad_glDebugMessageInsert = NULL; +PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers = NULL; 
+PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers = NULL; +PFNGLDELETEPROGRAMPROC glad_glDeleteProgram = NULL; +PFNGLDELETEPROGRAMPIPELINESPROC glad_glDeleteProgramPipelines = NULL; +PFNGLDELETEQUERIESPROC glad_glDeleteQueries = NULL; +PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers = NULL; +PFNGLDELETESAMPLERSPROC glad_glDeleteSamplers = NULL; +PFNGLDELETESHADERPROC glad_glDeleteShader = NULL; +PFNGLDELETESYNCPROC glad_glDeleteSync = NULL; +PFNGLDELETETEXTURESPROC glad_glDeleteTextures = NULL; +PFNGLDELETETRANSFORMFEEDBACKSPROC glad_glDeleteTransformFeedbacks = NULL; +PFNGLDELETEVERTEXARRAYSPROC glad_glDeleteVertexArrays = NULL; +PFNGLDEPTHFUNCPROC glad_glDepthFunc = NULL; +PFNGLDEPTHMASKPROC glad_glDepthMask = NULL; +PFNGLDEPTHRANGEPROC glad_glDepthRange = NULL; +PFNGLDEPTHRANGEARRAYVPROC glad_glDepthRangeArrayv = NULL; +PFNGLDEPTHRANGEINDEXEDPROC glad_glDepthRangeIndexed = NULL; +PFNGLDEPTHRANGEFPROC glad_glDepthRangef = NULL; +PFNGLDETACHSHADERPROC glad_glDetachShader = NULL; +PFNGLDISABLEPROC glad_glDisable = NULL; +PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray = NULL; +PFNGLDISABLEIPROC glad_glDisablei = NULL; +PFNGLDISPATCHCOMPUTEPROC glad_glDispatchCompute = NULL; +PFNGLDISPATCHCOMPUTEINDIRECTPROC glad_glDispatchComputeIndirect = NULL; +PFNGLDRAWARRAYSPROC glad_glDrawArrays = NULL; +PFNGLDRAWARRAYSINDIRECTPROC glad_glDrawArraysIndirect = NULL; +PFNGLDRAWARRAYSINSTANCEDPROC glad_glDrawArraysInstanced = NULL; +PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC glad_glDrawArraysInstancedBaseInstance = NULL; +PFNGLDRAWBUFFERPROC glad_glDrawBuffer = NULL; +PFNGLDRAWBUFFERSPROC glad_glDrawBuffers = NULL; +PFNGLDRAWELEMENTSPROC glad_glDrawElements = NULL; +PFNGLDRAWELEMENTSBASEVERTEXPROC glad_glDrawElementsBaseVertex = NULL; +PFNGLDRAWELEMENTSINDIRECTPROC glad_glDrawElementsIndirect = NULL; +PFNGLDRAWELEMENTSINSTANCEDPROC glad_glDrawElementsInstanced = NULL; +PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC glad_glDrawElementsInstancedBaseInstance 
= NULL; +PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glad_glDrawElementsInstancedBaseVertex = NULL; +PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC glad_glDrawElementsInstancedBaseVertexBaseInstance = NULL; +PFNGLDRAWRANGEELEMENTSPROC glad_glDrawRangeElements = NULL; +PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_glDrawRangeElementsBaseVertex = NULL; +PFNGLDRAWTRANSFORMFEEDBACKPROC glad_glDrawTransformFeedback = NULL; +PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC glad_glDrawTransformFeedbackInstanced = NULL; +PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC glad_glDrawTransformFeedbackStream = NULL; +PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC glad_glDrawTransformFeedbackStreamInstanced = NULL; +PFNGLENABLEPROC glad_glEnable = NULL; +PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray = NULL; +PFNGLENABLEIPROC glad_glEnablei = NULL; +PFNGLENDCONDITIONALRENDERPROC glad_glEndConditionalRender = NULL; +PFNGLENDQUERYPROC glad_glEndQuery = NULL; +PFNGLENDQUERYINDEXEDPROC glad_glEndQueryIndexed = NULL; +PFNGLENDTRANSFORMFEEDBACKPROC glad_glEndTransformFeedback = NULL; +PFNGLFENCESYNCPROC glad_glFenceSync = NULL; +PFNGLFINISHPROC glad_glFinish = NULL; +PFNGLFLUSHPROC glad_glFlush = NULL; +PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_glFlushMappedBufferRange = NULL; +PFNGLFRAMEBUFFERPARAMETERIPROC glad_glFramebufferParameteri = NULL; +PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer = NULL; +PFNGLFRAMEBUFFERTEXTUREPROC glad_glFramebufferTexture = NULL; +PFNGLFRAMEBUFFERTEXTURE1DPROC glad_glFramebufferTexture1D = NULL; +PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D = NULL; +PFNGLFRAMEBUFFERTEXTURE3DPROC glad_glFramebufferTexture3D = NULL; +PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_glFramebufferTextureLayer = NULL; +PFNGLFRONTFACEPROC glad_glFrontFace = NULL; +PFNGLGENBUFFERSPROC glad_glGenBuffers = NULL; +PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers = NULL; +PFNGLGENPROGRAMPIPELINESPROC glad_glGenProgramPipelines = NULL; +PFNGLGENQUERIESPROC glad_glGenQueries 
= NULL; +PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers = NULL; +PFNGLGENSAMPLERSPROC glad_glGenSamplers = NULL; +PFNGLGENTEXTURESPROC glad_glGenTextures = NULL; +PFNGLGENTRANSFORMFEEDBACKSPROC glad_glGenTransformFeedbacks = NULL; +PFNGLGENVERTEXARRAYSPROC glad_glGenVertexArrays = NULL; +PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap = NULL; +PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC glad_glGetActiveAtomicCounterBufferiv = NULL; +PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib = NULL; +PFNGLGETACTIVESUBROUTINENAMEPROC glad_glGetActiveSubroutineName = NULL; +PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC glad_glGetActiveSubroutineUniformName = NULL; +PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC glad_glGetActiveSubroutineUniformiv = NULL; +PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform = NULL; +PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_glGetActiveUniformBlockName = NULL; +PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_glGetActiveUniformBlockiv = NULL; +PFNGLGETACTIVEUNIFORMNAMEPROC glad_glGetActiveUniformName = NULL; +PFNGLGETACTIVEUNIFORMSIVPROC glad_glGetActiveUniformsiv = NULL; +PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders = NULL; +PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation = NULL; +PFNGLGETBOOLEANI_VPROC glad_glGetBooleani_v = NULL; +PFNGLGETBOOLEANVPROC glad_glGetBooleanv = NULL; +PFNGLGETBUFFERPARAMETERI64VPROC glad_glGetBufferParameteri64v = NULL; +PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv = NULL; +PFNGLGETBUFFERPOINTERVPROC glad_glGetBufferPointerv = NULL; +PFNGLGETBUFFERSUBDATAPROC glad_glGetBufferSubData = NULL; +PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_glGetCompressedTexImage = NULL; +PFNGLGETDEBUGMESSAGELOGPROC glad_glGetDebugMessageLog = NULL; +PFNGLGETDOUBLEI_VPROC glad_glGetDoublei_v = NULL; +PFNGLGETDOUBLEVPROC glad_glGetDoublev = NULL; +PFNGLGETERRORPROC glad_glGetError = NULL; +PFNGLGETFLOATI_VPROC glad_glGetFloati_v = NULL; +PFNGLGETFLOATVPROC glad_glGetFloatv = NULL; +PFNGLGETFRAGDATAINDEXPROC glad_glGetFragDataIndex = NULL; 
+PFNGLGETFRAGDATALOCATIONPROC glad_glGetFragDataLocation = NULL; +PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv = NULL; +PFNGLGETFRAMEBUFFERPARAMETERIVPROC glad_glGetFramebufferParameteriv = NULL; +PFNGLGETINTEGER64I_VPROC glad_glGetInteger64i_v = NULL; +PFNGLGETINTEGER64VPROC glad_glGetInteger64v = NULL; +PFNGLGETINTEGERI_VPROC glad_glGetIntegeri_v = NULL; +PFNGLGETINTEGERVPROC glad_glGetIntegerv = NULL; +PFNGLGETINTERNALFORMATI64VPROC glad_glGetInternalformati64v = NULL; +PFNGLGETINTERNALFORMATIVPROC glad_glGetInternalformativ = NULL; +PFNGLGETMULTISAMPLEFVPROC glad_glGetMultisamplefv = NULL; +PFNGLGETOBJECTLABELPROC glad_glGetObjectLabel = NULL; +PFNGLGETOBJECTPTRLABELPROC glad_glGetObjectPtrLabel = NULL; +PFNGLGETPOINTERVPROC glad_glGetPointerv = NULL; +PFNGLGETPROGRAMBINARYPROC glad_glGetProgramBinary = NULL; +PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog = NULL; +PFNGLGETPROGRAMINTERFACEIVPROC glad_glGetProgramInterfaceiv = NULL; +PFNGLGETPROGRAMPIPELINEINFOLOGPROC glad_glGetProgramPipelineInfoLog = NULL; +PFNGLGETPROGRAMPIPELINEIVPROC glad_glGetProgramPipelineiv = NULL; +PFNGLGETPROGRAMRESOURCEINDEXPROC glad_glGetProgramResourceIndex = NULL; +PFNGLGETPROGRAMRESOURCELOCATIONPROC glad_glGetProgramResourceLocation = NULL; +PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC glad_glGetProgramResourceLocationIndex = NULL; +PFNGLGETPROGRAMRESOURCENAMEPROC glad_glGetProgramResourceName = NULL; +PFNGLGETPROGRAMRESOURCEIVPROC glad_glGetProgramResourceiv = NULL; +PFNGLGETPROGRAMSTAGEIVPROC glad_glGetProgramStageiv = NULL; +PFNGLGETPROGRAMIVPROC glad_glGetProgramiv = NULL; +PFNGLGETQUERYINDEXEDIVPROC glad_glGetQueryIndexediv = NULL; +PFNGLGETQUERYOBJECTI64VPROC glad_glGetQueryObjecti64v = NULL; +PFNGLGETQUERYOBJECTIVPROC glad_glGetQueryObjectiv = NULL; +PFNGLGETQUERYOBJECTUI64VPROC glad_glGetQueryObjectui64v = NULL; +PFNGLGETQUERYOBJECTUIVPROC glad_glGetQueryObjectuiv = NULL; +PFNGLGETQUERYIVPROC glad_glGetQueryiv = NULL; 
+PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv = NULL; +PFNGLGETSAMPLERPARAMETERIIVPROC glad_glGetSamplerParameterIiv = NULL; +PFNGLGETSAMPLERPARAMETERIUIVPROC glad_glGetSamplerParameterIuiv = NULL; +PFNGLGETSAMPLERPARAMETERFVPROC glad_glGetSamplerParameterfv = NULL; +PFNGLGETSAMPLERPARAMETERIVPROC glad_glGetSamplerParameteriv = NULL; +PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog = NULL; +PFNGLGETSHADERPRECISIONFORMATPROC glad_glGetShaderPrecisionFormat = NULL; +PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource = NULL; +PFNGLGETSHADERIVPROC glad_glGetShaderiv = NULL; +PFNGLGETSTRINGPROC glad_glGetString = NULL; +PFNGLGETSTRINGIPROC glad_glGetStringi = NULL; +PFNGLGETSUBROUTINEINDEXPROC glad_glGetSubroutineIndex = NULL; +PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC glad_glGetSubroutineUniformLocation = NULL; +PFNGLGETSYNCIVPROC glad_glGetSynciv = NULL; +PFNGLGETTEXIMAGEPROC glad_glGetTexImage = NULL; +PFNGLGETTEXLEVELPARAMETERFVPROC glad_glGetTexLevelParameterfv = NULL; +PFNGLGETTEXLEVELPARAMETERIVPROC glad_glGetTexLevelParameteriv = NULL; +PFNGLGETTEXPARAMETERIIVPROC glad_glGetTexParameterIiv = NULL; +PFNGLGETTEXPARAMETERIUIVPROC glad_glGetTexParameterIuiv = NULL; +PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv = NULL; +PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv = NULL; +PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_glGetTransformFeedbackVarying = NULL; +PFNGLGETUNIFORMBLOCKINDEXPROC glad_glGetUniformBlockIndex = NULL; +PFNGLGETUNIFORMINDICESPROC glad_glGetUniformIndices = NULL; +PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation = NULL; +PFNGLGETUNIFORMSUBROUTINEUIVPROC glad_glGetUniformSubroutineuiv = NULL; +PFNGLGETUNIFORMDVPROC glad_glGetUniformdv = NULL; +PFNGLGETUNIFORMFVPROC glad_glGetUniformfv = NULL; +PFNGLGETUNIFORMIVPROC glad_glGetUniformiv = NULL; +PFNGLGETUNIFORMUIVPROC glad_glGetUniformuiv = NULL; +PFNGLGETVERTEXATTRIBIIVPROC glad_glGetVertexAttribIiv = NULL; +PFNGLGETVERTEXATTRIBIUIVPROC glad_glGetVertexAttribIuiv = 
NULL; +PFNGLGETVERTEXATTRIBLDVPROC glad_glGetVertexAttribLdv = NULL; +PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv = NULL; +PFNGLGETVERTEXATTRIBDVPROC glad_glGetVertexAttribdv = NULL; +PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv = NULL; +PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv = NULL; +PFNGLHINTPROC glad_glHint = NULL; +PFNGLINVALIDATEBUFFERDATAPROC glad_glInvalidateBufferData = NULL; +PFNGLINVALIDATEBUFFERSUBDATAPROC glad_glInvalidateBufferSubData = NULL; +PFNGLINVALIDATEFRAMEBUFFERPROC glad_glInvalidateFramebuffer = NULL; +PFNGLINVALIDATESUBFRAMEBUFFERPROC glad_glInvalidateSubFramebuffer = NULL; +PFNGLINVALIDATETEXIMAGEPROC glad_glInvalidateTexImage = NULL; +PFNGLINVALIDATETEXSUBIMAGEPROC glad_glInvalidateTexSubImage = NULL; +PFNGLISBUFFERPROC glad_glIsBuffer = NULL; +PFNGLISENABLEDPROC glad_glIsEnabled = NULL; +PFNGLISENABLEDIPROC glad_glIsEnabledi = NULL; +PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer = NULL; +PFNGLISPROGRAMPROC glad_glIsProgram = NULL; +PFNGLISPROGRAMPIPELINEPROC glad_glIsProgramPipeline = NULL; +PFNGLISQUERYPROC glad_glIsQuery = NULL; +PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer = NULL; +PFNGLISSAMPLERPROC glad_glIsSampler = NULL; +PFNGLISSHADERPROC glad_glIsShader = NULL; +PFNGLISSYNCPROC glad_glIsSync = NULL; +PFNGLISTEXTUREPROC glad_glIsTexture = NULL; +PFNGLISTRANSFORMFEEDBACKPROC glad_glIsTransformFeedback = NULL; +PFNGLISVERTEXARRAYPROC glad_glIsVertexArray = NULL; +PFNGLLINEWIDTHPROC glad_glLineWidth = NULL; +PFNGLLINKPROGRAMPROC glad_glLinkProgram = NULL; +PFNGLLOGICOPPROC glad_glLogicOp = NULL; +PFNGLMAPBUFFERPROC glad_glMapBuffer = NULL; +PFNGLMAPBUFFERRANGEPROC glad_glMapBufferRange = NULL; +PFNGLMEMORYBARRIERPROC glad_glMemoryBarrier = NULL; +PFNGLMINSAMPLESHADINGPROC glad_glMinSampleShading = NULL; +PFNGLMULTIDRAWARRAYSPROC glad_glMultiDrawArrays = NULL; +PFNGLMULTIDRAWARRAYSINDIRECTPROC glad_glMultiDrawArraysIndirect = NULL; +PFNGLMULTIDRAWELEMENTSPROC glad_glMultiDrawElements = NULL; 
+PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glad_glMultiDrawElementsBaseVertex = NULL; +PFNGLMULTIDRAWELEMENTSINDIRECTPROC glad_glMultiDrawElementsIndirect = NULL; +PFNGLOBJECTLABELPROC glad_glObjectLabel = NULL; +PFNGLOBJECTPTRLABELPROC glad_glObjectPtrLabel = NULL; +PFNGLPATCHPARAMETERFVPROC glad_glPatchParameterfv = NULL; +PFNGLPATCHPARAMETERIPROC glad_glPatchParameteri = NULL; +PFNGLPAUSETRANSFORMFEEDBACKPROC glad_glPauseTransformFeedback = NULL; +PFNGLPIXELSTOREFPROC glad_glPixelStoref = NULL; +PFNGLPIXELSTOREIPROC glad_glPixelStorei = NULL; +PFNGLPOINTPARAMETERFPROC glad_glPointParameterf = NULL; +PFNGLPOINTPARAMETERFVPROC glad_glPointParameterfv = NULL; +PFNGLPOINTPARAMETERIPROC glad_glPointParameteri = NULL; +PFNGLPOINTPARAMETERIVPROC glad_glPointParameteriv = NULL; +PFNGLPOINTSIZEPROC glad_glPointSize = NULL; +PFNGLPOLYGONMODEPROC glad_glPolygonMode = NULL; +PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset = NULL; +PFNGLPOPDEBUGGROUPPROC glad_glPopDebugGroup = NULL; +PFNGLPRIMITIVERESTARTINDEXPROC glad_glPrimitiveRestartIndex = NULL; +PFNGLPROGRAMBINARYPROC glad_glProgramBinary = NULL; +PFNGLPROGRAMPARAMETERIPROC glad_glProgramParameteri = NULL; +PFNGLPROGRAMUNIFORM1DPROC glad_glProgramUniform1d = NULL; +PFNGLPROGRAMUNIFORM1DVPROC glad_glProgramUniform1dv = NULL; +PFNGLPROGRAMUNIFORM1FPROC glad_glProgramUniform1f = NULL; +PFNGLPROGRAMUNIFORM1FVPROC glad_glProgramUniform1fv = NULL; +PFNGLPROGRAMUNIFORM1IPROC glad_glProgramUniform1i = NULL; +PFNGLPROGRAMUNIFORM1IVPROC glad_glProgramUniform1iv = NULL; +PFNGLPROGRAMUNIFORM1UIPROC glad_glProgramUniform1ui = NULL; +PFNGLPROGRAMUNIFORM1UIVPROC glad_glProgramUniform1uiv = NULL; +PFNGLPROGRAMUNIFORM2DPROC glad_glProgramUniform2d = NULL; +PFNGLPROGRAMUNIFORM2DVPROC glad_glProgramUniform2dv = NULL; +PFNGLPROGRAMUNIFORM2FPROC glad_glProgramUniform2f = NULL; +PFNGLPROGRAMUNIFORM2FVPROC glad_glProgramUniform2fv = NULL; +PFNGLPROGRAMUNIFORM2IPROC glad_glProgramUniform2i = NULL; +PFNGLPROGRAMUNIFORM2IVPROC glad_glProgramUniform2iv = 
NULL; +PFNGLPROGRAMUNIFORM2UIPROC glad_glProgramUniform2ui = NULL; +PFNGLPROGRAMUNIFORM2UIVPROC glad_glProgramUniform2uiv = NULL; +PFNGLPROGRAMUNIFORM3DPROC glad_glProgramUniform3d = NULL; +PFNGLPROGRAMUNIFORM3DVPROC glad_glProgramUniform3dv = NULL; +PFNGLPROGRAMUNIFORM3FPROC glad_glProgramUniform3f = NULL; +PFNGLPROGRAMUNIFORM3FVPROC glad_glProgramUniform3fv = NULL; +PFNGLPROGRAMUNIFORM3IPROC glad_glProgramUniform3i = NULL; +PFNGLPROGRAMUNIFORM3IVPROC glad_glProgramUniform3iv = NULL; +PFNGLPROGRAMUNIFORM3UIPROC glad_glProgramUniform3ui = NULL; +PFNGLPROGRAMUNIFORM3UIVPROC glad_glProgramUniform3uiv = NULL; +PFNGLPROGRAMUNIFORM4DPROC glad_glProgramUniform4d = NULL; +PFNGLPROGRAMUNIFORM4DVPROC glad_glProgramUniform4dv = NULL; +PFNGLPROGRAMUNIFORM4FPROC glad_glProgramUniform4f = NULL; +PFNGLPROGRAMUNIFORM4FVPROC glad_glProgramUniform4fv = NULL; +PFNGLPROGRAMUNIFORM4IPROC glad_glProgramUniform4i = NULL; +PFNGLPROGRAMUNIFORM4IVPROC glad_glProgramUniform4iv = NULL; +PFNGLPROGRAMUNIFORM4UIPROC glad_glProgramUniform4ui = NULL; +PFNGLPROGRAMUNIFORM4UIVPROC glad_glProgramUniform4uiv = NULL; +PFNGLPROGRAMUNIFORMMATRIX2DVPROC glad_glProgramUniformMatrix2dv = NULL; +PFNGLPROGRAMUNIFORMMATRIX2FVPROC glad_glProgramUniformMatrix2fv = NULL; +PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC glad_glProgramUniformMatrix2x3dv = NULL; +PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC glad_glProgramUniformMatrix2x3fv = NULL; +PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC glad_glProgramUniformMatrix2x4dv = NULL; +PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC glad_glProgramUniformMatrix2x4fv = NULL; +PFNGLPROGRAMUNIFORMMATRIX3DVPROC glad_glProgramUniformMatrix3dv = NULL; +PFNGLPROGRAMUNIFORMMATRIX3FVPROC glad_glProgramUniformMatrix3fv = NULL; +PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC glad_glProgramUniformMatrix3x2dv = NULL; +PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC glad_glProgramUniformMatrix3x2fv = NULL; +PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC glad_glProgramUniformMatrix3x4dv = NULL; +PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC glad_glProgramUniformMatrix3x4fv = 
NULL; +PFNGLPROGRAMUNIFORMMATRIX4DVPROC glad_glProgramUniformMatrix4dv = NULL; +PFNGLPROGRAMUNIFORMMATRIX4FVPROC glad_glProgramUniformMatrix4fv = NULL; +PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC glad_glProgramUniformMatrix4x2dv = NULL; +PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC glad_glProgramUniformMatrix4x2fv = NULL; +PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC glad_glProgramUniformMatrix4x3dv = NULL; +PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC glad_glProgramUniformMatrix4x3fv = NULL; +PFNGLPROVOKINGVERTEXPROC glad_glProvokingVertex = NULL; +PFNGLPUSHDEBUGGROUPPROC glad_glPushDebugGroup = NULL; +PFNGLQUERYCOUNTERPROC glad_glQueryCounter = NULL; +PFNGLREADBUFFERPROC glad_glReadBuffer = NULL; +PFNGLREADPIXELSPROC glad_glReadPixels = NULL; +PFNGLRELEASESHADERCOMPILERPROC glad_glReleaseShaderCompiler = NULL; +PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage = NULL; +PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glRenderbufferStorageMultisample = NULL; +PFNGLRESUMETRANSFORMFEEDBACKPROC glad_glResumeTransformFeedback = NULL; +PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage = NULL; +PFNGLSAMPLEMASKIPROC glad_glSampleMaski = NULL; +PFNGLSAMPLERPARAMETERIIVPROC glad_glSamplerParameterIiv = NULL; +PFNGLSAMPLERPARAMETERIUIVPROC glad_glSamplerParameterIuiv = NULL; +PFNGLSAMPLERPARAMETERFPROC glad_glSamplerParameterf = NULL; +PFNGLSAMPLERPARAMETERFVPROC glad_glSamplerParameterfv = NULL; +PFNGLSAMPLERPARAMETERIPROC glad_glSamplerParameteri = NULL; +PFNGLSAMPLERPARAMETERIVPROC glad_glSamplerParameteriv = NULL; +PFNGLSCISSORPROC glad_glScissor = NULL; +PFNGLSCISSORARRAYVPROC glad_glScissorArrayv = NULL; +PFNGLSCISSORINDEXEDPROC glad_glScissorIndexed = NULL; +PFNGLSCISSORINDEXEDVPROC glad_glScissorIndexedv = NULL; +PFNGLSHADERBINARYPROC glad_glShaderBinary = NULL; +PFNGLSHADERSOURCEPROC glad_glShaderSource = NULL; +PFNGLSHADERSTORAGEBLOCKBINDINGPROC glad_glShaderStorageBlockBinding = NULL; +PFNGLSTENCILFUNCPROC glad_glStencilFunc = NULL; +PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate = NULL; 
+PFNGLSTENCILMASKPROC glad_glStencilMask = NULL; +PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate = NULL; +PFNGLSTENCILOPPROC glad_glStencilOp = NULL; +PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate = NULL; +PFNGLTEXBUFFERPROC glad_glTexBuffer = NULL; +PFNGLTEXBUFFERRANGEPROC glad_glTexBufferRange = NULL; +PFNGLTEXIMAGE1DPROC glad_glTexImage1D = NULL; +PFNGLTEXIMAGE2DPROC glad_glTexImage2D = NULL; +PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_glTexImage2DMultisample = NULL; +PFNGLTEXIMAGE3DPROC glad_glTexImage3D = NULL; +PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_glTexImage3DMultisample = NULL; +PFNGLTEXPARAMETERIIVPROC glad_glTexParameterIiv = NULL; +PFNGLTEXPARAMETERIUIVPROC glad_glTexParameterIuiv = NULL; +PFNGLTEXPARAMETERFPROC glad_glTexParameterf = NULL; +PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv = NULL; +PFNGLTEXPARAMETERIPROC glad_glTexParameteri = NULL; +PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv = NULL; +PFNGLTEXSTORAGE1DPROC glad_glTexStorage1D = NULL; +PFNGLTEXSTORAGE2DPROC glad_glTexStorage2D = NULL; +PFNGLTEXSTORAGE2DMULTISAMPLEPROC glad_glTexStorage2DMultisample = NULL; +PFNGLTEXSTORAGE3DPROC glad_glTexStorage3D = NULL; +PFNGLTEXSTORAGE3DMULTISAMPLEPROC glad_glTexStorage3DMultisample = NULL; +PFNGLTEXSUBIMAGE1DPROC glad_glTexSubImage1D = NULL; +PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D = NULL; +PFNGLTEXSUBIMAGE3DPROC glad_glTexSubImage3D = NULL; +PFNGLTEXTUREVIEWPROC glad_glTextureView = NULL; +PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_glTransformFeedbackVaryings = NULL; +PFNGLUNIFORM1DPROC glad_glUniform1d = NULL; +PFNGLUNIFORM1DVPROC glad_glUniform1dv = NULL; +PFNGLUNIFORM1FPROC glad_glUniform1f = NULL; +PFNGLUNIFORM1FVPROC glad_glUniform1fv = NULL; +PFNGLUNIFORM1IPROC glad_glUniform1i = NULL; +PFNGLUNIFORM1IVPROC glad_glUniform1iv = NULL; +PFNGLUNIFORM1UIPROC glad_glUniform1ui = NULL; +PFNGLUNIFORM1UIVPROC glad_glUniform1uiv = NULL; +PFNGLUNIFORM2DPROC glad_glUniform2d = NULL; +PFNGLUNIFORM2DVPROC glad_glUniform2dv = NULL; +PFNGLUNIFORM2FPROC 
glad_glUniform2f = NULL; +PFNGLUNIFORM2FVPROC glad_glUniform2fv = NULL; +PFNGLUNIFORM2IPROC glad_glUniform2i = NULL; +PFNGLUNIFORM2IVPROC glad_glUniform2iv = NULL; +PFNGLUNIFORM2UIPROC glad_glUniform2ui = NULL; +PFNGLUNIFORM2UIVPROC glad_glUniform2uiv = NULL; +PFNGLUNIFORM3DPROC glad_glUniform3d = NULL; +PFNGLUNIFORM3DVPROC glad_glUniform3dv = NULL; +PFNGLUNIFORM3FPROC glad_glUniform3f = NULL; +PFNGLUNIFORM3FVPROC glad_glUniform3fv = NULL; +PFNGLUNIFORM3IPROC glad_glUniform3i = NULL; +PFNGLUNIFORM3IVPROC glad_glUniform3iv = NULL; +PFNGLUNIFORM3UIPROC glad_glUniform3ui = NULL; +PFNGLUNIFORM3UIVPROC glad_glUniform3uiv = NULL; +PFNGLUNIFORM4DPROC glad_glUniform4d = NULL; +PFNGLUNIFORM4DVPROC glad_glUniform4dv = NULL; +PFNGLUNIFORM4FPROC glad_glUniform4f = NULL; +PFNGLUNIFORM4FVPROC glad_glUniform4fv = NULL; +PFNGLUNIFORM4IPROC glad_glUniform4i = NULL; +PFNGLUNIFORM4IVPROC glad_glUniform4iv = NULL; +PFNGLUNIFORM4UIPROC glad_glUniform4ui = NULL; +PFNGLUNIFORM4UIVPROC glad_glUniform4uiv = NULL; +PFNGLUNIFORMBLOCKBINDINGPROC glad_glUniformBlockBinding = NULL; +PFNGLUNIFORMMATRIX2DVPROC glad_glUniformMatrix2dv = NULL; +PFNGLUNIFORMMATRIX2FVPROC glad_glUniformMatrix2fv = NULL; +PFNGLUNIFORMMATRIX2X3DVPROC glad_glUniformMatrix2x3dv = NULL; +PFNGLUNIFORMMATRIX2X3FVPROC glad_glUniformMatrix2x3fv = NULL; +PFNGLUNIFORMMATRIX2X4DVPROC glad_glUniformMatrix2x4dv = NULL; +PFNGLUNIFORMMATRIX2X4FVPROC glad_glUniformMatrix2x4fv = NULL; +PFNGLUNIFORMMATRIX3DVPROC glad_glUniformMatrix3dv = NULL; +PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv = NULL; +PFNGLUNIFORMMATRIX3X2DVPROC glad_glUniformMatrix3x2dv = NULL; +PFNGLUNIFORMMATRIX3X2FVPROC glad_glUniformMatrix3x2fv = NULL; +PFNGLUNIFORMMATRIX3X4DVPROC glad_glUniformMatrix3x4dv = NULL; +PFNGLUNIFORMMATRIX3X4FVPROC glad_glUniformMatrix3x4fv = NULL; +PFNGLUNIFORMMATRIX4DVPROC glad_glUniformMatrix4dv = NULL; +PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv = NULL; +PFNGLUNIFORMMATRIX4X2DVPROC glad_glUniformMatrix4x2dv = NULL; 
+PFNGLUNIFORMMATRIX4X2FVPROC glad_glUniformMatrix4x2fv = NULL; +PFNGLUNIFORMMATRIX4X3DVPROC glad_glUniformMatrix4x3dv = NULL; +PFNGLUNIFORMMATRIX4X3FVPROC glad_glUniformMatrix4x3fv = NULL; +PFNGLUNIFORMSUBROUTINESUIVPROC glad_glUniformSubroutinesuiv = NULL; +PFNGLUNMAPBUFFERPROC glad_glUnmapBuffer = NULL; +PFNGLUSEPROGRAMPROC glad_glUseProgram = NULL; +PFNGLUSEPROGRAMSTAGESPROC glad_glUseProgramStages = NULL; +PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram = NULL; +PFNGLVALIDATEPROGRAMPIPELINEPROC glad_glValidateProgramPipeline = NULL; +PFNGLVERTEXATTRIB1DPROC glad_glVertexAttrib1d = NULL; +PFNGLVERTEXATTRIB1DVPROC glad_glVertexAttrib1dv = NULL; +PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f = NULL; +PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv = NULL; +PFNGLVERTEXATTRIB1SPROC glad_glVertexAttrib1s = NULL; +PFNGLVERTEXATTRIB1SVPROC glad_glVertexAttrib1sv = NULL; +PFNGLVERTEXATTRIB2DPROC glad_glVertexAttrib2d = NULL; +PFNGLVERTEXATTRIB2DVPROC glad_glVertexAttrib2dv = NULL; +PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f = NULL; +PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv = NULL; +PFNGLVERTEXATTRIB2SPROC glad_glVertexAttrib2s = NULL; +PFNGLVERTEXATTRIB2SVPROC glad_glVertexAttrib2sv = NULL; +PFNGLVERTEXATTRIB3DPROC glad_glVertexAttrib3d = NULL; +PFNGLVERTEXATTRIB3DVPROC glad_glVertexAttrib3dv = NULL; +PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f = NULL; +PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv = NULL; +PFNGLVERTEXATTRIB3SPROC glad_glVertexAttrib3s = NULL; +PFNGLVERTEXATTRIB3SVPROC glad_glVertexAttrib3sv = NULL; +PFNGLVERTEXATTRIB4NBVPROC glad_glVertexAttrib4Nbv = NULL; +PFNGLVERTEXATTRIB4NIVPROC glad_glVertexAttrib4Niv = NULL; +PFNGLVERTEXATTRIB4NSVPROC glad_glVertexAttrib4Nsv = NULL; +PFNGLVERTEXATTRIB4NUBPROC glad_glVertexAttrib4Nub = NULL; +PFNGLVERTEXATTRIB4NUBVPROC glad_glVertexAttrib4Nubv = NULL; +PFNGLVERTEXATTRIB4NUIVPROC glad_glVertexAttrib4Nuiv = NULL; +PFNGLVERTEXATTRIB4NUSVPROC glad_glVertexAttrib4Nusv = NULL; +PFNGLVERTEXATTRIB4BVPROC 
glad_glVertexAttrib4bv = NULL; +PFNGLVERTEXATTRIB4DPROC glad_glVertexAttrib4d = NULL; +PFNGLVERTEXATTRIB4DVPROC glad_glVertexAttrib4dv = NULL; +PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f = NULL; +PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv = NULL; +PFNGLVERTEXATTRIB4IVPROC glad_glVertexAttrib4iv = NULL; +PFNGLVERTEXATTRIB4SPROC glad_glVertexAttrib4s = NULL; +PFNGLVERTEXATTRIB4SVPROC glad_glVertexAttrib4sv = NULL; +PFNGLVERTEXATTRIB4UBVPROC glad_glVertexAttrib4ubv = NULL; +PFNGLVERTEXATTRIB4UIVPROC glad_glVertexAttrib4uiv = NULL; +PFNGLVERTEXATTRIB4USVPROC glad_glVertexAttrib4usv = NULL; +PFNGLVERTEXATTRIBBINDINGPROC glad_glVertexAttribBinding = NULL; +PFNGLVERTEXATTRIBDIVISORPROC glad_glVertexAttribDivisor = NULL; +PFNGLVERTEXATTRIBFORMATPROC glad_glVertexAttribFormat = NULL; +PFNGLVERTEXATTRIBI1IPROC glad_glVertexAttribI1i = NULL; +PFNGLVERTEXATTRIBI1IVPROC glad_glVertexAttribI1iv = NULL; +PFNGLVERTEXATTRIBI1UIPROC glad_glVertexAttribI1ui = NULL; +PFNGLVERTEXATTRIBI1UIVPROC glad_glVertexAttribI1uiv = NULL; +PFNGLVERTEXATTRIBI2IPROC glad_glVertexAttribI2i = NULL; +PFNGLVERTEXATTRIBI2IVPROC glad_glVertexAttribI2iv = NULL; +PFNGLVERTEXATTRIBI2UIPROC glad_glVertexAttribI2ui = NULL; +PFNGLVERTEXATTRIBI2UIVPROC glad_glVertexAttribI2uiv = NULL; +PFNGLVERTEXATTRIBI3IPROC glad_glVertexAttribI3i = NULL; +PFNGLVERTEXATTRIBI3IVPROC glad_glVertexAttribI3iv = NULL; +PFNGLVERTEXATTRIBI3UIPROC glad_glVertexAttribI3ui = NULL; +PFNGLVERTEXATTRIBI3UIVPROC glad_glVertexAttribI3uiv = NULL; +PFNGLVERTEXATTRIBI4BVPROC glad_glVertexAttribI4bv = NULL; +PFNGLVERTEXATTRIBI4IPROC glad_glVertexAttribI4i = NULL; +PFNGLVERTEXATTRIBI4IVPROC glad_glVertexAttribI4iv = NULL; +PFNGLVERTEXATTRIBI4SVPROC glad_glVertexAttribI4sv = NULL; +PFNGLVERTEXATTRIBI4UBVPROC glad_glVertexAttribI4ubv = NULL; +PFNGLVERTEXATTRIBI4UIPROC glad_glVertexAttribI4ui = NULL; +PFNGLVERTEXATTRIBI4UIVPROC glad_glVertexAttribI4uiv = NULL; +PFNGLVERTEXATTRIBI4USVPROC glad_glVertexAttribI4usv = NULL; 
+PFNGLVERTEXATTRIBIFORMATPROC glad_glVertexAttribIFormat = NULL; +PFNGLVERTEXATTRIBIPOINTERPROC glad_glVertexAttribIPointer = NULL; +PFNGLVERTEXATTRIBL1DPROC glad_glVertexAttribL1d = NULL; +PFNGLVERTEXATTRIBL1DVPROC glad_glVertexAttribL1dv = NULL; +PFNGLVERTEXATTRIBL2DPROC glad_glVertexAttribL2d = NULL; +PFNGLVERTEXATTRIBL2DVPROC glad_glVertexAttribL2dv = NULL; +PFNGLVERTEXATTRIBL3DPROC glad_glVertexAttribL3d = NULL; +PFNGLVERTEXATTRIBL3DVPROC glad_glVertexAttribL3dv = NULL; +PFNGLVERTEXATTRIBL4DPROC glad_glVertexAttribL4d = NULL; +PFNGLVERTEXATTRIBL4DVPROC glad_glVertexAttribL4dv = NULL; +PFNGLVERTEXATTRIBLFORMATPROC glad_glVertexAttribLFormat = NULL; +PFNGLVERTEXATTRIBLPOINTERPROC glad_glVertexAttribLPointer = NULL; +PFNGLVERTEXATTRIBP1UIPROC glad_glVertexAttribP1ui = NULL; +PFNGLVERTEXATTRIBP1UIVPROC glad_glVertexAttribP1uiv = NULL; +PFNGLVERTEXATTRIBP2UIPROC glad_glVertexAttribP2ui = NULL; +PFNGLVERTEXATTRIBP2UIVPROC glad_glVertexAttribP2uiv = NULL; +PFNGLVERTEXATTRIBP3UIPROC glad_glVertexAttribP3ui = NULL; +PFNGLVERTEXATTRIBP3UIVPROC glad_glVertexAttribP3uiv = NULL; +PFNGLVERTEXATTRIBP4UIPROC glad_glVertexAttribP4ui = NULL; +PFNGLVERTEXATTRIBP4UIVPROC glad_glVertexAttribP4uiv = NULL; +PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer = NULL; +PFNGLVERTEXBINDINGDIVISORPROC glad_glVertexBindingDivisor = NULL; +PFNGLVIEWPORTPROC glad_glViewport = NULL; +PFNGLVIEWPORTARRAYVPROC glad_glViewportArrayv = NULL; +PFNGLVIEWPORTINDEXEDFPROC glad_glViewportIndexedf = NULL; +PFNGLVIEWPORTINDEXEDFVPROC glad_glViewportIndexedfv = NULL; +PFNGLWAITSYNCPROC glad_glWaitSync = NULL; + + +static void glad_gl_load_GL_VERSION_1_0( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_1_0) return; + glad_glBlendFunc = (PFNGLBLENDFUNCPROC) load(userptr, "glBlendFunc"); + glad_glClear = (PFNGLCLEARPROC) load(userptr, "glClear"); + glad_glClearColor = (PFNGLCLEARCOLORPROC) load(userptr, "glClearColor"); + glad_glClearDepth = (PFNGLCLEARDEPTHPROC) load(userptr, 
"glClearDepth"); + glad_glClearStencil = (PFNGLCLEARSTENCILPROC) load(userptr, "glClearStencil"); + glad_glColorMask = (PFNGLCOLORMASKPROC) load(userptr, "glColorMask"); + glad_glCullFace = (PFNGLCULLFACEPROC) load(userptr, "glCullFace"); + glad_glDepthFunc = (PFNGLDEPTHFUNCPROC) load(userptr, "glDepthFunc"); + glad_glDepthMask = (PFNGLDEPTHMASKPROC) load(userptr, "glDepthMask"); + glad_glDepthRange = (PFNGLDEPTHRANGEPROC) load(userptr, "glDepthRange"); + glad_glDisable = (PFNGLDISABLEPROC) load(userptr, "glDisable"); + glad_glDrawBuffer = (PFNGLDRAWBUFFERPROC) load(userptr, "glDrawBuffer"); + glad_glEnable = (PFNGLENABLEPROC) load(userptr, "glEnable"); + glad_glFinish = (PFNGLFINISHPROC) load(userptr, "glFinish"); + glad_glFlush = (PFNGLFLUSHPROC) load(userptr, "glFlush"); + glad_glFrontFace = (PFNGLFRONTFACEPROC) load(userptr, "glFrontFace"); + glad_glGetBooleanv = (PFNGLGETBOOLEANVPROC) load(userptr, "glGetBooleanv"); + glad_glGetDoublev = (PFNGLGETDOUBLEVPROC) load(userptr, "glGetDoublev"); + glad_glGetError = (PFNGLGETERRORPROC) load(userptr, "glGetError"); + glad_glGetFloatv = (PFNGLGETFLOATVPROC) load(userptr, "glGetFloatv"); + glad_glGetIntegerv = (PFNGLGETINTEGERVPROC) load(userptr, "glGetIntegerv"); + glad_glGetString = (PFNGLGETSTRINGPROC) load(userptr, "glGetString"); + glad_glGetTexImage = (PFNGLGETTEXIMAGEPROC) load(userptr, "glGetTexImage"); + glad_glGetTexLevelParameterfv = (PFNGLGETTEXLEVELPARAMETERFVPROC) load(userptr, "glGetTexLevelParameterfv"); + glad_glGetTexLevelParameteriv = (PFNGLGETTEXLEVELPARAMETERIVPROC) load(userptr, "glGetTexLevelParameteriv"); + glad_glGetTexParameterfv = (PFNGLGETTEXPARAMETERFVPROC) load(userptr, "glGetTexParameterfv"); + glad_glGetTexParameteriv = (PFNGLGETTEXPARAMETERIVPROC) load(userptr, "glGetTexParameteriv"); + glad_glHint = (PFNGLHINTPROC) load(userptr, "glHint"); + glad_glIsEnabled = (PFNGLISENABLEDPROC) load(userptr, "glIsEnabled"); + glad_glLineWidth = (PFNGLLINEWIDTHPROC) load(userptr, "glLineWidth"); + 
glad_glLogicOp = (PFNGLLOGICOPPROC) load(userptr, "glLogicOp"); + glad_glPixelStoref = (PFNGLPIXELSTOREFPROC) load(userptr, "glPixelStoref"); + glad_glPixelStorei = (PFNGLPIXELSTOREIPROC) load(userptr, "glPixelStorei"); + glad_glPointSize = (PFNGLPOINTSIZEPROC) load(userptr, "glPointSize"); + glad_glPolygonMode = (PFNGLPOLYGONMODEPROC) load(userptr, "glPolygonMode"); + glad_glReadBuffer = (PFNGLREADBUFFERPROC) load(userptr, "glReadBuffer"); + glad_glReadPixels = (PFNGLREADPIXELSPROC) load(userptr, "glReadPixels"); + glad_glScissor = (PFNGLSCISSORPROC) load(userptr, "glScissor"); + glad_glStencilFunc = (PFNGLSTENCILFUNCPROC) load(userptr, "glStencilFunc"); + glad_glStencilMask = (PFNGLSTENCILMASKPROC) load(userptr, "glStencilMask"); + glad_glStencilOp = (PFNGLSTENCILOPPROC) load(userptr, "glStencilOp"); + glad_glTexImage1D = (PFNGLTEXIMAGE1DPROC) load(userptr, "glTexImage1D"); + glad_glTexImage2D = (PFNGLTEXIMAGE2DPROC) load(userptr, "glTexImage2D"); + glad_glTexParameterf = (PFNGLTEXPARAMETERFPROC) load(userptr, "glTexParameterf"); + glad_glTexParameterfv = (PFNGLTEXPARAMETERFVPROC) load(userptr, "glTexParameterfv"); + glad_glTexParameteri = (PFNGLTEXPARAMETERIPROC) load(userptr, "glTexParameteri"); + glad_glTexParameteriv = (PFNGLTEXPARAMETERIVPROC) load(userptr, "glTexParameteriv"); + glad_glViewport = (PFNGLVIEWPORTPROC) load(userptr, "glViewport"); +} +static void glad_gl_load_GL_VERSION_1_1( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_1_1) return; + glad_glBindTexture = (PFNGLBINDTEXTUREPROC) load(userptr, "glBindTexture"); + glad_glCopyTexImage1D = (PFNGLCOPYTEXIMAGE1DPROC) load(userptr, "glCopyTexImage1D"); + glad_glCopyTexImage2D = (PFNGLCOPYTEXIMAGE2DPROC) load(userptr, "glCopyTexImage2D"); + glad_glCopyTexSubImage1D = (PFNGLCOPYTEXSUBIMAGE1DPROC) load(userptr, "glCopyTexSubImage1D"); + glad_glCopyTexSubImage2D = (PFNGLCOPYTEXSUBIMAGE2DPROC) load(userptr, "glCopyTexSubImage2D"); + glad_glDeleteTextures = (PFNGLDELETETEXTURESPROC) 
load(userptr, "glDeleteTextures"); + glad_glDrawArrays = (PFNGLDRAWARRAYSPROC) load(userptr, "glDrawArrays"); + glad_glDrawElements = (PFNGLDRAWELEMENTSPROC) load(userptr, "glDrawElements"); + glad_glGenTextures = (PFNGLGENTEXTURESPROC) load(userptr, "glGenTextures"); + glad_glGetPointerv = (PFNGLGETPOINTERVPROC) load(userptr, "glGetPointerv"); + glad_glIsTexture = (PFNGLISTEXTUREPROC) load(userptr, "glIsTexture"); + glad_glPolygonOffset = (PFNGLPOLYGONOFFSETPROC) load(userptr, "glPolygonOffset"); + glad_glTexSubImage1D = (PFNGLTEXSUBIMAGE1DPROC) load(userptr, "glTexSubImage1D"); + glad_glTexSubImage2D = (PFNGLTEXSUBIMAGE2DPROC) load(userptr, "glTexSubImage2D"); +} +static void glad_gl_load_GL_VERSION_1_2( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_1_2) return; + glad_glCopyTexSubImage3D = (PFNGLCOPYTEXSUBIMAGE3DPROC) load(userptr, "glCopyTexSubImage3D"); + glad_glDrawRangeElements = (PFNGLDRAWRANGEELEMENTSPROC) load(userptr, "glDrawRangeElements"); + glad_glTexImage3D = (PFNGLTEXIMAGE3DPROC) load(userptr, "glTexImage3D"); + glad_glTexSubImage3D = (PFNGLTEXSUBIMAGE3DPROC) load(userptr, "glTexSubImage3D"); +} +static void glad_gl_load_GL_VERSION_1_3( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_1_3) return; + glad_glActiveTexture = (PFNGLACTIVETEXTUREPROC) load(userptr, "glActiveTexture"); + glad_glCompressedTexImage1D = (PFNGLCOMPRESSEDTEXIMAGE1DPROC) load(userptr, "glCompressedTexImage1D"); + glad_glCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC) load(userptr, "glCompressedTexImage2D"); + glad_glCompressedTexImage3D = (PFNGLCOMPRESSEDTEXIMAGE3DPROC) load(userptr, "glCompressedTexImage3D"); + glad_glCompressedTexSubImage1D = (PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC) load(userptr, "glCompressedTexSubImage1D"); + glad_glCompressedTexSubImage2D = (PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) load(userptr, "glCompressedTexSubImage2D"); + glad_glCompressedTexSubImage3D = (PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) load(userptr, 
"glCompressedTexSubImage3D"); + glad_glGetCompressedTexImage = (PFNGLGETCOMPRESSEDTEXIMAGEPROC) load(userptr, "glGetCompressedTexImage"); + glad_glSampleCoverage = (PFNGLSAMPLECOVERAGEPROC) load(userptr, "glSampleCoverage"); +} +static void glad_gl_load_GL_VERSION_1_4( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_1_4) return; + glad_glBlendColor = (PFNGLBLENDCOLORPROC) load(userptr, "glBlendColor"); + glad_glBlendEquation = (PFNGLBLENDEQUATIONPROC) load(userptr, "glBlendEquation"); + glad_glBlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEPROC) load(userptr, "glBlendFuncSeparate"); + glad_glMultiDrawArrays = (PFNGLMULTIDRAWARRAYSPROC) load(userptr, "glMultiDrawArrays"); + glad_glMultiDrawElements = (PFNGLMULTIDRAWELEMENTSPROC) load(userptr, "glMultiDrawElements"); + glad_glPointParameterf = (PFNGLPOINTPARAMETERFPROC) load(userptr, "glPointParameterf"); + glad_glPointParameterfv = (PFNGLPOINTPARAMETERFVPROC) load(userptr, "glPointParameterfv"); + glad_glPointParameteri = (PFNGLPOINTPARAMETERIPROC) load(userptr, "glPointParameteri"); + glad_glPointParameteriv = (PFNGLPOINTPARAMETERIVPROC) load(userptr, "glPointParameteriv"); +} +static void glad_gl_load_GL_VERSION_1_5( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_1_5) return; + glad_glBeginQuery = (PFNGLBEGINQUERYPROC) load(userptr, "glBeginQuery"); + glad_glBindBuffer = (PFNGLBINDBUFFERPROC) load(userptr, "glBindBuffer"); + glad_glBufferData = (PFNGLBUFFERDATAPROC) load(userptr, "glBufferData"); + glad_glBufferSubData = (PFNGLBUFFERSUBDATAPROC) load(userptr, "glBufferSubData"); + glad_glDeleteBuffers = (PFNGLDELETEBUFFERSPROC) load(userptr, "glDeleteBuffers"); + glad_glDeleteQueries = (PFNGLDELETEQUERIESPROC) load(userptr, "glDeleteQueries"); + glad_glEndQuery = (PFNGLENDQUERYPROC) load(userptr, "glEndQuery"); + glad_glGenBuffers = (PFNGLGENBUFFERSPROC) load(userptr, "glGenBuffers"); + glad_glGenQueries = (PFNGLGENQUERIESPROC) load(userptr, "glGenQueries"); + 
glad_glGetBufferParameteriv = (PFNGLGETBUFFERPARAMETERIVPROC) load(userptr, "glGetBufferParameteriv"); + glad_glGetBufferPointerv = (PFNGLGETBUFFERPOINTERVPROC) load(userptr, "glGetBufferPointerv"); + glad_glGetBufferSubData = (PFNGLGETBUFFERSUBDATAPROC) load(userptr, "glGetBufferSubData"); + glad_glGetQueryObjectiv = (PFNGLGETQUERYOBJECTIVPROC) load(userptr, "glGetQueryObjectiv"); + glad_glGetQueryObjectuiv = (PFNGLGETQUERYOBJECTUIVPROC) load(userptr, "glGetQueryObjectuiv"); + glad_glGetQueryiv = (PFNGLGETQUERYIVPROC) load(userptr, "glGetQueryiv"); + glad_glIsBuffer = (PFNGLISBUFFERPROC) load(userptr, "glIsBuffer"); + glad_glIsQuery = (PFNGLISQUERYPROC) load(userptr, "glIsQuery"); + glad_glMapBuffer = (PFNGLMAPBUFFERPROC) load(userptr, "glMapBuffer"); + glad_glUnmapBuffer = (PFNGLUNMAPBUFFERPROC) load(userptr, "glUnmapBuffer"); +} +static void glad_gl_load_GL_VERSION_2_0( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_2_0) return; + glad_glAttachShader = (PFNGLATTACHSHADERPROC) load(userptr, "glAttachShader"); + glad_glBindAttribLocation = (PFNGLBINDATTRIBLOCATIONPROC) load(userptr, "glBindAttribLocation"); + glad_glBlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEPROC) load(userptr, "glBlendEquationSeparate"); + glad_glCompileShader = (PFNGLCOMPILESHADERPROC) load(userptr, "glCompileShader"); + glad_glCreateProgram = (PFNGLCREATEPROGRAMPROC) load(userptr, "glCreateProgram"); + glad_glCreateShader = (PFNGLCREATESHADERPROC) load(userptr, "glCreateShader"); + glad_glDeleteProgram = (PFNGLDELETEPROGRAMPROC) load(userptr, "glDeleteProgram"); + glad_glDeleteShader = (PFNGLDELETESHADERPROC) load(userptr, "glDeleteShader"); + glad_glDetachShader = (PFNGLDETACHSHADERPROC) load(userptr, "glDetachShader"); + glad_glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC) load(userptr, "glDisableVertexAttribArray"); + glad_glDrawBuffers = (PFNGLDRAWBUFFERSPROC) load(userptr, "glDrawBuffers"); + glad_glEnableVertexAttribArray = 
(PFNGLENABLEVERTEXATTRIBARRAYPROC) load(userptr, "glEnableVertexAttribArray"); + glad_glGetActiveAttrib = (PFNGLGETACTIVEATTRIBPROC) load(userptr, "glGetActiveAttrib"); + glad_glGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC) load(userptr, "glGetActiveUniform"); + glad_glGetAttachedShaders = (PFNGLGETATTACHEDSHADERSPROC) load(userptr, "glGetAttachedShaders"); + glad_glGetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC) load(userptr, "glGetAttribLocation"); + glad_glGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC) load(userptr, "glGetProgramInfoLog"); + glad_glGetProgramiv = (PFNGLGETPROGRAMIVPROC) load(userptr, "glGetProgramiv"); + glad_glGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC) load(userptr, "glGetShaderInfoLog"); + glad_glGetShaderSource = (PFNGLGETSHADERSOURCEPROC) load(userptr, "glGetShaderSource"); + glad_glGetShaderiv = (PFNGLGETSHADERIVPROC) load(userptr, "glGetShaderiv"); + glad_glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC) load(userptr, "glGetUniformLocation"); + glad_glGetUniformfv = (PFNGLGETUNIFORMFVPROC) load(userptr, "glGetUniformfv"); + glad_glGetUniformiv = (PFNGLGETUNIFORMIVPROC) load(userptr, "glGetUniformiv"); + glad_glGetVertexAttribPointerv = (PFNGLGETVERTEXATTRIBPOINTERVPROC) load(userptr, "glGetVertexAttribPointerv"); + glad_glGetVertexAttribdv = (PFNGLGETVERTEXATTRIBDVPROC) load(userptr, "glGetVertexAttribdv"); + glad_glGetVertexAttribfv = (PFNGLGETVERTEXATTRIBFVPROC) load(userptr, "glGetVertexAttribfv"); + glad_glGetVertexAttribiv = (PFNGLGETVERTEXATTRIBIVPROC) load(userptr, "glGetVertexAttribiv"); + glad_glIsProgram = (PFNGLISPROGRAMPROC) load(userptr, "glIsProgram"); + glad_glIsShader = (PFNGLISSHADERPROC) load(userptr, "glIsShader"); + glad_glLinkProgram = (PFNGLLINKPROGRAMPROC) load(userptr, "glLinkProgram"); + glad_glShaderSource = (PFNGLSHADERSOURCEPROC) load(userptr, "glShaderSource"); + glad_glStencilFuncSeparate = (PFNGLSTENCILFUNCSEPARATEPROC) load(userptr, "glStencilFuncSeparate"); + glad_glStencilMaskSeparate = 
(PFNGLSTENCILMASKSEPARATEPROC) load(userptr, "glStencilMaskSeparate"); + glad_glStencilOpSeparate = (PFNGLSTENCILOPSEPARATEPROC) load(userptr, "glStencilOpSeparate"); + glad_glUniform1f = (PFNGLUNIFORM1FPROC) load(userptr, "glUniform1f"); + glad_glUniform1fv = (PFNGLUNIFORM1FVPROC) load(userptr, "glUniform1fv"); + glad_glUniform1i = (PFNGLUNIFORM1IPROC) load(userptr, "glUniform1i"); + glad_glUniform1iv = (PFNGLUNIFORM1IVPROC) load(userptr, "glUniform1iv"); + glad_glUniform2f = (PFNGLUNIFORM2FPROC) load(userptr, "glUniform2f"); + glad_glUniform2fv = (PFNGLUNIFORM2FVPROC) load(userptr, "glUniform2fv"); + glad_glUniform2i = (PFNGLUNIFORM2IPROC) load(userptr, "glUniform2i"); + glad_glUniform2iv = (PFNGLUNIFORM2IVPROC) load(userptr, "glUniform2iv"); + glad_glUniform3f = (PFNGLUNIFORM3FPROC) load(userptr, "glUniform3f"); + glad_glUniform3fv = (PFNGLUNIFORM3FVPROC) load(userptr, "glUniform3fv"); + glad_glUniform3i = (PFNGLUNIFORM3IPROC) load(userptr, "glUniform3i"); + glad_glUniform3iv = (PFNGLUNIFORM3IVPROC) load(userptr, "glUniform3iv"); + glad_glUniform4f = (PFNGLUNIFORM4FPROC) load(userptr, "glUniform4f"); + glad_glUniform4fv = (PFNGLUNIFORM4FVPROC) load(userptr, "glUniform4fv"); + glad_glUniform4i = (PFNGLUNIFORM4IPROC) load(userptr, "glUniform4i"); + glad_glUniform4iv = (PFNGLUNIFORM4IVPROC) load(userptr, "glUniform4iv"); + glad_glUniformMatrix2fv = (PFNGLUNIFORMMATRIX2FVPROC) load(userptr, "glUniformMatrix2fv"); + glad_glUniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC) load(userptr, "glUniformMatrix3fv"); + glad_glUniformMatrix4fv = (PFNGLUNIFORMMATRIX4FVPROC) load(userptr, "glUniformMatrix4fv"); + glad_glUseProgram = (PFNGLUSEPROGRAMPROC) load(userptr, "glUseProgram"); + glad_glValidateProgram = (PFNGLVALIDATEPROGRAMPROC) load(userptr, "glValidateProgram"); + glad_glVertexAttrib1d = (PFNGLVERTEXATTRIB1DPROC) load(userptr, "glVertexAttrib1d"); + glad_glVertexAttrib1dv = (PFNGLVERTEXATTRIB1DVPROC) load(userptr, "glVertexAttrib1dv"); + glad_glVertexAttrib1f = 
(PFNGLVERTEXATTRIB1FPROC) load(userptr, "glVertexAttrib1f"); + glad_glVertexAttrib1fv = (PFNGLVERTEXATTRIB1FVPROC) load(userptr, "glVertexAttrib1fv"); + glad_glVertexAttrib1s = (PFNGLVERTEXATTRIB1SPROC) load(userptr, "glVertexAttrib1s"); + glad_glVertexAttrib1sv = (PFNGLVERTEXATTRIB1SVPROC) load(userptr, "glVertexAttrib1sv"); + glad_glVertexAttrib2d = (PFNGLVERTEXATTRIB2DPROC) load(userptr, "glVertexAttrib2d"); + glad_glVertexAttrib2dv = (PFNGLVERTEXATTRIB2DVPROC) load(userptr, "glVertexAttrib2dv"); + glad_glVertexAttrib2f = (PFNGLVERTEXATTRIB2FPROC) load(userptr, "glVertexAttrib2f"); + glad_glVertexAttrib2fv = (PFNGLVERTEXATTRIB2FVPROC) load(userptr, "glVertexAttrib2fv"); + glad_glVertexAttrib2s = (PFNGLVERTEXATTRIB2SPROC) load(userptr, "glVertexAttrib2s"); + glad_glVertexAttrib2sv = (PFNGLVERTEXATTRIB2SVPROC) load(userptr, "glVertexAttrib2sv"); + glad_glVertexAttrib3d = (PFNGLVERTEXATTRIB3DPROC) load(userptr, "glVertexAttrib3d"); + glad_glVertexAttrib3dv = (PFNGLVERTEXATTRIB3DVPROC) load(userptr, "glVertexAttrib3dv"); + glad_glVertexAttrib3f = (PFNGLVERTEXATTRIB3FPROC) load(userptr, "glVertexAttrib3f"); + glad_glVertexAttrib3fv = (PFNGLVERTEXATTRIB3FVPROC) load(userptr, "glVertexAttrib3fv"); + glad_glVertexAttrib3s = (PFNGLVERTEXATTRIB3SPROC) load(userptr, "glVertexAttrib3s"); + glad_glVertexAttrib3sv = (PFNGLVERTEXATTRIB3SVPROC) load(userptr, "glVertexAttrib3sv"); + glad_glVertexAttrib4Nbv = (PFNGLVERTEXATTRIB4NBVPROC) load(userptr, "glVertexAttrib4Nbv"); + glad_glVertexAttrib4Niv = (PFNGLVERTEXATTRIB4NIVPROC) load(userptr, "glVertexAttrib4Niv"); + glad_glVertexAttrib4Nsv = (PFNGLVERTEXATTRIB4NSVPROC) load(userptr, "glVertexAttrib4Nsv"); + glad_glVertexAttrib4Nub = (PFNGLVERTEXATTRIB4NUBPROC) load(userptr, "glVertexAttrib4Nub"); + glad_glVertexAttrib4Nubv = (PFNGLVERTEXATTRIB4NUBVPROC) load(userptr, "glVertexAttrib4Nubv"); + glad_glVertexAttrib4Nuiv = (PFNGLVERTEXATTRIB4NUIVPROC) load(userptr, "glVertexAttrib4Nuiv"); + glad_glVertexAttrib4Nusv = 
(PFNGLVERTEXATTRIB4NUSVPROC) load(userptr, "glVertexAttrib4Nusv"); + glad_glVertexAttrib4bv = (PFNGLVERTEXATTRIB4BVPROC) load(userptr, "glVertexAttrib4bv"); + glad_glVertexAttrib4d = (PFNGLVERTEXATTRIB4DPROC) load(userptr, "glVertexAttrib4d"); + glad_glVertexAttrib4dv = (PFNGLVERTEXATTRIB4DVPROC) load(userptr, "glVertexAttrib4dv"); + glad_glVertexAttrib4f = (PFNGLVERTEXATTRIB4FPROC) load(userptr, "glVertexAttrib4f"); + glad_glVertexAttrib4fv = (PFNGLVERTEXATTRIB4FVPROC) load(userptr, "glVertexAttrib4fv"); + glad_glVertexAttrib4iv = (PFNGLVERTEXATTRIB4IVPROC) load(userptr, "glVertexAttrib4iv"); + glad_glVertexAttrib4s = (PFNGLVERTEXATTRIB4SPROC) load(userptr, "glVertexAttrib4s"); + glad_glVertexAttrib4sv = (PFNGLVERTEXATTRIB4SVPROC) load(userptr, "glVertexAttrib4sv"); + glad_glVertexAttrib4ubv = (PFNGLVERTEXATTRIB4UBVPROC) load(userptr, "glVertexAttrib4ubv"); + glad_glVertexAttrib4uiv = (PFNGLVERTEXATTRIB4UIVPROC) load(userptr, "glVertexAttrib4uiv"); + glad_glVertexAttrib4usv = (PFNGLVERTEXATTRIB4USVPROC) load(userptr, "glVertexAttrib4usv"); + glad_glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC) load(userptr, "glVertexAttribPointer"); +} +static void glad_gl_load_GL_VERSION_2_1( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_2_1) return; + glad_glUniformMatrix2x3fv = (PFNGLUNIFORMMATRIX2X3FVPROC) load(userptr, "glUniformMatrix2x3fv"); + glad_glUniformMatrix2x4fv = (PFNGLUNIFORMMATRIX2X4FVPROC) load(userptr, "glUniformMatrix2x4fv"); + glad_glUniformMatrix3x2fv = (PFNGLUNIFORMMATRIX3X2FVPROC) load(userptr, "glUniformMatrix3x2fv"); + glad_glUniformMatrix3x4fv = (PFNGLUNIFORMMATRIX3X4FVPROC) load(userptr, "glUniformMatrix3x4fv"); + glad_glUniformMatrix4x2fv = (PFNGLUNIFORMMATRIX4X2FVPROC) load(userptr, "glUniformMatrix4x2fv"); + glad_glUniformMatrix4x3fv = (PFNGLUNIFORMMATRIX4X3FVPROC) load(userptr, "glUniformMatrix4x3fv"); +} +static void glad_gl_load_GL_VERSION_3_0( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_3_0) return; 
+ glad_glBeginConditionalRender = (PFNGLBEGINCONDITIONALRENDERPROC) load(userptr, "glBeginConditionalRender"); + glad_glBeginTransformFeedback = (PFNGLBEGINTRANSFORMFEEDBACKPROC) load(userptr, "glBeginTransformFeedback"); + glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC) load(userptr, "glBindBufferBase"); + glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC) load(userptr, "glBindBufferRange"); + glad_glBindFragDataLocation = (PFNGLBINDFRAGDATALOCATIONPROC) load(userptr, "glBindFragDataLocation"); + glad_glBindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC) load(userptr, "glBindFramebuffer"); + glad_glBindRenderbuffer = (PFNGLBINDRENDERBUFFERPROC) load(userptr, "glBindRenderbuffer"); + glad_glBindVertexArray = (PFNGLBINDVERTEXARRAYPROC) load(userptr, "glBindVertexArray"); + glad_glBlitFramebuffer = (PFNGLBLITFRAMEBUFFERPROC) load(userptr, "glBlitFramebuffer"); + glad_glCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSPROC) load(userptr, "glCheckFramebufferStatus"); + glad_glClampColor = (PFNGLCLAMPCOLORPROC) load(userptr, "glClampColor"); + glad_glClearBufferfi = (PFNGLCLEARBUFFERFIPROC) load(userptr, "glClearBufferfi"); + glad_glClearBufferfv = (PFNGLCLEARBUFFERFVPROC) load(userptr, "glClearBufferfv"); + glad_glClearBufferiv = (PFNGLCLEARBUFFERIVPROC) load(userptr, "glClearBufferiv"); + glad_glClearBufferuiv = (PFNGLCLEARBUFFERUIVPROC) load(userptr, "glClearBufferuiv"); + glad_glColorMaski = (PFNGLCOLORMASKIPROC) load(userptr, "glColorMaski"); + glad_glDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSPROC) load(userptr, "glDeleteFramebuffers"); + glad_glDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSPROC) load(userptr, "glDeleteRenderbuffers"); + glad_glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSPROC) load(userptr, "glDeleteVertexArrays"); + glad_glDisablei = (PFNGLDISABLEIPROC) load(userptr, "glDisablei"); + glad_glEnablei = (PFNGLENABLEIPROC) load(userptr, "glEnablei"); + glad_glEndConditionalRender = (PFNGLENDCONDITIONALRENDERPROC) load(userptr, 
"glEndConditionalRender"); + glad_glEndTransformFeedback = (PFNGLENDTRANSFORMFEEDBACKPROC) load(userptr, "glEndTransformFeedback"); + glad_glFlushMappedBufferRange = (PFNGLFLUSHMAPPEDBUFFERRANGEPROC) load(userptr, "glFlushMappedBufferRange"); + glad_glFramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFERPROC) load(userptr, "glFramebufferRenderbuffer"); + glad_glFramebufferTexture1D = (PFNGLFRAMEBUFFERTEXTURE1DPROC) load(userptr, "glFramebufferTexture1D"); + glad_glFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DPROC) load(userptr, "glFramebufferTexture2D"); + glad_glFramebufferTexture3D = (PFNGLFRAMEBUFFERTEXTURE3DPROC) load(userptr, "glFramebufferTexture3D"); + glad_glFramebufferTextureLayer = (PFNGLFRAMEBUFFERTEXTURELAYERPROC) load(userptr, "glFramebufferTextureLayer"); + glad_glGenFramebuffers = (PFNGLGENFRAMEBUFFERSPROC) load(userptr, "glGenFramebuffers"); + glad_glGenRenderbuffers = (PFNGLGENRENDERBUFFERSPROC) load(userptr, "glGenRenderbuffers"); + glad_glGenVertexArrays = (PFNGLGENVERTEXARRAYSPROC) load(userptr, "glGenVertexArrays"); + glad_glGenerateMipmap = (PFNGLGENERATEMIPMAPPROC) load(userptr, "glGenerateMipmap"); + glad_glGetBooleani_v = (PFNGLGETBOOLEANI_VPROC) load(userptr, "glGetBooleani_v"); + glad_glGetFragDataLocation = (PFNGLGETFRAGDATALOCATIONPROC) load(userptr, "glGetFragDataLocation"); + glad_glGetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) load(userptr, "glGetFramebufferAttachmentParameteriv"); + glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC) load(userptr, "glGetIntegeri_v"); + glad_glGetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVPROC) load(userptr, "glGetRenderbufferParameteriv"); + glad_glGetStringi = (PFNGLGETSTRINGIPROC) load(userptr, "glGetStringi"); + glad_glGetTexParameterIiv = (PFNGLGETTEXPARAMETERIIVPROC) load(userptr, "glGetTexParameterIiv"); + glad_glGetTexParameterIuiv = (PFNGLGETTEXPARAMETERIUIVPROC) load(userptr, "glGetTexParameterIuiv"); + 
glad_glGetTransformFeedbackVarying = (PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) load(userptr, "glGetTransformFeedbackVarying"); + glad_glGetUniformuiv = (PFNGLGETUNIFORMUIVPROC) load(userptr, "glGetUniformuiv"); + glad_glGetVertexAttribIiv = (PFNGLGETVERTEXATTRIBIIVPROC) load(userptr, "glGetVertexAttribIiv"); + glad_glGetVertexAttribIuiv = (PFNGLGETVERTEXATTRIBIUIVPROC) load(userptr, "glGetVertexAttribIuiv"); + glad_glIsEnabledi = (PFNGLISENABLEDIPROC) load(userptr, "glIsEnabledi"); + glad_glIsFramebuffer = (PFNGLISFRAMEBUFFERPROC) load(userptr, "glIsFramebuffer"); + glad_glIsRenderbuffer = (PFNGLISRENDERBUFFERPROC) load(userptr, "glIsRenderbuffer"); + glad_glIsVertexArray = (PFNGLISVERTEXARRAYPROC) load(userptr, "glIsVertexArray"); + glad_glMapBufferRange = (PFNGLMAPBUFFERRANGEPROC) load(userptr, "glMapBufferRange"); + glad_glRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEPROC) load(userptr, "glRenderbufferStorage"); + glad_glRenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) load(userptr, "glRenderbufferStorageMultisample"); + glad_glTexParameterIiv = (PFNGLTEXPARAMETERIIVPROC) load(userptr, "glTexParameterIiv"); + glad_glTexParameterIuiv = (PFNGLTEXPARAMETERIUIVPROC) load(userptr, "glTexParameterIuiv"); + glad_glTransformFeedbackVaryings = (PFNGLTRANSFORMFEEDBACKVARYINGSPROC) load(userptr, "glTransformFeedbackVaryings"); + glad_glUniform1ui = (PFNGLUNIFORM1UIPROC) load(userptr, "glUniform1ui"); + glad_glUniform1uiv = (PFNGLUNIFORM1UIVPROC) load(userptr, "glUniform1uiv"); + glad_glUniform2ui = (PFNGLUNIFORM2UIPROC) load(userptr, "glUniform2ui"); + glad_glUniform2uiv = (PFNGLUNIFORM2UIVPROC) load(userptr, "glUniform2uiv"); + glad_glUniform3ui = (PFNGLUNIFORM3UIPROC) load(userptr, "glUniform3ui"); + glad_glUniform3uiv = (PFNGLUNIFORM3UIVPROC) load(userptr, "glUniform3uiv"); + glad_glUniform4ui = (PFNGLUNIFORM4UIPROC) load(userptr, "glUniform4ui"); + glad_glUniform4uiv = (PFNGLUNIFORM4UIVPROC) load(userptr, "glUniform4uiv"); + 
glad_glVertexAttribI1i = (PFNGLVERTEXATTRIBI1IPROC) load(userptr, "glVertexAttribI1i"); + glad_glVertexAttribI1iv = (PFNGLVERTEXATTRIBI1IVPROC) load(userptr, "glVertexAttribI1iv"); + glad_glVertexAttribI1ui = (PFNGLVERTEXATTRIBI1UIPROC) load(userptr, "glVertexAttribI1ui"); + glad_glVertexAttribI1uiv = (PFNGLVERTEXATTRIBI1UIVPROC) load(userptr, "glVertexAttribI1uiv"); + glad_glVertexAttribI2i = (PFNGLVERTEXATTRIBI2IPROC) load(userptr, "glVertexAttribI2i"); + glad_glVertexAttribI2iv = (PFNGLVERTEXATTRIBI2IVPROC) load(userptr, "glVertexAttribI2iv"); + glad_glVertexAttribI2ui = (PFNGLVERTEXATTRIBI2UIPROC) load(userptr, "glVertexAttribI2ui"); + glad_glVertexAttribI2uiv = (PFNGLVERTEXATTRIBI2UIVPROC) load(userptr, "glVertexAttribI2uiv"); + glad_glVertexAttribI3i = (PFNGLVERTEXATTRIBI3IPROC) load(userptr, "glVertexAttribI3i"); + glad_glVertexAttribI3iv = (PFNGLVERTEXATTRIBI3IVPROC) load(userptr, "glVertexAttribI3iv"); + glad_glVertexAttribI3ui = (PFNGLVERTEXATTRIBI3UIPROC) load(userptr, "glVertexAttribI3ui"); + glad_glVertexAttribI3uiv = (PFNGLVERTEXATTRIBI3UIVPROC) load(userptr, "glVertexAttribI3uiv"); + glad_glVertexAttribI4bv = (PFNGLVERTEXATTRIBI4BVPROC) load(userptr, "glVertexAttribI4bv"); + glad_glVertexAttribI4i = (PFNGLVERTEXATTRIBI4IPROC) load(userptr, "glVertexAttribI4i"); + glad_glVertexAttribI4iv = (PFNGLVERTEXATTRIBI4IVPROC) load(userptr, "glVertexAttribI4iv"); + glad_glVertexAttribI4sv = (PFNGLVERTEXATTRIBI4SVPROC) load(userptr, "glVertexAttribI4sv"); + glad_glVertexAttribI4ubv = (PFNGLVERTEXATTRIBI4UBVPROC) load(userptr, "glVertexAttribI4ubv"); + glad_glVertexAttribI4ui = (PFNGLVERTEXATTRIBI4UIPROC) load(userptr, "glVertexAttribI4ui"); + glad_glVertexAttribI4uiv = (PFNGLVERTEXATTRIBI4UIVPROC) load(userptr, "glVertexAttribI4uiv"); + glad_glVertexAttribI4usv = (PFNGLVERTEXATTRIBI4USVPROC) load(userptr, "glVertexAttribI4usv"); + glad_glVertexAttribIPointer = (PFNGLVERTEXATTRIBIPOINTERPROC) load(userptr, "glVertexAttribIPointer"); +} +static void 
glad_gl_load_GL_VERSION_3_1( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_3_1) return; + glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC) load(userptr, "glBindBufferBase"); + glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC) load(userptr, "glBindBufferRange"); + glad_glCopyBufferSubData = (PFNGLCOPYBUFFERSUBDATAPROC) load(userptr, "glCopyBufferSubData"); + glad_glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) load(userptr, "glDrawArraysInstanced"); + glad_glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) load(userptr, "glDrawElementsInstanced"); + glad_glGetActiveUniformBlockName = (PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) load(userptr, "glGetActiveUniformBlockName"); + glad_glGetActiveUniformBlockiv = (PFNGLGETACTIVEUNIFORMBLOCKIVPROC) load(userptr, "glGetActiveUniformBlockiv"); + glad_glGetActiveUniformName = (PFNGLGETACTIVEUNIFORMNAMEPROC) load(userptr, "glGetActiveUniformName"); + glad_glGetActiveUniformsiv = (PFNGLGETACTIVEUNIFORMSIVPROC) load(userptr, "glGetActiveUniformsiv"); + glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC) load(userptr, "glGetIntegeri_v"); + glad_glGetUniformBlockIndex = (PFNGLGETUNIFORMBLOCKINDEXPROC) load(userptr, "glGetUniformBlockIndex"); + glad_glGetUniformIndices = (PFNGLGETUNIFORMINDICESPROC) load(userptr, "glGetUniformIndices"); + glad_glPrimitiveRestartIndex = (PFNGLPRIMITIVERESTARTINDEXPROC) load(userptr, "glPrimitiveRestartIndex"); + glad_glTexBuffer = (PFNGLTEXBUFFERPROC) load(userptr, "glTexBuffer"); + glad_glUniformBlockBinding = (PFNGLUNIFORMBLOCKBINDINGPROC) load(userptr, "glUniformBlockBinding"); +} +static void glad_gl_load_GL_VERSION_3_2( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_3_2) return; + glad_glClientWaitSync = (PFNGLCLIENTWAITSYNCPROC) load(userptr, "glClientWaitSync"); + glad_glDeleteSync = (PFNGLDELETESYNCPROC) load(userptr, "glDeleteSync"); + glad_glDrawElementsBaseVertex = (PFNGLDRAWELEMENTSBASEVERTEXPROC) load(userptr, "glDrawElementsBaseVertex"); + 
glad_glDrawElementsInstancedBaseVertex = (PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC) load(userptr, "glDrawElementsInstancedBaseVertex"); + glad_glDrawRangeElementsBaseVertex = (PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC) load(userptr, "glDrawRangeElementsBaseVertex"); + glad_glFenceSync = (PFNGLFENCESYNCPROC) load(userptr, "glFenceSync"); + glad_glFramebufferTexture = (PFNGLFRAMEBUFFERTEXTUREPROC) load(userptr, "glFramebufferTexture"); + glad_glGetBufferParameteri64v = (PFNGLGETBUFFERPARAMETERI64VPROC) load(userptr, "glGetBufferParameteri64v"); + glad_glGetInteger64i_v = (PFNGLGETINTEGER64I_VPROC) load(userptr, "glGetInteger64i_v"); + glad_glGetInteger64v = (PFNGLGETINTEGER64VPROC) load(userptr, "glGetInteger64v"); + glad_glGetMultisamplefv = (PFNGLGETMULTISAMPLEFVPROC) load(userptr, "glGetMultisamplefv"); + glad_glGetSynciv = (PFNGLGETSYNCIVPROC) load(userptr, "glGetSynciv"); + glad_glIsSync = (PFNGLISSYNCPROC) load(userptr, "glIsSync"); + glad_glMultiDrawElementsBaseVertex = (PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC) load(userptr, "glMultiDrawElementsBaseVertex"); + glad_glProvokingVertex = (PFNGLPROVOKINGVERTEXPROC) load(userptr, "glProvokingVertex"); + glad_glSampleMaski = (PFNGLSAMPLEMASKIPROC) load(userptr, "glSampleMaski"); + glad_glTexImage2DMultisample = (PFNGLTEXIMAGE2DMULTISAMPLEPROC) load(userptr, "glTexImage2DMultisample"); + glad_glTexImage3DMultisample = (PFNGLTEXIMAGE3DMULTISAMPLEPROC) load(userptr, "glTexImage3DMultisample"); + glad_glWaitSync = (PFNGLWAITSYNCPROC) load(userptr, "glWaitSync"); +} +static void glad_gl_load_GL_VERSION_3_3( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_3_3) return; + glad_glBindFragDataLocationIndexed = (PFNGLBINDFRAGDATALOCATIONINDEXEDPROC) load(userptr, "glBindFragDataLocationIndexed"); + glad_glBindSampler = (PFNGLBINDSAMPLERPROC) load(userptr, "glBindSampler"); + glad_glDeleteSamplers = (PFNGLDELETESAMPLERSPROC) load(userptr, "glDeleteSamplers"); + glad_glGenSamplers = (PFNGLGENSAMPLERSPROC) load(userptr, 
"glGenSamplers"); + glad_glGetFragDataIndex = (PFNGLGETFRAGDATAINDEXPROC) load(userptr, "glGetFragDataIndex"); + glad_glGetQueryObjecti64v = (PFNGLGETQUERYOBJECTI64VPROC) load(userptr, "glGetQueryObjecti64v"); + glad_glGetQueryObjectui64v = (PFNGLGETQUERYOBJECTUI64VPROC) load(userptr, "glGetQueryObjectui64v"); + glad_glGetSamplerParameterIiv = (PFNGLGETSAMPLERPARAMETERIIVPROC) load(userptr, "glGetSamplerParameterIiv"); + glad_glGetSamplerParameterIuiv = (PFNGLGETSAMPLERPARAMETERIUIVPROC) load(userptr, "glGetSamplerParameterIuiv"); + glad_glGetSamplerParameterfv = (PFNGLGETSAMPLERPARAMETERFVPROC) load(userptr, "glGetSamplerParameterfv"); + glad_glGetSamplerParameteriv = (PFNGLGETSAMPLERPARAMETERIVPROC) load(userptr, "glGetSamplerParameteriv"); + glad_glIsSampler = (PFNGLISSAMPLERPROC) load(userptr, "glIsSampler"); + glad_glQueryCounter = (PFNGLQUERYCOUNTERPROC) load(userptr, "glQueryCounter"); + glad_glSamplerParameterIiv = (PFNGLSAMPLERPARAMETERIIVPROC) load(userptr, "glSamplerParameterIiv"); + glad_glSamplerParameterIuiv = (PFNGLSAMPLERPARAMETERIUIVPROC) load(userptr, "glSamplerParameterIuiv"); + glad_glSamplerParameterf = (PFNGLSAMPLERPARAMETERFPROC) load(userptr, "glSamplerParameterf"); + glad_glSamplerParameterfv = (PFNGLSAMPLERPARAMETERFVPROC) load(userptr, "glSamplerParameterfv"); + glad_glSamplerParameteri = (PFNGLSAMPLERPARAMETERIPROC) load(userptr, "glSamplerParameteri"); + glad_glSamplerParameteriv = (PFNGLSAMPLERPARAMETERIVPROC) load(userptr, "glSamplerParameteriv"); + glad_glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC) load(userptr, "glVertexAttribDivisor"); + glad_glVertexAttribP1ui = (PFNGLVERTEXATTRIBP1UIPROC) load(userptr, "glVertexAttribP1ui"); + glad_glVertexAttribP1uiv = (PFNGLVERTEXATTRIBP1UIVPROC) load(userptr, "glVertexAttribP1uiv"); + glad_glVertexAttribP2ui = (PFNGLVERTEXATTRIBP2UIPROC) load(userptr, "glVertexAttribP2ui"); + glad_glVertexAttribP2uiv = (PFNGLVERTEXATTRIBP2UIVPROC) load(userptr, "glVertexAttribP2uiv"); + 
glad_glVertexAttribP3ui = (PFNGLVERTEXATTRIBP3UIPROC) load(userptr, "glVertexAttribP3ui"); + glad_glVertexAttribP3uiv = (PFNGLVERTEXATTRIBP3UIVPROC) load(userptr, "glVertexAttribP3uiv"); + glad_glVertexAttribP4ui = (PFNGLVERTEXATTRIBP4UIPROC) load(userptr, "glVertexAttribP4ui"); + glad_glVertexAttribP4uiv = (PFNGLVERTEXATTRIBP4UIVPROC) load(userptr, "glVertexAttribP4uiv"); +} +static void glad_gl_load_GL_VERSION_4_0( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_4_0) return; + glad_glBeginQueryIndexed = (PFNGLBEGINQUERYINDEXEDPROC) load(userptr, "glBeginQueryIndexed"); + glad_glBindTransformFeedback = (PFNGLBINDTRANSFORMFEEDBACKPROC) load(userptr, "glBindTransformFeedback"); + glad_glBlendEquationSeparatei = (PFNGLBLENDEQUATIONSEPARATEIPROC) load(userptr, "glBlendEquationSeparatei"); + glad_glBlendEquationi = (PFNGLBLENDEQUATIONIPROC) load(userptr, "glBlendEquationi"); + glad_glBlendFuncSeparatei = (PFNGLBLENDFUNCSEPARATEIPROC) load(userptr, "glBlendFuncSeparatei"); + glad_glBlendFunci = (PFNGLBLENDFUNCIPROC) load(userptr, "glBlendFunci"); + glad_glDeleteTransformFeedbacks = (PFNGLDELETETRANSFORMFEEDBACKSPROC) load(userptr, "glDeleteTransformFeedbacks"); + glad_glDrawArraysIndirect = (PFNGLDRAWARRAYSINDIRECTPROC) load(userptr, "glDrawArraysIndirect"); + glad_glDrawElementsIndirect = (PFNGLDRAWELEMENTSINDIRECTPROC) load(userptr, "glDrawElementsIndirect"); + glad_glDrawTransformFeedback = (PFNGLDRAWTRANSFORMFEEDBACKPROC) load(userptr, "glDrawTransformFeedback"); + glad_glDrawTransformFeedbackStream = (PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC) load(userptr, "glDrawTransformFeedbackStream"); + glad_glEndQueryIndexed = (PFNGLENDQUERYINDEXEDPROC) load(userptr, "glEndQueryIndexed"); + glad_glGenTransformFeedbacks = (PFNGLGENTRANSFORMFEEDBACKSPROC) load(userptr, "glGenTransformFeedbacks"); + glad_glGetActiveSubroutineName = (PFNGLGETACTIVESUBROUTINENAMEPROC) load(userptr, "glGetActiveSubroutineName"); + glad_glGetActiveSubroutineUniformName = 
(PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC) load(userptr, "glGetActiveSubroutineUniformName"); + glad_glGetActiveSubroutineUniformiv = (PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC) load(userptr, "glGetActiveSubroutineUniformiv"); + glad_glGetProgramStageiv = (PFNGLGETPROGRAMSTAGEIVPROC) load(userptr, "glGetProgramStageiv"); + glad_glGetQueryIndexediv = (PFNGLGETQUERYINDEXEDIVPROC) load(userptr, "glGetQueryIndexediv"); + glad_glGetSubroutineIndex = (PFNGLGETSUBROUTINEINDEXPROC) load(userptr, "glGetSubroutineIndex"); + glad_glGetSubroutineUniformLocation = (PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC) load(userptr, "glGetSubroutineUniformLocation"); + glad_glGetUniformSubroutineuiv = (PFNGLGETUNIFORMSUBROUTINEUIVPROC) load(userptr, "glGetUniformSubroutineuiv"); + glad_glGetUniformdv = (PFNGLGETUNIFORMDVPROC) load(userptr, "glGetUniformdv"); + glad_glIsTransformFeedback = (PFNGLISTRANSFORMFEEDBACKPROC) load(userptr, "glIsTransformFeedback"); + glad_glMinSampleShading = (PFNGLMINSAMPLESHADINGPROC) load(userptr, "glMinSampleShading"); + glad_glPatchParameterfv = (PFNGLPATCHPARAMETERFVPROC) load(userptr, "glPatchParameterfv"); + glad_glPatchParameteri = (PFNGLPATCHPARAMETERIPROC) load(userptr, "glPatchParameteri"); + glad_glPauseTransformFeedback = (PFNGLPAUSETRANSFORMFEEDBACKPROC) load(userptr, "glPauseTransformFeedback"); + glad_glResumeTransformFeedback = (PFNGLRESUMETRANSFORMFEEDBACKPROC) load(userptr, "glResumeTransformFeedback"); + glad_glUniform1d = (PFNGLUNIFORM1DPROC) load(userptr, "glUniform1d"); + glad_glUniform1dv = (PFNGLUNIFORM1DVPROC) load(userptr, "glUniform1dv"); + glad_glUniform2d = (PFNGLUNIFORM2DPROC) load(userptr, "glUniform2d"); + glad_glUniform2dv = (PFNGLUNIFORM2DVPROC) load(userptr, "glUniform2dv"); + glad_glUniform3d = (PFNGLUNIFORM3DPROC) load(userptr, "glUniform3d"); + glad_glUniform3dv = (PFNGLUNIFORM3DVPROC) load(userptr, "glUniform3dv"); + glad_glUniform4d = (PFNGLUNIFORM4DPROC) load(userptr, "glUniform4d"); + glad_glUniform4dv = (PFNGLUNIFORM4DVPROC) 
load(userptr, "glUniform4dv"); + glad_glUniformMatrix2dv = (PFNGLUNIFORMMATRIX2DVPROC) load(userptr, "glUniformMatrix2dv"); + glad_glUniformMatrix2x3dv = (PFNGLUNIFORMMATRIX2X3DVPROC) load(userptr, "glUniformMatrix2x3dv"); + glad_glUniformMatrix2x4dv = (PFNGLUNIFORMMATRIX2X4DVPROC) load(userptr, "glUniformMatrix2x4dv"); + glad_glUniformMatrix3dv = (PFNGLUNIFORMMATRIX3DVPROC) load(userptr, "glUniformMatrix3dv"); + glad_glUniformMatrix3x2dv = (PFNGLUNIFORMMATRIX3X2DVPROC) load(userptr, "glUniformMatrix3x2dv"); + glad_glUniformMatrix3x4dv = (PFNGLUNIFORMMATRIX3X4DVPROC) load(userptr, "glUniformMatrix3x4dv"); + glad_glUniformMatrix4dv = (PFNGLUNIFORMMATRIX4DVPROC) load(userptr, "glUniformMatrix4dv"); + glad_glUniformMatrix4x2dv = (PFNGLUNIFORMMATRIX4X2DVPROC) load(userptr, "glUniformMatrix4x2dv"); + glad_glUniformMatrix4x3dv = (PFNGLUNIFORMMATRIX4X3DVPROC) load(userptr, "glUniformMatrix4x3dv"); + glad_glUniformSubroutinesuiv = (PFNGLUNIFORMSUBROUTINESUIVPROC) load(userptr, "glUniformSubroutinesuiv"); +} +static void glad_gl_load_GL_VERSION_4_1( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_4_1) return; + glad_glActiveShaderProgram = (PFNGLACTIVESHADERPROGRAMPROC) load(userptr, "glActiveShaderProgram"); + glad_glBindProgramPipeline = (PFNGLBINDPROGRAMPIPELINEPROC) load(userptr, "glBindProgramPipeline"); + glad_glClearDepthf = (PFNGLCLEARDEPTHFPROC) load(userptr, "glClearDepthf"); + glad_glCreateShaderProgramv = (PFNGLCREATESHADERPROGRAMVPROC) load(userptr, "glCreateShaderProgramv"); + glad_glDeleteProgramPipelines = (PFNGLDELETEPROGRAMPIPELINESPROC) load(userptr, "glDeleteProgramPipelines"); + glad_glDepthRangeArrayv = (PFNGLDEPTHRANGEARRAYVPROC) load(userptr, "glDepthRangeArrayv"); + glad_glDepthRangeIndexed = (PFNGLDEPTHRANGEINDEXEDPROC) load(userptr, "glDepthRangeIndexed"); + glad_glDepthRangef = (PFNGLDEPTHRANGEFPROC) load(userptr, "glDepthRangef"); + glad_glGenProgramPipelines = (PFNGLGENPROGRAMPIPELINESPROC) load(userptr, 
"glGenProgramPipelines"); + glad_glGetDoublei_v = (PFNGLGETDOUBLEI_VPROC) load(userptr, "glGetDoublei_v"); + glad_glGetFloati_v = (PFNGLGETFLOATI_VPROC) load(userptr, "glGetFloati_v"); + glad_glGetProgramBinary = (PFNGLGETPROGRAMBINARYPROC) load(userptr, "glGetProgramBinary"); + glad_glGetProgramPipelineInfoLog = (PFNGLGETPROGRAMPIPELINEINFOLOGPROC) load(userptr, "glGetProgramPipelineInfoLog"); + glad_glGetProgramPipelineiv = (PFNGLGETPROGRAMPIPELINEIVPROC) load(userptr, "glGetProgramPipelineiv"); + glad_glGetShaderPrecisionFormat = (PFNGLGETSHADERPRECISIONFORMATPROC) load(userptr, "glGetShaderPrecisionFormat"); + glad_glGetVertexAttribLdv = (PFNGLGETVERTEXATTRIBLDVPROC) load(userptr, "glGetVertexAttribLdv"); + glad_glIsProgramPipeline = (PFNGLISPROGRAMPIPELINEPROC) load(userptr, "glIsProgramPipeline"); + glad_glProgramBinary = (PFNGLPROGRAMBINARYPROC) load(userptr, "glProgramBinary"); + glad_glProgramParameteri = (PFNGLPROGRAMPARAMETERIPROC) load(userptr, "glProgramParameteri"); + glad_glProgramUniform1d = (PFNGLPROGRAMUNIFORM1DPROC) load(userptr, "glProgramUniform1d"); + glad_glProgramUniform1dv = (PFNGLPROGRAMUNIFORM1DVPROC) load(userptr, "glProgramUniform1dv"); + glad_glProgramUniform1f = (PFNGLPROGRAMUNIFORM1FPROC) load(userptr, "glProgramUniform1f"); + glad_glProgramUniform1fv = (PFNGLPROGRAMUNIFORM1FVPROC) load(userptr, "glProgramUniform1fv"); + glad_glProgramUniform1i = (PFNGLPROGRAMUNIFORM1IPROC) load(userptr, "glProgramUniform1i"); + glad_glProgramUniform1iv = (PFNGLPROGRAMUNIFORM1IVPROC) load(userptr, "glProgramUniform1iv"); + glad_glProgramUniform1ui = (PFNGLPROGRAMUNIFORM1UIPROC) load(userptr, "glProgramUniform1ui"); + glad_glProgramUniform1uiv = (PFNGLPROGRAMUNIFORM1UIVPROC) load(userptr, "glProgramUniform1uiv"); + glad_glProgramUniform2d = (PFNGLPROGRAMUNIFORM2DPROC) load(userptr, "glProgramUniform2d"); + glad_glProgramUniform2dv = (PFNGLPROGRAMUNIFORM2DVPROC) load(userptr, "glProgramUniform2dv"); + glad_glProgramUniform2f = 
(PFNGLPROGRAMUNIFORM2FPROC) load(userptr, "glProgramUniform2f"); + glad_glProgramUniform2fv = (PFNGLPROGRAMUNIFORM2FVPROC) load(userptr, "glProgramUniform2fv"); + glad_glProgramUniform2i = (PFNGLPROGRAMUNIFORM2IPROC) load(userptr, "glProgramUniform2i"); + glad_glProgramUniform2iv = (PFNGLPROGRAMUNIFORM2IVPROC) load(userptr, "glProgramUniform2iv"); + glad_glProgramUniform2ui = (PFNGLPROGRAMUNIFORM2UIPROC) load(userptr, "glProgramUniform2ui"); + glad_glProgramUniform2uiv = (PFNGLPROGRAMUNIFORM2UIVPROC) load(userptr, "glProgramUniform2uiv"); + glad_glProgramUniform3d = (PFNGLPROGRAMUNIFORM3DPROC) load(userptr, "glProgramUniform3d"); + glad_glProgramUniform3dv = (PFNGLPROGRAMUNIFORM3DVPROC) load(userptr, "glProgramUniform3dv"); + glad_glProgramUniform3f = (PFNGLPROGRAMUNIFORM3FPROC) load(userptr, "glProgramUniform3f"); + glad_glProgramUniform3fv = (PFNGLPROGRAMUNIFORM3FVPROC) load(userptr, "glProgramUniform3fv"); + glad_glProgramUniform3i = (PFNGLPROGRAMUNIFORM3IPROC) load(userptr, "glProgramUniform3i"); + glad_glProgramUniform3iv = (PFNGLPROGRAMUNIFORM3IVPROC) load(userptr, "glProgramUniform3iv"); + glad_glProgramUniform3ui = (PFNGLPROGRAMUNIFORM3UIPROC) load(userptr, "glProgramUniform3ui"); + glad_glProgramUniform3uiv = (PFNGLPROGRAMUNIFORM3UIVPROC) load(userptr, "glProgramUniform3uiv"); + glad_glProgramUniform4d = (PFNGLPROGRAMUNIFORM4DPROC) load(userptr, "glProgramUniform4d"); + glad_glProgramUniform4dv = (PFNGLPROGRAMUNIFORM4DVPROC) load(userptr, "glProgramUniform4dv"); + glad_glProgramUniform4f = (PFNGLPROGRAMUNIFORM4FPROC) load(userptr, "glProgramUniform4f"); + glad_glProgramUniform4fv = (PFNGLPROGRAMUNIFORM4FVPROC) load(userptr, "glProgramUniform4fv"); + glad_glProgramUniform4i = (PFNGLPROGRAMUNIFORM4IPROC) load(userptr, "glProgramUniform4i"); + glad_glProgramUniform4iv = (PFNGLPROGRAMUNIFORM4IVPROC) load(userptr, "glProgramUniform4iv"); + glad_glProgramUniform4ui = (PFNGLPROGRAMUNIFORM4UIPROC) load(userptr, "glProgramUniform4ui"); + glad_glProgramUniform4uiv = 
(PFNGLPROGRAMUNIFORM4UIVPROC) load(userptr, "glProgramUniform4uiv"); + glad_glProgramUniformMatrix2dv = (PFNGLPROGRAMUNIFORMMATRIX2DVPROC) load(userptr, "glProgramUniformMatrix2dv"); + glad_glProgramUniformMatrix2fv = (PFNGLPROGRAMUNIFORMMATRIX2FVPROC) load(userptr, "glProgramUniformMatrix2fv"); + glad_glProgramUniformMatrix2x3dv = (PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC) load(userptr, "glProgramUniformMatrix2x3dv"); + glad_glProgramUniformMatrix2x3fv = (PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC) load(userptr, "glProgramUniformMatrix2x3fv"); + glad_glProgramUniformMatrix2x4dv = (PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC) load(userptr, "glProgramUniformMatrix2x4dv"); + glad_glProgramUniformMatrix2x4fv = (PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC) load(userptr, "glProgramUniformMatrix2x4fv"); + glad_glProgramUniformMatrix3dv = (PFNGLPROGRAMUNIFORMMATRIX3DVPROC) load(userptr, "glProgramUniformMatrix3dv"); + glad_glProgramUniformMatrix3fv = (PFNGLPROGRAMUNIFORMMATRIX3FVPROC) load(userptr, "glProgramUniformMatrix3fv"); + glad_glProgramUniformMatrix3x2dv = (PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC) load(userptr, "glProgramUniformMatrix3x2dv"); + glad_glProgramUniformMatrix3x2fv = (PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC) load(userptr, "glProgramUniformMatrix3x2fv"); + glad_glProgramUniformMatrix3x4dv = (PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC) load(userptr, "glProgramUniformMatrix3x4dv"); + glad_glProgramUniformMatrix3x4fv = (PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC) load(userptr, "glProgramUniformMatrix3x4fv"); + glad_glProgramUniformMatrix4dv = (PFNGLPROGRAMUNIFORMMATRIX4DVPROC) load(userptr, "glProgramUniformMatrix4dv"); + glad_glProgramUniformMatrix4fv = (PFNGLPROGRAMUNIFORMMATRIX4FVPROC) load(userptr, "glProgramUniformMatrix4fv"); + glad_glProgramUniformMatrix4x2dv = (PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC) load(userptr, "glProgramUniformMatrix4x2dv"); + glad_glProgramUniformMatrix4x2fv = (PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC) load(userptr, "glProgramUniformMatrix4x2fv"); + glad_glProgramUniformMatrix4x3dv = 
(PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC) load(userptr, "glProgramUniformMatrix4x3dv"); + glad_glProgramUniformMatrix4x3fv = (PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC) load(userptr, "glProgramUniformMatrix4x3fv"); + glad_glReleaseShaderCompiler = (PFNGLRELEASESHADERCOMPILERPROC) load(userptr, "glReleaseShaderCompiler"); + glad_glScissorArrayv = (PFNGLSCISSORARRAYVPROC) load(userptr, "glScissorArrayv"); + glad_glScissorIndexed = (PFNGLSCISSORINDEXEDPROC) load(userptr, "glScissorIndexed"); + glad_glScissorIndexedv = (PFNGLSCISSORINDEXEDVPROC) load(userptr, "glScissorIndexedv"); + glad_glShaderBinary = (PFNGLSHADERBINARYPROC) load(userptr, "glShaderBinary"); + glad_glUseProgramStages = (PFNGLUSEPROGRAMSTAGESPROC) load(userptr, "glUseProgramStages"); + glad_glValidateProgramPipeline = (PFNGLVALIDATEPROGRAMPIPELINEPROC) load(userptr, "glValidateProgramPipeline"); + glad_glVertexAttribL1d = (PFNGLVERTEXATTRIBL1DPROC) load(userptr, "glVertexAttribL1d"); + glad_glVertexAttribL1dv = (PFNGLVERTEXATTRIBL1DVPROC) load(userptr, "glVertexAttribL1dv"); + glad_glVertexAttribL2d = (PFNGLVERTEXATTRIBL2DPROC) load(userptr, "glVertexAttribL2d"); + glad_glVertexAttribL2dv = (PFNGLVERTEXATTRIBL2DVPROC) load(userptr, "glVertexAttribL2dv"); + glad_glVertexAttribL3d = (PFNGLVERTEXATTRIBL3DPROC) load(userptr, "glVertexAttribL3d"); + glad_glVertexAttribL3dv = (PFNGLVERTEXATTRIBL3DVPROC) load(userptr, "glVertexAttribL3dv"); + glad_glVertexAttribL4d = (PFNGLVERTEXATTRIBL4DPROC) load(userptr, "glVertexAttribL4d"); + glad_glVertexAttribL4dv = (PFNGLVERTEXATTRIBL4DVPROC) load(userptr, "glVertexAttribL4dv"); + glad_glVertexAttribLPointer = (PFNGLVERTEXATTRIBLPOINTERPROC) load(userptr, "glVertexAttribLPointer"); + glad_glViewportArrayv = (PFNGLVIEWPORTARRAYVPROC) load(userptr, "glViewportArrayv"); + glad_glViewportIndexedf = (PFNGLVIEWPORTINDEXEDFPROC) load(userptr, "glViewportIndexedf"); + glad_glViewportIndexedfv = (PFNGLVIEWPORTINDEXEDFVPROC) load(userptr, "glViewportIndexedfv"); +} +static void 
glad_gl_load_GL_VERSION_4_2( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_4_2) return; + glad_glBindImageTexture = (PFNGLBINDIMAGETEXTUREPROC) load(userptr, "glBindImageTexture"); + glad_glDrawArraysInstancedBaseInstance = (PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC) load(userptr, "glDrawArraysInstancedBaseInstance"); + glad_glDrawElementsInstancedBaseInstance = (PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC) load(userptr, "glDrawElementsInstancedBaseInstance"); + glad_glDrawElementsInstancedBaseVertexBaseInstance = (PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC) load(userptr, "glDrawElementsInstancedBaseVertexBaseInstance"); + glad_glDrawTransformFeedbackInstanced = (PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC) load(userptr, "glDrawTransformFeedbackInstanced"); + glad_glDrawTransformFeedbackStreamInstanced = (PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC) load(userptr, "glDrawTransformFeedbackStreamInstanced"); + glad_glGetActiveAtomicCounterBufferiv = (PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC) load(userptr, "glGetActiveAtomicCounterBufferiv"); + glad_glGetInternalformativ = (PFNGLGETINTERNALFORMATIVPROC) load(userptr, "glGetInternalformativ"); + glad_glMemoryBarrier = (PFNGLMEMORYBARRIERPROC) load(userptr, "glMemoryBarrier"); + glad_glTexStorage1D = (PFNGLTEXSTORAGE1DPROC) load(userptr, "glTexStorage1D"); + glad_glTexStorage2D = (PFNGLTEXSTORAGE2DPROC) load(userptr, "glTexStorage2D"); + glad_glTexStorage3D = (PFNGLTEXSTORAGE3DPROC) load(userptr, "glTexStorage3D"); +} +static void glad_gl_load_GL_VERSION_4_3( GLADuserptrloadfunc load, void* userptr) { + if(!GLAD_GL_VERSION_4_3) return; + glad_glBindVertexBuffer = (PFNGLBINDVERTEXBUFFERPROC) load(userptr, "glBindVertexBuffer"); + glad_glClearBufferData = (PFNGLCLEARBUFFERDATAPROC) load(userptr, "glClearBufferData"); + glad_glClearBufferSubData = (PFNGLCLEARBUFFERSUBDATAPROC) load(userptr, "glClearBufferSubData"); + glad_glCopyImageSubData = (PFNGLCOPYIMAGESUBDATAPROC) load(userptr, 
"glCopyImageSubData"); + glad_glDebugMessageCallback = (PFNGLDEBUGMESSAGECALLBACKPROC) load(userptr, "glDebugMessageCallback"); + glad_glDebugMessageControl = (PFNGLDEBUGMESSAGECONTROLPROC) load(userptr, "glDebugMessageControl"); + glad_glDebugMessageInsert = (PFNGLDEBUGMESSAGEINSERTPROC) load(userptr, "glDebugMessageInsert"); + glad_glDispatchCompute = (PFNGLDISPATCHCOMPUTEPROC) load(userptr, "glDispatchCompute"); + glad_glDispatchComputeIndirect = (PFNGLDISPATCHCOMPUTEINDIRECTPROC) load(userptr, "glDispatchComputeIndirect"); + glad_glFramebufferParameteri = (PFNGLFRAMEBUFFERPARAMETERIPROC) load(userptr, "glFramebufferParameteri"); + glad_glGetDebugMessageLog = (PFNGLGETDEBUGMESSAGELOGPROC) load(userptr, "glGetDebugMessageLog"); + glad_glGetFramebufferParameteriv = (PFNGLGETFRAMEBUFFERPARAMETERIVPROC) load(userptr, "glGetFramebufferParameteriv"); + glad_glGetInternalformati64v = (PFNGLGETINTERNALFORMATI64VPROC) load(userptr, "glGetInternalformati64v"); + glad_glGetObjectLabel = (PFNGLGETOBJECTLABELPROC) load(userptr, "glGetObjectLabel"); + glad_glGetObjectPtrLabel = (PFNGLGETOBJECTPTRLABELPROC) load(userptr, "glGetObjectPtrLabel"); + glad_glGetPointerv = (PFNGLGETPOINTERVPROC) load(userptr, "glGetPointerv"); + glad_glGetProgramInterfaceiv = (PFNGLGETPROGRAMINTERFACEIVPROC) load(userptr, "glGetProgramInterfaceiv"); + glad_glGetProgramResourceIndex = (PFNGLGETPROGRAMRESOURCEINDEXPROC) load(userptr, "glGetProgramResourceIndex"); + glad_glGetProgramResourceLocation = (PFNGLGETPROGRAMRESOURCELOCATIONPROC) load(userptr, "glGetProgramResourceLocation"); + glad_glGetProgramResourceLocationIndex = (PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC) load(userptr, "glGetProgramResourceLocationIndex"); + glad_glGetProgramResourceName = (PFNGLGETPROGRAMRESOURCENAMEPROC) load(userptr, "glGetProgramResourceName"); + glad_glGetProgramResourceiv = (PFNGLGETPROGRAMRESOURCEIVPROC) load(userptr, "glGetProgramResourceiv"); + glad_glInvalidateBufferData = (PFNGLINVALIDATEBUFFERDATAPROC) 
load(userptr, "glInvalidateBufferData"); + glad_glInvalidateBufferSubData = (PFNGLINVALIDATEBUFFERSUBDATAPROC) load(userptr, "glInvalidateBufferSubData"); + glad_glInvalidateFramebuffer = (PFNGLINVALIDATEFRAMEBUFFERPROC) load(userptr, "glInvalidateFramebuffer"); + glad_glInvalidateSubFramebuffer = (PFNGLINVALIDATESUBFRAMEBUFFERPROC) load(userptr, "glInvalidateSubFramebuffer"); + glad_glInvalidateTexImage = (PFNGLINVALIDATETEXIMAGEPROC) load(userptr, "glInvalidateTexImage"); + glad_glInvalidateTexSubImage = (PFNGLINVALIDATETEXSUBIMAGEPROC) load(userptr, "glInvalidateTexSubImage"); + glad_glMultiDrawArraysIndirect = (PFNGLMULTIDRAWARRAYSINDIRECTPROC) load(userptr, "glMultiDrawArraysIndirect"); + glad_glMultiDrawElementsIndirect = (PFNGLMULTIDRAWELEMENTSINDIRECTPROC) load(userptr, "glMultiDrawElementsIndirect"); + glad_glObjectLabel = (PFNGLOBJECTLABELPROC) load(userptr, "glObjectLabel"); + glad_glObjectPtrLabel = (PFNGLOBJECTPTRLABELPROC) load(userptr, "glObjectPtrLabel"); + glad_glPopDebugGroup = (PFNGLPOPDEBUGGROUPPROC) load(userptr, "glPopDebugGroup"); + glad_glPushDebugGroup = (PFNGLPUSHDEBUGGROUPPROC) load(userptr, "glPushDebugGroup"); + glad_glShaderStorageBlockBinding = (PFNGLSHADERSTORAGEBLOCKBINDINGPROC) load(userptr, "glShaderStorageBlockBinding"); + glad_glTexBufferRange = (PFNGLTEXBUFFERRANGEPROC) load(userptr, "glTexBufferRange"); + glad_glTexStorage2DMultisample = (PFNGLTEXSTORAGE2DMULTISAMPLEPROC) load(userptr, "glTexStorage2DMultisample"); + glad_glTexStorage3DMultisample = (PFNGLTEXSTORAGE3DMULTISAMPLEPROC) load(userptr, "glTexStorage3DMultisample"); + glad_glTextureView = (PFNGLTEXTUREVIEWPROC) load(userptr, "glTextureView"); + glad_glVertexAttribBinding = (PFNGLVERTEXATTRIBBINDINGPROC) load(userptr, "glVertexAttribBinding"); + glad_glVertexAttribFormat = (PFNGLVERTEXATTRIBFORMATPROC) load(userptr, "glVertexAttribFormat"); + glad_glVertexAttribIFormat = (PFNGLVERTEXATTRIBIFORMATPROC) load(userptr, "glVertexAttribIFormat"); + 
/* ---- tail of glad_gl_load_GL_VERSION_4_3 (definition begins on an earlier line) ---- */
    glad_glVertexAttribLFormat = (PFNGLVERTEXATTRIBLFORMATPROC) load(userptr, "glVertexAttribLFormat");
    glad_glVertexBindingDivisor = (PFNGLVERTEXBINDINGDIVISORPROC) load(userptr, "glVertexBindingDivisor");
}



/* Free a NULL-terminated array of extension-name strings allocated by
 * glad_gl_get_extensions(). Safe to call with NULL. */
static void glad_gl_free_extensions(char **exts_i) {
    if (exts_i != NULL) {
        unsigned int index;
        for(index = 0; exts_i[index]; index++) {
            free((void *) (exts_i[index]));
        }
        free((void *)exts_i);
        exts_i = NULL; /* clears only the local copy of the caller's pointer */
    }
}
/* Retrieve the extension list, preferring the indexed GL 3.0+ API
 * (glGetStringi, one heap-duplicated string per extension, NULL-terminated
 * array written to *out_exts_i) and falling back to the legacy single
 * space-separated string (written to *out_exts).
 * Returns 1 on success, 0 on allocation failure or missing entry points. */
static int glad_gl_get_extensions( const char **out_exts, char ***out_exts_i) {
#if defined(GL_ES_VERSION_3_0) || defined(GL_VERSION_3_0)
    if (glad_glGetStringi != NULL && glad_glGetIntegerv != NULL) {
        unsigned int index = 0;
        unsigned int num_exts_i = 0;
        char **exts_i = NULL;
        glad_glGetIntegerv(GL_NUM_EXTENSIONS, (int*) &num_exts_i);
        /* +1 slot for the NULL terminator */
        exts_i = (char **) malloc((num_exts_i + 1) * (sizeof *exts_i));
        if (exts_i == NULL) {
            return 0;
        }
        for(index = 0; index < num_exts_i; index++) {
            /* NOTE(review): glGetStringi may return NULL on error, which
             * would make this strlen undefined — assumes a valid context
             * where index < GL_NUM_EXTENSIONS always yields a string. */
            const char *gl_str_tmp = (const char*) glad_glGetStringi(GL_EXTENSIONS, index);
            size_t len = strlen(gl_str_tmp) + 1;

            char *local_str = (char*) malloc(len * sizeof(char));
            if(local_str == NULL) {
                /* terminate the partial array so the helper can free it */
                exts_i[index] = NULL;
                glad_gl_free_extensions(exts_i);
                return 0;
            }

            memcpy(local_str, gl_str_tmp, len * sizeof(char));
            exts_i[index] = local_str;
        }
        exts_i[index] = NULL;

        *out_exts_i = exts_i;

        return 1;
    }
#else
    GLAD_UNUSED(out_exts_i);
#endif
    if (glad_glGetString == NULL) {
        return 0;
    }
    *out_exts = (const char *)glad_glGetString(GL_EXTENSIONS);
    return 1;
}
/* Test membership of `ext` in either extension representation:
 * exact strcmp against the indexed array when available, otherwise a
 * whole-word substring scan of the space-separated legacy string. */
static int glad_gl_has_extension(const char *exts, char **exts_i, const char *ext) {
    if(exts_i) {
        unsigned int index;
        for(index = 0; exts_i[index]; index++) {
            const char *e = exts_i[index];
            if(strcmp(e, ext) == 0) {
                return 1;
            }
        }
    } else {
        const char *extensions;
        const char *loc;
        const char *terminator;
        extensions = exts;
        if(extensions == NULL || ext == NULL) {
            return 0;
        }
        while(1) {
            loc = strstr(extensions, ext);
            if(loc == NULL) {
                return 0;
            }
            terminator = loc + strlen(ext);
            /* require word boundaries so "GL_foo" does not match "GL_foobar" */
            if((loc == extensions || *(loc - 1) == ' ') &&
                (*terminator == ' ' || *terminator == '\0')) {
                return 1;
            }
            extensions = terminator;
        }
    }
    return 0;
}

/* Adapt a plain name->proc loader function (stored in userptr) to the
 * (userptr, name) calling convention used by the glad loaders. */
static GLADapiproc glad_gl_get_proc_from_userptr(void *userptr, const char* name) {
    return (GLAD_GNUC_EXTENSION (GLADapiproc (*)(const char *name)) userptr)(name);
}

/* Query and immediately release the extension list; this generated build
 * enables no extensions, so only success/failure (1/0) is reported. */
static int glad_gl_find_extensions_gl(void) {
    const char *exts = NULL;
    char **exts_i = NULL;
    if (!glad_gl_get_extensions(&exts, &exts_i)) return 0;

    GLAD_UNUSED(&glad_gl_has_extension);

    glad_gl_free_extensions(exts_i);

    return 1;
}

/* Parse GL_VERSION (skipping known ES/SC prefixes) and set the
 * GLAD_GL_VERSION_x_y flags for every core version at or below it. */
static int glad_gl_find_core_gl(void) {
    int i;
    const char* version;
    const char* prefixes[] = {
        "OpenGL ES-CM ",
        "OpenGL ES-CL ",
        "OpenGL ES ",
        "OpenGL SC ",
        NULL
    };
    int major = 0;
    int minor = 0;
    version = (const char*) glad_glGetString(GL_VERSION);
    if (!version) return 0;
    for (i = 0;  prefixes[i];  i++) {
        const size_t length = strlen(prefixes[i]);
        if (strncmp(version, prefixes[i], length) == 0) {
            version += length;
            break;
        }
    }

    GLAD_IMPL_UTIL_SSCANF(version, "%d.%d", &major, &minor);

    GLAD_GL_VERSION_1_0 = (major == 1 && minor >= 0) || major > 1;
    GLAD_GL_VERSION_1_1 = (major == 1 && minor >= 1) || major > 1;
    GLAD_GL_VERSION_1_2 = (major == 1 && minor >= 2) || major > 1;
    GLAD_GL_VERSION_1_3 = (major == 1 && minor >= 3) || major > 1;
    GLAD_GL_VERSION_1_4 = (major == 1 && minor >= 4) || major > 1;
    GLAD_GL_VERSION_1_5 = (major == 1 && minor >= 5) || major > 1;
    GLAD_GL_VERSION_2_0 = (major == 2 && minor >= 0) || major > 2;
    GLAD_GL_VERSION_2_1 = (major == 2 && minor >= 1) || major > 2;
    GLAD_GL_VERSION_3_0 = (major == 3 && minor >= 0) || major > 3;
    GLAD_GL_VERSION_3_1 = (major == 3 && minor >= 1) || major > 3;
    GLAD_GL_VERSION_3_2 = (major == 3 && minor >= 2) || major > 3;
    GLAD_GL_VERSION_3_3 = (major == 3 && minor >= 3) || major > 3;
    GLAD_GL_VERSION_4_0
/* ---- tail of glad_gl_find_core_gl() (definition begins on an earlier line) ---- */
                        = (major == 4 && minor >= 0) || major > 4;
    GLAD_GL_VERSION_4_1 = (major == 4 && minor >= 1) || major > 4;
    GLAD_GL_VERSION_4_2 = (major == 4 && minor >= 2) || major > 4;
    GLAD_GL_VERSION_4_3 = (major == 4 && minor >= 3) || major > 4;

    return GLAD_MAKE_VERSION(major, minor);
}

/* Resolve every OpenGL core entry point (1.0 through 4.3) via `load`,
 * which is invoked as load(userptr, name).
 * Returns the packed context version on success, or 0 when glGetString
 * itself cannot be resolved or the extension query fails. */
int gladLoadGLUserPtr( GLADuserptrloadfunc load, void *userptr) {
    int version;

    /* glGetString must exist before the version can be probed */
    glad_glGetString = (PFNGLGETSTRINGPROC) load(userptr, "glGetString");
    if(glad_glGetString == NULL) return 0;
    version = glad_gl_find_core_gl();

    glad_gl_load_GL_VERSION_1_0(load, userptr);
    glad_gl_load_GL_VERSION_1_1(load, userptr);
    glad_gl_load_GL_VERSION_1_2(load, userptr);
    glad_gl_load_GL_VERSION_1_3(load, userptr);
    glad_gl_load_GL_VERSION_1_4(load, userptr);
    glad_gl_load_GL_VERSION_1_5(load, userptr);
    glad_gl_load_GL_VERSION_2_0(load, userptr);
    glad_gl_load_GL_VERSION_2_1(load, userptr);
    glad_gl_load_GL_VERSION_3_0(load, userptr);
    glad_gl_load_GL_VERSION_3_1(load, userptr);
    glad_gl_load_GL_VERSION_3_2(load, userptr);
    glad_gl_load_GL_VERSION_3_3(load, userptr);
    glad_gl_load_GL_VERSION_4_0(load, userptr);
    glad_gl_load_GL_VERSION_4_1(load, userptr);
    glad_gl_load_GL_VERSION_4_2(load, userptr);
    glad_gl_load_GL_VERSION_4_3(load, userptr);

    if (!glad_gl_find_extensions_gl()) return 0;

    return version;
}

/* Convenience wrapper: adapt a plain name->proc loader (e.g.
 * glfwGetProcAddress) to the userptr-based loader above. */
int gladLoadGL( GLADloadfunc load) {
    return gladLoadGLUserPtr( glad_gl_get_proc_from_userptr, GLAD_GNUC_EXTENSION (void*) load);
}

#ifdef __cplusplus
}
#endif
/* ==== src/main.c ==== */
/* NOTE(review): the original #include targets below were lost in
 * extraction; the bare directives are preserved as found. */
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "glad/gl.h"
#include

/* defined in src/test.cpp */
extern void load();
extern void draw();

/* Create a 1024x1024 GLFW window with a GL 3.3 context on X11, load GL
 * entry points, then run the render loop until the window closes or
 * Escape is pressed. */
int main()
{
    glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_X11);
    glfwInit();
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
/*
 * Read an entire file into a freshly allocated buffer.
 *
 * filename: path to open (binary mode).
 * out_size: on success, receives the file size in bytes.
 *
 * Returns the malloc'd buffer (caller owns it and must free()), or NULL
 * on any error (a diagnostic is printed to stderr).
 *
 * Fixes over the previous version: the FILE handle is now closed on every
 * path (it was never closed, leaking a descriptor per call), malloc is
 * checked, a failed fread no longer leaks the buffer, the size comparison
 * no longer mixes signed and unsigned, the redundant rewind() after
 * fseek(SEEK_SET) is gone, and the SEEK_SET error message no longer says
 * "lseek".
 */
void * read_file(const char * filename, int * out_size)
{
  void * buf = NULL;

  FILE * f = fopen(filename, "rb");
  if (f == NULL) {
    fprintf(stderr, "fopen(%s): %s\n", filename, strerror(errno));
    return NULL;
  }

  if (fseek(f, 0, SEEK_END) < 0) {
    fprintf(stderr, "fseek(%s, SEEK_END): %s\n", filename, strerror(errno));
    goto out_close;
  }

  long size = ftell(f);
  if (size < 0) {
    fprintf(stderr, "ftell(%s): %s\n", filename, strerror(errno));
    goto out_close;
  }

  if (fseek(f, 0, SEEK_SET) < 0) {
    fprintf(stderr, "fseek(%s, SEEK_SET): %s\n", filename, strerror(errno));
    goto out_close;
  }

  buf = malloc((size_t)size);
  if (buf == NULL) {
    fprintf(stderr, "malloc(%ld): %s\n", size, strerror(errno));
    goto out_close;
  }

  size_t read_size = fread(buf, 1, (size_t)size, f);
  if (read_size != (size_t)size) {
    fprintf(stderr, "fread(%s): %s\n", filename, strerror(errno));
    free(buf);
    buf = NULL;
    goto out_close;
  }

  *out_size = (int)size;

out_close:
  fclose(f);
  return buf;
}
+ + // vertex shader + unsigned int vertex_shader = glCreateShader(GL_VERTEX_SHADER); + glShaderSource(vertex_shader, 1, &vertex_source, &vertex_source_size); + glCompileShader(vertex_shader); + glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &compile_status); + if (!compile_status) { + glGetShaderInfoLog(vertex_shader, 512, NULL, info_log); + fprintf(stderr, "vertex shader compile: %s\n", info_log); + } + + // fragment shader + unsigned int fragment_shader = glCreateShader(GL_FRAGMENT_SHADER); + glShaderSource(fragment_shader, 1, &fragment_source, &fragment_source_size); + glCompileShader(fragment_shader); + glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &compile_status); + if (!compile_status) { + glGetShaderInfoLog(fragment_shader, 512, NULL, info_log); + fprintf(stderr, "fragment shader compile: %s\n", info_log); + } + + // link shaders + unsigned int shader_program = glCreateProgram(); + glAttachShader(shader_program, vertex_shader); + glAttachShader(shader_program, fragment_shader); + glLinkProgram(shader_program); + glGetProgramiv(shader_program, GL_LINK_STATUS, &compile_status); + if (!compile_status) { + glGetProgramInfoLog(shader_program, 512, NULL, info_log); + fprintf(stderr, "shader link: %s\n", info_log); + } + + glDeleteShader(vertex_shader); + glDeleteShader(fragment_shader); + + return shader_program; +} + +unsigned int compile_from_files(const char * vertex_path, + const char * fragment_path) +{ + int vertex_source_size; + char * vertex_source = read_file(vertex_path, &vertex_source_size); + assert(vertex_source != NULL); + int fragment_source_size; + char * fragment_source = read_file(fragment_path, &fragment_source_size); + assert(fragment_source != NULL); + + unsigned int program = compile(vertex_source, vertex_source_size, + fragment_source, fragment_source_size); + + free(vertex_source); + free(fragment_source); + + return program; +} diff --git a/src/test.cpp b/src/test.cpp new file mode 100644 index 0000000..a0f1580 --- /dev/null +++ 
/* ==== src/test.cpp ==== */
/* NOTE(review): the original #include targets below were lost in
 * extraction; the bare directives are preserved as found. */
#include
#include

#include "glad/gl.h"
#include "opengl.h"
#include "directxmath/directxmath.h"
#include "test.h"

/* Cached shader interface locations for the test program. */
struct location {
  struct {
    unsigned int position;  /* "Position" vertex attribute */
    unsigned int normal;    /* "Normal" vertex attribute */
    unsigned int texture;   /* "Texture" vertex attribute */
  } attrib;
  struct {
    unsigned int transform;        /* "Transform" mat4 uniform */
    unsigned int terrain_sampler;  /* "TerrainSampler" sampler uniform */
  } uniform;
};

// state
static unsigned int test_program;   /* linked program from shader/test.{vert,frag} */
static struct location location;    /* locations queried from test_program */

/* Compile shader/test.vert + shader/test.frag, cache the resulting
 * program and its attribute/uniform locations, and log them. */
void load_program()
{
  unsigned int program = compile_from_files("shader/test.vert",
                                            "shader/test.frag");

  /* NOTE(review): glGetAttribLocation/glGetUniformLocation return int and
   * yield -1 when the name is inactive; storing into unsigned turns that
   * into a huge value and the %u prints below would mask the failure —
   * confirm these names always resolve in the shaders. */
  location.attrib.position = glGetAttribLocation(program, "Position");
  location.attrib.normal = glGetAttribLocation(program, "Normal");
  location.attrib.texture = glGetAttribLocation(program, "Texture");
  printf("attributes:\n  position %u\n  normal %u\n  texture %u\n",
         location.attrib.position,
         location.attrib.normal,
         location.attrib.texture);

  location.uniform.transform = glGetUniformLocation(program, "Transform");
  location.uniform.terrain_sampler = glGetUniformLocation(program, "TerrainSampler");
  printf("uniforms:\n  transform %u\n  terrain_sampler %u\n",
         location.uniform.transform,
         location.uniform.terrain_sampler);

  test_program = program;
}

/* One VAO/VBO/IBO triple per minecraft region file (4 regions). */
static unsigned int vertex_array_objects[4];
static unsigned int vertex_buffers[4];
static unsigned int index_buffers[4];
static unsigned int index_count[4];  /* element count per region, set at load */

const char * vertex_paths[] = {
  "minecraft/region.0.0.vtx",
  "minecraft/region.0.-1.vtx",
  "minecraft/region.-1.0.vtx",
  "minecraft/region.-1.-1.vtx",
};

static const char * index_paths[] = {
  "minecraft/region.0.0.idx",
  "minecraft/region.0.-1.idx",
  "minecraft/region.-1.0.idx",
  "minecraft/region.-1.-1.idx",
};

/* Upload region i's vertex data into vertex_buffers[i]. */
void load_vertex_buffer(int i)
{
  int vertex_buffer_data_size;
  /* NOTE(review): read_file may return NULL (and leave the size
   * uninitialized) — confirm the region files are always present. */
  void * vertex_buffer_data = read_file(vertex_paths[i], &vertex_buffer_data_size);

  glBindBuffer(GL_ARRAY_BUFFER, vertex_buffers[i]);
  glBufferData(GL_ARRAY_BUFFER, vertex_buffer_data_size,
vertex_buffer_data, GL_STATIC_DRAW); + + free(vertex_buffer_data); +} + +void load_element_buffer(int i) +{ + int index_buffer_data_size; + void * index_buffer_data = read_file(index_paths[i], &index_buffer_data_size); + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, index_buffers[i]); + glBufferData(GL_ELEMENT_ARRAY_BUFFER, index_buffer_data_size, index_buffer_data, GL_STATIC_DRAW); + index_count[i] = index_buffer_data_size / 4; + + free(index_buffer_data); +} + +void load_vertex_attributes() +{ + glVertexAttribPointer(location.attrib.position, + 3, + GL_FLOAT, + GL_FALSE, + (sizeof (float)) * 9, + (void*)(0 * 4) + ); + glVertexAttribPointer(location.attrib.normal, + 3, + GL_FLOAT, + GL_FALSE, + (sizeof (float)) * 9, + (void*)(3 * 4) + ); + glVertexAttribPointer(location.attrib.texture, + 3, + GL_FLOAT, + GL_FALSE, + (sizeof (float)) * 9, + (void*)(6 * 4) + ); + glEnableVertexAttribArray(location.attrib.position); + glEnableVertexAttribArray(location.attrib.normal); + glEnableVertexAttribArray(location.attrib.texture); +} + +void load_buffers() +{ + glGenVertexArrays(4, vertex_array_objects); + glGenBuffers(4, index_buffers); + glGenBuffers(4, vertex_buffers); + + for (int i = 0; i < 4; i++) { + glBindVertexArray(vertex_array_objects[i]); + + load_element_buffer(i); + load_vertex_buffer(i); + load_vertex_attributes(); + } +} + +static unsigned int texture; + +void load_textures() +{ + glGenTextures(1, &texture); + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, texture); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + int texture_data_size; + void * texture_data = read_file("minecraft/terrain.data", &texture_data_size); + assert(texture_data != NULL); + + int width = 256; + int height = 256; + glTexImage2D(GL_TEXTURE_2D, 0, 
               /* ---- tail of load_textures() (definition begins on an earlier line) ---- */
               GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, texture_data);

  free(texture_data);
}

/* uniform buffer mapping block ids to texture ids (currently unused) */
static unsigned int textures_ubo;

/* Upload minecraft/block_id_to_texture_id.data into a uniform buffer.
 * Currently disabled — not called from load(). */
void load_texture_shader_storage()
{
  unsigned int buffer;
  glGenBuffers(1, &buffer);
  glBindBuffer(GL_UNIFORM_BUFFER, buffer);

  int textures_data_size;
  void * textures_data = read_file("minecraft/block_id_to_texture_id.data", &textures_data_size);
  assert(textures_data != NULL);

  printf("%d\n", textures_data_size);

  glBufferData(GL_UNIFORM_BUFFER, textures_data_size, textures_data, GL_STATIC_DRAW);
  free(textures_data);

  textures_ubo = buffer;
}

extern "C" {
  /* resolved at runtime from the hosting SDL process (see load()) */
  void * SDL_GL_GetProcAddress(const char *proc);
}

/* One-time setup entry point called by the host (see src/main.c):
 * loads GL entry points via SDL, then the shader program, geometry and
 * textures.
 * NOTE(review): the gladLoadGL return value is not checked — a failed
 * load would crash on the first GL call below; confirm intentional. */
void load()
{
  fprintf(stderr, "getproc %p\n", SDL_GL_GetProcAddress);
  gladLoadGL((GLADloadfunc)SDL_GL_GetProcAddress);

  load_program();
  load_buffers();
  load_textures();
  //load_texture_shader_storage();

  //unsigned int textures_layout = glGetUniformBlockIndex(test_program, "TexturesLayout");
  //glUniformBlockBinding(test_program, textures_layout, 0);
  //printf("textures_layout %d\n", textures_layout);
}

/* accumulated camera offset, fed by update() and read by draw() */
static float vx = 0.0;
static float vy = 0.0;
static float vz = 0.0;

/* Integrate controller axes into the camera offset.
 * lx/ly: left stick, ry: right stick vertical (scaled by 2.5/step). */
void update(float lx, float ly, float ry)
{
  vx += 2.5 * lx;
  vy += -2.5 * ry;
  vz += -2.5 * ly;
}

/* Render all four regions with the test program.
 * NOTE(review): near_z=1.0 > far_z=0.1 together with glDepthFunc(GL_GREATER)
 * looks like a deliberate reversed-Z setup — confirm; glClearDepth(-1.0f)
 * clamps to 0.0, which is consistent with reversed-Z clearing. */
void draw()
{
  XMVECTOR eye = XMVectorSet(vx + -50.0f, vz + -50.0f, vy + 150.0f, 0.0f);
  XMVECTOR at = XMVectorSet(vx + 50.0f, vz + 50.0f, vy + 50.0f, 0.0f);
  XMVECTOR up = XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);  /* Z-up world */
  XMMATRIX view = XMMatrixLookAtRH(eye, at, up);

  float fov_angle_y = XMConvertToRadians(45 * 0.75);
  float aspect_ratio = 1.0;
  float near_z = 1.0;
  float far_z = 0.1;
  XMMATRIX projection = XMMatrixPerspectiveFovRH(fov_angle_y, aspect_ratio, near_z, far_z);
  XMMATRIX transform = view * projection;

  glClearColor(0.1f, 0.1f, 0.1f, 0.1f);
  glClearDepth(-1.0f);
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

  glUseProgram(test_program);

  glEnable(GL_DEPTH_TEST);
  glDepthFunc(GL_GREATER);

  glActiveTexture(GL_TEXTURE0);
  glBindTexture(GL_TEXTURE_2D, texture);

  /* NOTE(review): DirectXMath matrices are row-major and transpose=false is
   * passed here — presumably the shader multiplies accordingly; confirm. */
  glUniformMatrix4fv(location.uniform.transform, 1, false, (float *)&transform);
  glUniform1i(location.uniform.terrain_sampler, 0);

  //glBindBuffer(GL_UNIFORM_BUFFER, textures_ubo);
  //glBindBufferBase(GL_UNIFORM_BUFFER, 0, textures_ubo);

  for (int i = 0; i < 4; i++) {
    glBindVertexArray(vertex_array_objects[i]);

    glDrawElements(GL_TRIANGLES, index_count[i], GL_UNSIGNED_INT, 0);
  }
}